From b4debdb94822c5e66ca00c85da7e4f576ced2808 Mon Sep 17 00:00:00 2001 From: marwankefah Date: Thu, 8 Aug 2024 03:29:40 +0300 Subject: [PATCH] added docker submission instruction. --- docker/ReadME.md | 185 +++ .../docker-image-select-evaluation-queue.png | Bin 0 -> 92724 bytes .../docker-image-submit-to-challenge.png | Bin 0 -> 391642 bytes docker/media/project-docker-registry-view.png | Bin 0 -> 247905 bytes docker/media/show-page-outline.png | Bin 0 -> 302785 bytes docker/template/Dockerfile | 18 + docker/template/sample-test-data/test.csv | 4 + docker/template/src/Model.py | 32 + docker/template/src/main.py | 63 + .../Dataset102_TriALS/.gitignore | 0 docker/template/src/nnunetv2/__init__.py | 0 .../src/nnunetv2/batch_running/__init__.py | 0 .../batch_running/benchmarking/__init__.py | 0 .../generate_benchmarking_commands.py | 41 + .../summarize_benchmark_results.py | 70 + .../collect_results_custom_Decathlon.py | 114 ++ .../collect_results_custom_Decathlon_2d.py | 18 + .../generate_lsf_runs_customDecathlon.py | 86 ++ .../release_trainings/__init__.py | 0 .../release_trainings/nnunetv2_v1/__init__.py | 0 .../nnunetv2_v1/collect_results.py | 113 ++ .../nnunetv2_v1/generate_lsf_commands.py | 93 ++ docker/template/src/nnunetv2/configuration.py | 10 + .../dataset_conversion/Dataset027_ACDC.py | 87 ++ .../Dataset073_Fluo_C3DH_A549_SIM.py | 85 ++ .../dataset_conversion/Dataset114_MNMs.py | 198 +++ .../dataset_conversion/Dataset115_EMIDEC.py | 61 + .../Dataset120_RoadSegmentation.py | 87 ++ .../dataset_conversion/Dataset137_BraTS21.py | 98 ++ .../Dataset218_Amos2022_task1.py | 70 + .../Dataset219_Amos2022_task2.py | 65 + .../dataset_conversion/Dataset220_KiTS2023.py | 50 + .../Dataset221_AutoPETII_2023.py | 70 + .../Dataset988_dummyDataset4.py | 32 + .../nnunetv2/dataset_conversion/__init__.py | 0 .../dataset_conversion/convert_MSD_dataset.py | 133 ++ ...vert_raw_dataset_from_old_nnunet_format.py | 53 + ...egrationTest_Hippocampus_regions_ignore.py | 74 + ...997_IntegrationTest_Hippocampus_regions.py | 37 + ...t998_IntegrationTest_Hippocampus_ignore.py | 33 + .../Dataset999_IntegrationTest_Hippocampus.py | 27 + .../__init__.py | 0 .../generate_dataset_json.py | 103 ++ .../src/nnunetv2/ensembling/__init__.py | 0 .../src/nnunetv2/ensembling/ensemble.py | 206 +++ .../src/nnunetv2/evaluation/__init__.py | 0 .../evaluation/accumulate_cv_results.py | 58 + .../evaluation/evaluate_predictions.py | 263 ++++ .../evaluation/find_best_configuration.py | 333 +++++ .../nnunetv2/experiment_planning/__init__.py | 0 .../dataset_fingerprint/__init__.py | 0 .../fingerprint_extractor.py | 199 +++ .../experiment_planners/__init__.py | 0 .../default_experiment_planner.py | 542 +++++++ .../experiment_planners/network_topology.py | 105 ++ .../experiment_planners/readme.md | 38 + .../experiment_planners/resencUNet_planner.py | 54 + .../plan_and_preprocess_api.py | 137 ++ .../plan_and_preprocess_entrypoints.py | 201 +++ .../plans_for_pretraining/__init__.py | 0 .../move_plans_between_datasets.py | 82 ++ .../verify_dataset_integrity.py | 231 +++ .../template/src/nnunetv2/imageio/__init__.py | 0 .../nnunetv2/imageio/base_reader_writer.py | 107 ++ .../imageio/natural_image_reader_writer.py | 73 + .../nnunetv2/imageio/nibabel_reader_writer.py | 204 +++ .../imageio/reader_writer_registry.py | 79 + .../template/src/nnunetv2/imageio/readme.md | 7 + .../imageio/simpleitk_reader_writer.py | 129 ++ .../src/nnunetv2/imageio/tif_reader_writer.py | 100 ++ .../src/nnunetv2/inference/__init__.py | 0 
.../src/nnunetv2/inference/data_iterators.py | 316 ++++ .../src/nnunetv2/inference/examples.py | 102 ++ .../nnunetv2/inference/export_prediction.py | 145 ++ .../inference/predict_from_raw_data.py | 917 ++++++++++++ .../template/src/nnunetv2/inference/readme.md | 205 +++ .../inference/sliding_window_prediction.py | 67 + .../src/nnunetv2/model_sharing/__init__.py | 0 .../nnunetv2/model_sharing/entry_points.py | 61 + .../nnunetv2/model_sharing/model_download.py | 47 + .../nnunetv2/model_sharing/model_export.py | 124 ++ .../nnunetv2/model_sharing/model_import.py | 8 + .../template/src/nnunetv2/nets/LightMUNet.py | 287 ++++ .../template/src/nnunetv2/nets/UMambaBot.py | 269 ++++ .../template/src/nnunetv2/nets/UMambaEnc.py | 414 ++++++ docker/template/src/nnunetv2/nets/__init__.py | 0 .../src/nnunetv2/nets/mednextv1/MedNextV1.py | 432 ++++++ .../src/nnunetv2/nets/mednextv1/__init__.py | 0 .../src/nnunetv2/nets/mednextv1/blocks.py | 265 ++++ .../nets/mednextv1/create_mednext_v1.py | 83 ++ .../nnunetv2/nets/sam_lora_image_encoder.py | 206 +++ .../nets/segment_anything/__init__.py | 15 + .../automatic_mask_generator.py | 372 +++++ .../nets/segment_anything/build_sam.py | 168 +++ .../segment_anything/modeling/__init__.py | 11 + .../nets/segment_anything/modeling/common.py | 43 + .../modeling/image_encoder.py | 396 +++++ .../segment_anything/modeling/mask_decoder.py | 178 +++ .../modeling/prompt_encoder.py | 214 +++ .../nets/segment_anything/modeling/sam.py | 208 +++ .../segment_anything/modeling/transformer.py | 240 ++++ .../nets/segment_anything/predictor.py | 269 ++++ .../nets/segment_anything/utils/__init__.py | 5 + .../nets/segment_anything/utils/amg.py | 346 +++++ .../nets/segment_anything/utils/onnx.py | 144 ++ .../nets/segment_anything/utils/transforms.py | 102 ++ docker/template/src/nnunetv2/paths.py | 63 + .../src/nnunetv2/postprocessing/__init__.py | 0 .../remove_connected_components.py | 361 +++++ .../src/nnunetv2/preprocessing/__init__.py | 0 .../preprocessing/cropping/__init__.py | 0 .../preprocessing/cropping/cropping.py | 51 + .../preprocessing/normalization/__init__.py | 0 .../default_normalization_schemes.py | 95 ++ .../map_channel_name_to_normalization.py | 24 + .../preprocessing/normalization/readme.md | 5 + .../preprocessing/preprocessors/__init__.py | 0 .../preprocessors/default_preprocessor.py | 295 ++++ .../preprocessing/resampling/__init__.py | 0 .../resampling/default_resampling.py | 216 +++ .../preprocessing/resampling/utils.py | 15 + docker/template/src/nnunetv2/run/__init__.py | 0 .../nnunetv2/run/load_pretrained_weights.py | 66 + .../template/src/nnunetv2/run/run_training.py | 274 ++++ .../template/src/nnunetv2/tests/__init__.py | 0 .../tests/integration_tests/__init__.py | 0 .../add_lowres_and_cascade.py | 33 + .../cleanup_integration_test.py | 19 + .../tests/integration_tests/lsf_commands.sh | 10 + .../prepare_integration_tests.sh | 18 + .../tests/integration_tests/readme.md | 58 + .../integration_tests/run_integration_test.sh | 27 + ...n_integration_test_bestconfig_inference.py | 75 + .../run_integration_test_trainingOnly_DDP.sh | 1 + .../src/nnunetv2/training/__init__.py | 0 .../training/data_augmentation/__init__.py | 0 .../compute_initial_patch_size.py | 24 + .../custom_transforms/__init__.py | 0 .../custom_transforms/cascade_transforms.py | 136 ++ .../deep_supervision_donwsampling.py | 55 + .../limited_length_multithreaded_augmenter.py | 10 + .../manipulating_data_dict.py | 10 + .../custom_transforms/masking.py | 22 + .../region_based_training.py | 38 + 
.../transforms_for_dummy_2d.py | 45 + .../nnunetv2/training/dataloading/__init__.py | 0 .../training/dataloading/base_data_loader.py | 139 ++ .../training/dataloading/data_loader_2d.py | 94 ++ .../training/dataloading/data_loader_3d.py | 56 + .../training/dataloading/nnunet_dataset.py | 146 ++ .../nnunetv2/training/dataloading/utils.py | 128 ++ .../src/nnunetv2/training/logging/__init__.py | 0 .../training/logging/nnunet_logger.py | 103 ++ .../src/nnunetv2/training/loss/__init__.py | 0 .../nnunetv2/training/loss/compound_losses.py | 150 ++ .../training/loss/deep_supervision.py | 30 + .../src/nnunetv2/training/loss/dice.py | 192 +++ .../nnunetv2/training/loss/robust_ce_loss.py | 32 + .../training/lr_scheduler/__init__.py | 0 .../nnunetv2/training/lr_scheduler/polylr.py | 20 + .../nnunetv2/training/lr_scheduler/samedlr.py | 22 + .../training/nnUNetTrainer/__init__.py | 0 .../training/nnUNetTrainer/nnUNetTrainer.py | 1270 +++++++++++++++++ .../nnUNetTrainer/nnUNetTrainerLightMUNet.py | 141 ++ .../nnUNetTrainer/nnUNetTrainerMedNext.py | 259 ++++ .../nnUNetTrainer/nnUNetTrainerSAMed.py | 306 ++++ .../nnUNetTrainer/nnUNetTrainerSegResNet.py | 154 ++ .../nnUNetTrainer/nnUNetTrainerSwinUNETR.py | 159 +++ .../nnUNetTrainer/nnUNetTrainerUMambaBot.py | 30 + .../nnUNetTrainer/nnUNetTrainerUMambaEnc.py | 28 + .../nnUNetTrainer/nnUNetTrainerUNETR.py | 149 ++ .../nnUNetTrainer/variants/__init__.py | 0 .../variants/benchmarking/__init__.py | 0 .../nnUNetTrainerBenchmark_5epochs.py | 65 + ...tTrainerBenchmark_5epochs_noDataLoading.py | 65 + .../variants/data_augmentation/__init__.py | 0 .../data_augmentation/nnUNetTrainerDA5.py | 422 ++++++ .../data_augmentation/nnUNetTrainerDAOrd0.py | 104 ++ .../data_augmentation/nnUNetTrainerNoDA.py | 40 + .../nnUNetTrainerNoMirroring.py | 28 + .../nnUNetTrainer/variants/loss/__init__.py | 0 .../variants/loss/nnUNetTrainerCELoss.py | 41 + .../variants/loss/nnUNetTrainerDiceLoss.py | 60 + .../variants/loss/nnUNetTrainerTopkLoss.py | 76 + .../variants/lr_schedule/__init__.py | 0 .../lr_schedule/nnUNetTrainerCosAnneal.py | 13 + .../variants/network_architecture/__init__.py | 0 .../network_architecture/nnUNetTrainerBN.py | 73 + .../nnUNetTrainerNoDeepSupervision.py | 16 + .../variants/optimizer/__init__.py | 0 .../variants/optimizer/nnUNetTrainerAdam.py | 58 + .../variants/optimizer/nnUNetTrainerAdan.py | 66 + .../variants/sampling/__init__.py | 0 ...nnUNetTrainer_probabilisticOversampling.py | 76 + .../variants/training_length/__init__.py | 0 .../training_length/nnUNetTrainer_Xepochs.py | 77 + .../nnUNetTrainer_Xepochs_NoMirroring.py | 60 + .../src/nnunetv2/utilities/__init__.py | 0 .../src/nnunetv2/utilities/collate_outputs.py | 24 + .../utilities/dataset_name_id_conversion.py | 74 + .../src/nnunetv2/utilities/ddp_allgather.py | 49 + .../nnunetv2/utilities/default_n_proc_DA.py | 44 + .../nnunetv2/utilities/file_path_utilities.py | 123 ++ .../nnunetv2/utilities/find_class_by_name.py | 24 + .../utilities/get_network_from_plans.py | 77 + .../src/nnunetv2/utilities/helpers.py | 27 + .../src/nnunetv2/utilities/json_export.py | 59 + .../utilities/label_handling/__init__.py | 0 .../label_handling/label_handling.py | 322 +++++ .../utilities/network_initialization.py | 12 + .../src/nnunetv2/utilities/overlay_plots.py | 273 ++++ .../utilities/plans_handling/__init__.py | 0 .../utilities/plans_handling/plans_handler.py | 307 ++++ .../template/src/nnunetv2/utilities/utils.py | 69 + docker/template/src/run.sh | 5 + docker/template/src/setup.py | 67 + 216 files changed, 20567 
insertions(+) create mode 100644 docker/ReadME.md create mode 100644 docker/media/docker-image-select-evaluation-queue.png create mode 100644 docker/media/docker-image-submit-to-challenge.png create mode 100644 docker/media/project-docker-registry-view.png create mode 100644 docker/media/show-page-outline.png create mode 100644 docker/template/Dockerfile create mode 100644 docker/template/sample-test-data/test.csv create mode 100644 docker/template/src/Model.py create mode 100644 docker/template/src/main.py create mode 100644 docker/template/src/nnUNet_results/Dataset102_TriALS/.gitignore create mode 100644 docker/template/src/nnunetv2/__init__.py create mode 100644 docker/template/src/nnunetv2/batch_running/__init__.py create mode 100644 docker/template/src/nnunetv2/batch_running/benchmarking/__init__.py create mode 100644 docker/template/src/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py create mode 100644 docker/template/src/nnunetv2/batch_running/benchmarking/summarize_benchmark_results.py create mode 100644 docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon.py create mode 100644 docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py create mode 100644 docker/template/src/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py create mode 100644 docker/template/src/nnunetv2/batch_running/release_trainings/__init__.py create mode 100644 docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py create mode 100644 docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py create mode 100644 docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py create mode 100644 docker/template/src/nnunetv2/configuration.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset027_ACDC.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset073_Fluo_C3DH_A549_SIM.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset114_MNMs.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset137_BraTS21.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/__init__.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/convert_MSD_dataset.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py create mode 100644 
docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py create mode 100644 docker/template/src/nnunetv2/dataset_conversion/generate_dataset_json.py create mode 100644 docker/template/src/nnunetv2/ensembling/__init__.py create mode 100644 docker/template/src/nnunetv2/ensembling/ensemble.py create mode 100644 docker/template/src/nnunetv2/evaluation/__init__.py create mode 100644 docker/template/src/nnunetv2/evaluation/accumulate_cv_results.py create mode 100644 docker/template/src/nnunetv2/evaluation/evaluate_predictions.py create mode 100644 docker/template/src/nnunetv2/evaluation/find_best_configuration.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/__init__.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/experiment_planners/__init__.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/experiment_planners/network_topology.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/experiment_planners/readme.md create mode 100644 docker/template/src/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_api.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py create mode 100644 docker/template/src/nnunetv2/experiment_planning/verify_dataset_integrity.py create mode 100644 docker/template/src/nnunetv2/imageio/__init__.py create mode 100644 docker/template/src/nnunetv2/imageio/base_reader_writer.py create mode 100644 docker/template/src/nnunetv2/imageio/natural_image_reader_writer.py create mode 100644 docker/template/src/nnunetv2/imageio/nibabel_reader_writer.py create mode 100644 docker/template/src/nnunetv2/imageio/reader_writer_registry.py create mode 100644 docker/template/src/nnunetv2/imageio/readme.md create mode 100644 docker/template/src/nnunetv2/imageio/simpleitk_reader_writer.py create mode 100644 docker/template/src/nnunetv2/imageio/tif_reader_writer.py create mode 100644 docker/template/src/nnunetv2/inference/__init__.py create mode 100644 docker/template/src/nnunetv2/inference/data_iterators.py create mode 100644 docker/template/src/nnunetv2/inference/examples.py create mode 100644 docker/template/src/nnunetv2/inference/export_prediction.py create mode 100644 docker/template/src/nnunetv2/inference/predict_from_raw_data.py create mode 100644 docker/template/src/nnunetv2/inference/readme.md create mode 100644 docker/template/src/nnunetv2/inference/sliding_window_prediction.py create mode 100644 docker/template/src/nnunetv2/model_sharing/__init__.py create mode 100644 
docker/template/src/nnunetv2/model_sharing/entry_points.py create mode 100644 docker/template/src/nnunetv2/model_sharing/model_download.py create mode 100644 docker/template/src/nnunetv2/model_sharing/model_export.py create mode 100644 docker/template/src/nnunetv2/model_sharing/model_import.py create mode 100644 docker/template/src/nnunetv2/nets/LightMUNet.py create mode 100644 docker/template/src/nnunetv2/nets/UMambaBot.py create mode 100644 docker/template/src/nnunetv2/nets/UMambaEnc.py create mode 100644 docker/template/src/nnunetv2/nets/__init__.py create mode 100644 docker/template/src/nnunetv2/nets/mednextv1/MedNextV1.py create mode 100644 docker/template/src/nnunetv2/nets/mednextv1/__init__.py create mode 100644 docker/template/src/nnunetv2/nets/mednextv1/blocks.py create mode 100644 docker/template/src/nnunetv2/nets/mednextv1/create_mednext_v1.py create mode 100644 docker/template/src/nnunetv2/nets/sam_lora_image_encoder.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/__init__.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/automatic_mask_generator.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/build_sam.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/__init__.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/common.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/image_encoder.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/mask_decoder.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/prompt_encoder.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/sam.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/modeling/transformer.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/predictor.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/utils/__init__.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/utils/amg.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/utils/onnx.py create mode 100644 docker/template/src/nnunetv2/nets/segment_anything/utils/transforms.py create mode 100644 docker/template/src/nnunetv2/paths.py create mode 100644 docker/template/src/nnunetv2/postprocessing/__init__.py create mode 100644 docker/template/src/nnunetv2/postprocessing/remove_connected_components.py create mode 100644 docker/template/src/nnunetv2/preprocessing/__init__.py create mode 100644 docker/template/src/nnunetv2/preprocessing/cropping/__init__.py create mode 100644 docker/template/src/nnunetv2/preprocessing/cropping/cropping.py create mode 100644 docker/template/src/nnunetv2/preprocessing/normalization/__init__.py create mode 100644 docker/template/src/nnunetv2/preprocessing/normalization/default_normalization_schemes.py create mode 100644 docker/template/src/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py create mode 100644 docker/template/src/nnunetv2/preprocessing/normalization/readme.md create mode 100644 docker/template/src/nnunetv2/preprocessing/preprocessors/__init__.py create mode 100644 docker/template/src/nnunetv2/preprocessing/preprocessors/default_preprocessor.py create mode 100644 docker/template/src/nnunetv2/preprocessing/resampling/__init__.py create mode 100644 docker/template/src/nnunetv2/preprocessing/resampling/default_resampling.py create mode 100644 
docker/template/src/nnunetv2/preprocessing/resampling/utils.py create mode 100644 docker/template/src/nnunetv2/run/__init__.py create mode 100644 docker/template/src/nnunetv2/run/load_pretrained_weights.py create mode 100644 docker/template/src/nnunetv2/run/run_training.py create mode 100644 docker/template/src/nnunetv2/tests/__init__.py create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/__init__.py create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/cleanup_integration_test.py create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/lsf_commands.sh create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/prepare_integration_tests.sh create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/readme.md create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/run_integration_test.sh create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py create mode 100644 docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh create mode 100644 docker/template/src/nnunetv2/training/__init__.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/__init__.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/compute_initial_patch_size.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/__init__.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/cascade_transforms.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/masking.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py create mode 100644 docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/__init__.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/base_data_loader.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/data_loader_2d.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/data_loader_3d.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/nnunet_dataset.py create mode 100644 docker/template/src/nnunetv2/training/dataloading/utils.py create mode 100644 docker/template/src/nnunetv2/training/logging/__init__.py create mode 100644 docker/template/src/nnunetv2/training/logging/nnunet_logger.py create mode 100644 docker/template/src/nnunetv2/training/loss/__init__.py create mode 100644 docker/template/src/nnunetv2/training/loss/compound_losses.py create mode 100644 docker/template/src/nnunetv2/training/loss/deep_supervision.py create mode 100644 docker/template/src/nnunetv2/training/loss/dice.py create mode 100644 docker/template/src/nnunetv2/training/loss/robust_ce_loss.py create mode 100644 
docker/template/src/nnunetv2/training/lr_scheduler/__init__.py create mode 100644 docker/template/src/nnunetv2/training/lr_scheduler/polylr.py create mode 100644 docker/template/src/nnunetv2/training/lr_scheduler/samedlr.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerLightMUNet.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerMedNext.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSAMed.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSegResNet.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSwinUNETR.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaBot.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaEnc.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUNETR.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDA5.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDAOrd0.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerTopkLoss.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py create mode 100644 
docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdan.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/nnUNetTrainer_probabilisticOversampling.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs.py create mode 100644 docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs_NoMirroring.py create mode 100644 docker/template/src/nnunetv2/utilities/__init__.py create mode 100644 docker/template/src/nnunetv2/utilities/collate_outputs.py create mode 100644 docker/template/src/nnunetv2/utilities/dataset_name_id_conversion.py create mode 100644 docker/template/src/nnunetv2/utilities/ddp_allgather.py create mode 100644 docker/template/src/nnunetv2/utilities/default_n_proc_DA.py create mode 100644 docker/template/src/nnunetv2/utilities/file_path_utilities.py create mode 100644 docker/template/src/nnunetv2/utilities/find_class_by_name.py create mode 100644 docker/template/src/nnunetv2/utilities/get_network_from_plans.py create mode 100644 docker/template/src/nnunetv2/utilities/helpers.py create mode 100644 docker/template/src/nnunetv2/utilities/json_export.py create mode 100644 docker/template/src/nnunetv2/utilities/label_handling/__init__.py create mode 100644 docker/template/src/nnunetv2/utilities/label_handling/label_handling.py create mode 100644 docker/template/src/nnunetv2/utilities/network_initialization.py create mode 100644 docker/template/src/nnunetv2/utilities/overlay_plots.py create mode 100644 docker/template/src/nnunetv2/utilities/plans_handling/__init__.py create mode 100644 docker/template/src/nnunetv2/utilities/plans_handling/plans_handler.py create mode 100644 docker/template/src/nnunetv2/utilities/utils.py create mode 100644 docker/template/src/run.sh create mode 100644 docker/template/src/setup.py
diff --git a/docker/ReadME.md b/docker/ReadME.md
new file mode 100644
index 0000000..1b66efc
--- /dev/null
+++ b/docker/ReadME.md
@@ -0,0 +1,185 @@
+# Docker Submission Instructions
+
+*Useful tip: view the page outline / table of contents by clicking the icon shown in the image below.*
+
+![width=200](media/show-page-outline.png)
+
+### Important Notes
+* Please contact the organizers by [email](mailto:mkfmelbatel@connect.ust.hk) if you have any questions about this step of the challenge. We understand that this may be new to some participants, and we would like to help resolve any issues you face.
+
+* These instructions were adapted from those of the [Syn-ISS 2023](https://www.synapse.org/#!Synapse:syn50908388/wiki/621840) and [SimCol-to-3D 2022](https://www.synapse.org/#!Synapse:syn28548633/wiki/617244) challenges.
+
+* To be considered a valid submission for the TriALS challenge, you must submit both the Docker image and your write-up.
+
+* To upload any files to Synapse you must be a certified user. See this link for details on [how to get certified](https://help.synapse.org/docs/Synapse-User-Account-Types.2007072795.html#SynapseUserAccountTypes-CertifiedUser).
+
+### Overview
+This document covers how to create the Docker images submitted to the TriALS sub-challenge.
+
+### What is the Docker image supposed to do
+The Docker image should read the test data identifiers from a CSV file and look for the corresponding images in the input directory specified when running the Docker container. The container should then write the predicted masks as image files to the specified output directory.
+
+The Docker templates already provide a Dockerfile that does this. Below are instructions on using the provided templates: (1) where participants should place their code, and (2) how to generate a Docker image for submission. The same instructions apply to task 1 and task 2.
+
+## Editing the Docker templates
+
+### Download the Docker templates
+The Docker templates can be obtained in one of the following ways:
+1. Downloading `.zip` archives of the templates from the [latest release](https://github.com/xmed-lab/TriALS/releases/latest).
+2. Downloading the entire GitHub repository locally. Please follow [this URL](https://github.com/xmed-lab/TriALS.git) to get the repository files.
+3. Cloning the GitHub repository using a Git client.
+
+### How does the code flow
+Before editing the Docker template, here is how it executes:
+1. The `Dockerfile` is set up to launch the `run.sh` file as the entry point. Any other entry point will be ignored.
+2. The `run.sh` script expects three inputs:
+   * a path to a CSV file containing the list of test image identifiers,
+   * a path to a folder where the test image files can be found, and
+   * a path to a folder where the image files containing the predicted masks will be written.
+3. `run.sh` calls the Python script `main.py`, passing these three input parameters along to it.
+4. The `main.py` script imports the segmentation model `MySegmentation` from the `Model.py` file.
+5. The `Model.py` file contains the segmentation functionality that takes an input image and produces a segmentation mask for the current task.
+
+**Note:** We provide a sample model trained under `nnUNet_results` (located at `template/src/nnUNet_results`). You can download it from [this link](https://drive.google.com/drive/folders/1G53ttrukdTpdQLIgsW55VZbb_1adoD8g?usp=sharing) and place it in `template/src/nnUNet_results`.
+
+```
+├── nnUNet_results/
+│   ├── Dataset102_TriALS/
+│   │   ├── nnUNetTrainer__nnUNetPlans__3d_fullres
+│   │   │   ├── ...
+```
+
+Now, let us edit the Docker template files.
+
+### Update the Dockerfile (optional)
+Please update the `Dockerfile` to specify any base image that your code needs, such as PyTorch, TensorFlow, or NVIDIA images.
+This is done by adding a [`FROM` instruction](https://docs.docker.com/engine/reference/builder/#from).
+For example, this is how the Docker container can be instructed to use the TensorFlow base image.
+```Docker
+FROM tensorflow/tensorflow
+```
+A catalog of base images can be found on [Docker Hub](https://hub.docker.com/search?image_filter=official&q=&type=image).
+
+### Insert segmentation code in Model.py
+Please insert your model-related code in the Python script `Model.py`, within the `MySegmentation` class.
+Commented blocks marking where your code goes in the `segment()` function are provided in the template. Here is an example of such a comment block:
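+
+A minimal sketch of what such a comment block looks like (illustrative only; the exact comments in the template's `Model.py` may differ):
+
+```python
+# ------------------------------------------------------------------
+# INSERT YOUR CODE HERE:
+# load your trained model, run inference on the input image, and
+# return the predicted segmentation mask.
+# ------------------------------------------------------------------
+```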
+
+## Creating the Docker images
+Now that the Docker template has been updated to include your model-related changes, the following instructions will guide you in creating the Docker image that you will submit to the challenge.
+
+### Have you done the following
+* Set up Docker on your machine. Please refer to the Docker Guide for instructions on how to [get started with Docker](https://docs.docker.com/get-docker/).
+* Downloaded the Docker template. The templates are located under the `docker/template` folder.
+* Updated the files in the Docker template following the [instructions](#editing-the-docker-templates).
+
+Please finish the tasks listed above before proceeding further.
+
+### Building the Docker image
+
+Build the Docker image by following these steps:
+* Open a command line tool.
+* Browse to the directory where the `Dockerfile` is located.
+* Run the following command to build the image (please check that you have included the `.` at the end of the command).
+```Docker
+$ docker build -t <image-name> .
+```
+where:
+* `image-name` is the name to be given to the Docker image being created.
+
+The Docker image must be named using the following format.
+```
+trials-<task-name>-<team-name>:<version>
+```
+where:
+* `task-name` is either "task1" or "task2",
+* `team-name` is the team abbreviation that was provided during registration, and
+* `version` is the version number of the image, starting from `v1` and increasing as you submit newer versions of the image.
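+
+Putting these together, a hypothetical team "medhacker" building the first version of its task-1 image would run, from the directory containing the `Dockerfile`:
+
+```bash
+$ docker build -t trials-task1-medhacker:v1 .
+```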
+
+Note: the image tagged with the highest version number will be used for the final evaluation of your model.
+
+As an example, a team named "medhacker" submitting a second version of their Docker image for the binary segmentation task "task1" must name their Docker image `trials-task1-medhacker:v2`.
+
+### Testing Docker image
+It is recommended that you verify that the built Docker image works as intended before submitting it to the challenge.
+Sample volumes are available for this purpose, using the same file and folder names that the organizers will use when evaluating submissions on the test dataset.
+The sample images are located in the Docker template folders within a subfolder named `sample-test-data`.
+You can test by running the Docker image using the following command in a command line tool.
+```bash
+$ docker run -it --rm -v "<path-to-sample-test-data>:/data" <image-name> /data/test.csv /data/inputs /data/predictions
+```
+where:
+* `path-to-sample-test-data` is the location of the sample test data folder on the machine that is being used to test the Docker image, and
+* `image-name` is the name of the Docker image being tested.
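+
+For example, if the repository were cloned to your home directory (a hypothetical location) and the image were named `trials-task1-medhacker:v1`, the test command would be:
+
+```bash
+$ docker run -it --rm -v "$HOME/TriALS/docker/template/sample-test-data:/data" trials-task1-medhacker:v1 /data/test.csv /data/inputs /data/predictions
+```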
+
+## Submitting the Docker images
+
+If at any point in the following steps you need more information related to Synapse, refer to the [Docker Registry documentation page](https://help.synapse.org/docs/Synapse-Docker-Registry.2011037752.html).
+
+### Create a Synapse project
+To submit files to a challenge on Synapse, you first need to create a Synapse project. The project must be named using the challenge name and team name as shown below.
+```
+TriALS-MedHacker
+```
+Refer to the Synapse documentation on how to [create a project](https://help.synapse.org/docs/Setting-Up-a-Project.2055471258.html#SettingUpaProject-CreatingaProject).
+
+Please add the team named [`TriALS 2024 Admin`](https://www.synapse.org/Team:3491688) to the project and give it "Download" permissions. Follow the documentation on how to [share a project](https://help.synapse.org/docs/Sharing-Settings,-Permissions,-and-Conditions-for-Use.2024276030.html#SharingSettings,Permissions,andConditionsforUse-EditSharingSettingsonaProject).
+
+### Login to Synapse in Docker
+* Type the following in a command line tool to log in to Synapse using Docker (your Synapse username goes in place of `<synapse-username>`).
+```bash
+$ docker login -u <synapse-username> docker.synapse.org
+```
+* Enter your Synapse account password when prompted.
+
+### Tag the Docker image
+This step requires your new project's Synapse ID. This can be found by looking at the web URL of the project page. For example, the Synapse ID of the project at the URL https://www.synapse.org/#!Synapse:syn150935 is `syn150935`.
+
+Type the following in a command line tool to tag the Docker image before uploading it to Synapse.
+```bash
+$ docker tag <image-name> docker.synapse.org/<synapse-project-ID>/<image-name>
+```
+where:
+* `image-name` is the name of the Docker image being prepared for submission to the challenge, and
+* `synapse-project-ID` is the Synapse ID of the project that you are using to submit to the TriALS challenge, e.g., syn150935.
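+
+For example, using the illustrative image name and Synapse project ID from this page, the command would be:
+
+```bash
+$ docker tag trials-task1-medhacker:v2 docker.synapse.org/syn150935/trials-task1-medhacker:v2
+```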
+
+### Push the Docker image to Synapse
+Type the following in a command line tool to push the tagged local Docker image so that it appears in your Synapse project.
+```bash
+$ docker push docker.synapse.org/<synapse-project-ID>/<image-name>
+```
+where:
+* `synapse-project-ID` is the Synapse ID of the project that you are using to submit to the TriALS challenge, e.g., syn150935, and
+* `image-name` is the name of the Docker image being prepared for submission to the challenge.
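+
+Continuing the same illustrative example:
+
+```bash
+$ docker push docker.synapse.org/syn150935/trials-task1-medhacker:v2
+```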
+
+> [!IMPORTANT]
+> This command will fail if your Synapse user account is not a certified user account. See this link for details on [how to get certified](https://help.synapse.org/docs/Synapse-User-Account-Types.2007072795.html#SynapseUserAccountTypes-CertifiedUser).
+
+### Verify Docker image on Synapse
+The Docker images for a project appear under the Docker navigation tab of the project. See the example image below.
+
+![](media/project-docker-registry-view.png)
+
+### Submit Docker image to challenge
+* Under the Docker tab of your Synapse project, click the Docker image that you want to submit to the challenge.
+
+* Click the `Docker Repository Tools` button and select `Submit Docker Repository to Challenge` in the menu. See the reference image below.
+
+![](media/docker-image-submit-to-challenge.png)
+
+* Select the version that you want to submit.
+
+* On the next page, select the challenge task that you want to submit the Docker image to. See the image below.
+
+![](media/docker-image-select-evaluation-queue.png)
+
+Ignore the team submission option even though you are part of a team. The organizers have the information about the team through the email registration process. + +* You will receive a confirmation email once the docker submission has been validated by the organizers. + +Thank you! \ No newline at end of file diff --git a/docker/media/docker-image-select-evaluation-queue.png b/docker/media/docker-image-select-evaluation-queue.png new file mode 100644 index 0000000000000000000000000000000000000000..38d18380cdf819e67695149b8b083932a2afc01f GIT binary patch literal 92724 zcmeFZWmuJ4*9Hm*QX(ZNp>#+Ki|+1Fx~03jrMtTuq)WQHyGt6RyX!pcce~?#`F@~YE3Z8No5F81$gF{yhwx-@eRT?@9iBFzr&uD1r zhu(utNLcb^7|Xo>hON=WMBEd~!SOL(Vh5b`ZX1klxnpJ1KkoBq4X~K>i^GjY@oz%& zMi*a-+%6ucGg{ZSEWo)ka3wZ2EUa)jV8GNm(fm)raYY;|46gA%`k?9iYkWhZj##y3 zE{<50Vg5;8;UB2My#V(u0?wB}0-Ea_ZhaeS!TNjp9Aa5!7zq+Ynt<;ND+x&~v;|zp z)QA9NnM`WNGU5&#NVa(P4pJgs6m05hL4+Cyia0v^RPddiwJTp9V=#*} z8diL<*4!Leh*_j|G!Kcl>lJIbQ{MK-oYc5-8WoC3SeZxO3|an)Vo+`=1TojqH;$ye z0eWO->Uu}Ba4uq%Gz3)0ns6PPP##_|ezJJBJ^cX)vNJ*C17G=H&H5328oL_A3t55p zyj#Cf;7(wo5ztUih-3t*8(aB&d!OENdrG(Jv2oq1REw>6XHGSP))a=pnbkgwg~I@P z`(cTbi^HzampUj^_Lno##%*U$_v)L^woz>#7&r8wg&m~*l273@(X@`eoNI75ZtN3C zImskGkf_D=!um&ads0Jde1p0Je=qb^9I6%<6=v=h^LCljK@xWv?2{0*^VgEEJ_3Y& zj$TKY@NliENXOhikC-?Wsd3(LJ?zquH_v&36wTa?`Oxj*EMCJv%^o%LF^DkjQG&C% z2QB0LWG=PTXqYV(xqW2k>~PhK3xk!WiNFiBuEZ171RG6#C-#bPZuzE$@xb4=wSc-% zg6V}FPC~bxQ=2po^a}FI%k)Tdy?`{KJL#yEG}IkvGRyEAEh=--@7o@?D{4~j$<|nE zI7+*d({YFoUa;45#bo;_C!pO(&bcMFBHHn42-*^6t|M!!m!A(4$ePHi`p9P0K78Ul zQIPgM9mG7ip5@&2-8=n?byGo91;hF+xm`lzZE2`B9G@)jA~D?ZWb!+ZC(c;5TJoTCgodE232h zW?xZi$g3DBPsxN2G#>S_^hu$p=1?guZy~=SA;GS4p~St)9R9Ye@+!w&@hb-@xXxE* z1%xS2@vr1FuovI%(~z;Da$6M5VO78xTi9zH>ssMQ5P!N4!+3-tk@4`JA!&6$l<_7Z zy<823#MKab72%bIyX}V*j$081C4})Mq)@O5m%fK%oyHQb_M0`XT^Lo?TZdq0o=-AR zlROBYMXf=7T=Dv(n7$G`kezf6CCvwN%Yn58BEg0}{H5l5Tj8UOX- z-3u*^a0S{JAOA9D1;FKGwQLmV8iuD!B?=L5{ZK71CA#-l;9hsFSx|AHrFv4gPOq^Z zzidEj_OV;JzhZ=l{>oKKV2?E8b?jx`2I7HD_b5#^_cMRVA-d>g$A|yfSD)Y7b66AQ zQzTPljaQAkf+U;p9xey*>o6fg_K5erfp3IY!%l>WvTU*zvrw}NvP{RIiFAmxh)A-z zvbBlQzl4AB9;3=$6OoTj`RGDi5J}ndVO@wel+Cjts3CYktV!&E*eyq2s*qGIib;f6 zheL-lJH*n*GWA42QE^sgR+U*vF^x^qq3A?yikhAK-)CMs+DJxQiE z^TbCaTj`5SUhjnS&KE<3bI>{7xhI+UBzJUCpLBA8Qr=veqwIM$rs5z^a2|tVkyH`S z{ zW&V8OqOv5%spOXMAaGB2<3dm=;25$VLh-)VZC&-nM9UOz*Vmj@IZp;pa9)CTT;A{% zmiBQU7XcQ5CV_Z?f}jwrbIkFe#$c$ff*{FvoViBh@n^<2N{-}>OC2qQGklumomq?P z6?xGH4F=%`joUBi0x;Ss292W?0~MQV%N%O1DNdD6>(RC0efWJkF$1m84=IZ%P$}Ok zF(_RqY%7%&!4(-69Vkl|xfJFq$t$Z@d1yIe0b@mKsc&u1Jm+kE@(V3df=Zr^$>gOSb8?UiB2#1{T*lJDX&- zMOOPZbatW^1(x-lBbt1lVrYiyQ5WtPw(4+f9)GRugddhItx-!l4O0xOrm(LSb{%({ zU|r7iPXw-2b{32$9we_e&eo52clfSJ@1OQe55#WOFJ5Ad`^5Jy3_5Rb{UW(wUB67( zXiXSo*$%yizK*@AhSq=C0bPh_|4tt>?t=o>49+?Vi0+p*4IUH7p8trl3NNw0SmV;S z#(Utk0fH=SFoG)14e0=b81@u)h>x#V$*X1@4P?9^{UFHjy`JcB#Eim>9D(=#!~%l+ z@&cT}7^rmQn<^BPwjz7QGj_j|mKte@&R1V54xZIwz%#}OG1()T5&%;S}#M*FekI*O^XSE^%^ z{Y>#)7wWkZ?+>z4GU-r!?nPucf|U;7>%P=lR5ou>gf zk0@uNCjN&0HS1G6!`_MLD}j)Kb8kac8%&R@zo>0@(sq{zlv9h=h*CYUY))4;zorsb zo@*MgneE!>*l62el)oyRQ7rzfrtn~|bJgb~dnTJ*@Uu9j{D#NdsxyZ^iavrq*{FSr zp@>es+Icbd9Ano^=0x-B)N`%RnNi=1D zW%w$N#hB9l1+$U~~>iGv7Uvt5&p}6ECdQ*D(`d7A;1^h-tm5ucz2TkSOZj#c; zE{ida$v5SVUrV$PPhCFVnjRo7E?IuFNIOXDY|ZEW!JCaB)E?yX&=u4*l#`Vc(vIh= zoWP!l_}+r4eC>PhXrI?7p9)w)Za@OkiU_Vfms}Yy=S$p zd7V6|y(MCR*JjnSIa89yoVUF6<~sh=x5mxXU1k#FZs7r5deGU`)C$c3C@BJT6ohrf z#lgscV;C?5cu;xOF zR$yT0L{Bep;V*w9PbisO(M5pPmE8X3qi~n&?<-;M$uQ zn_98hv*Z791q*Qe^f5I)?jM&}8?oanh)dz}npx`LGEmV_(cp8y;o{=5S!(ODeBl%L z^LF5W?D+cD*5)kK)OL1uRCaV!W|n%?pO~4MscC4bX=y2eD=4iTOszHSDNU^i{_~LE 
zjMDEzkKDXy&7}C*DbHr=3*~T!$pOqhN{bnNv$OPl`~T%1$FP9&Qk9#1k_lUWy?Ap~ z4eK$5bKlITHhg8{QkK|V)XjX%+D)JXjBc3aZ{tQTUC@LbIYDn@NbY}!6$7&@EZ^MA zutM7Sxml}uXJsSVBQXqflv1A!zrH}&Ri|+g4#u{$c|=x9&gi@0oJ8ZmM)|&EhXyTa zxs=~;mZ~MqE%s)J`y*57S#alcz`6_$-T(@2G7+wwkVOiA0n##vor!>Ijr6)}Q-0a2j1=R^Kc|yAm;?u)zweyMR)P_*P zscG5MmoGbD+t|Mf&PJ_uan`a0dd&$v*55dsW;%eS7|gJNt|p(20qn=up6%MTs9-1@ ziO1uQJVm&)>Oh+Fv(gQJM{+_+d#}$M4ba2-Y@T#(fnM4e2b^i091m8f$BN$xrhlRy zu-fo`soRp{SlYC>sBW^MeENNwC_~4RJQJqWiWKo#X0f=cir6l{VS9alV_Ea)c9@7T zp4*+%J@+O^?0DO3ns)}QyU$*8X*zyhlH)xidNnnvMq=bJofLgg237&a4176Qi|+#{ zXLn~)esw#>nYim(W5`H^7lWfQj=ZH!Wn!@YOZZ$LElDdkgQhSgKz%# z@Qx`9a7H`wgNmv=8Cm|zXf>#OqDSa(ICCTW=m0?19r7lAT7@BbuMoun7cR;1lNx68 z0+i#2>r0yzB)eHg#ZRWD<{opBc($}uaig*^v(*CgtE&TCKB;ZZhsse_xkFT9y&(JI zUd+!|4g=eIm&_mN*tlY=*0g!AndJ!*&Grj(m5+}IK{nv+-@QqodhF1@wTTRR)Sd&J2#pM zedl_=PDkriRny|-6~uk}@C!=x{o<_O>_)3ywMC5hF^sEA|N8EFteb1xCw9pz6185D zqvHo9sR3B``Ad~*3?*+y4Z;0Nx1le1V(LoLJT=Bbd-=J|y3v|fF8gPMUAO7Gi%OfL zI@$xjP)_6)fTwTLI?^CZ7(k5T3SQpNiQqXiM^xev%fJ4Bo7!VES)N;KIRxHKS9_gH zqxT+zMuxUCO|4|tZCjb#_+UeXi;b0AzogQl$@i+EYulMUgW)}ct&n|+#r1zo))^F6G$|{m ziViQj!5^En*gK~5e5?-1`u=x$1kHyLge=kqSD_Mj=6TUw(?y2T`1y3M`*mJxl|;nj z?+E?v+l~Wox`FguZmHG7Nc)xk4(({s8a+Yg!U)Ba1;5AIgHl#Q4c(?_K}Xt)>V5?g z3BfEalmkBUWvY2n+LA`CXCusxEj8vZfC)5q#o7KGgDj3M$Z-c!6qxjkYd`VKPltm& zvOzrw88kdNw;al+;7@)D+Ky9LF&sto&g>sg*eB}_H@t_tvVD2$X+Sxh3E#)A8K{C3 zS=l%1j>}BT&kiSPr86V246!y34CsX+cjsTb@o<7c62Ls{9jk^BT|O>&Yc1QZb3jRy zrLXaP54qo1kfbpe8hD_+I)WL{X6Q(M6!rLTZZ&rR@M>Bl^Q*jm`=vFTM~pcU4(~tr zCIvV53;UTrKwgm*agK!H-&4qnnXUsUqXC!=;^sR*wot z2+;QeLH(!IpDY#G8n|xeT{47uEz<$Z+ zjOXy?Vkzp5uG7KNEc42x-7tu#gipY#L2#HtU!X!|NXDqOCx(;ln?;FgkRpd8ASEc7 ztQ{S%6&K7vkNI)fATv=-JDIK4+C||Yd{|jRWR0}dYv0~TI|J3bWFD#JB4bwCX1Vm# zix;-+&@{xWOs=H3lCY6nIdoI#Te;kNhC&S6zU8?*h6D}jQ(r_QOXUHL$?1*@iETW_ z*-=kU)^ja*_R^1IwIEhe6EAl-tnvQ#&suDN=XVLub!S?yc0+|^HO5R=AK7^9(cKU0 zl<6wMW!{ldzD2`AR%w>!w*{JPK^3)!6{eVgu157n_c!jpV%-Okh&L_|ni*kcNewhE zChOa~!y2Qa8FhH!>RFtkoL?{2iNAGDwccI@Xn$}S?~ZsGVR3dcsyHMK#2O4rS@CMJ z;0O)|9T^nZT$MBiO*DZPuxa)%g&PeCOzvOi|pz>BpcFQVs%=$-wKHd2cBTZqebQ~BpUuDyEIc7UlT ziPOOG`QHDc<4?OMxNVcAZVfl^>G3Km$ME17>yzLLGX#V@U}-zI{d&hy5Q z6EjQvGW~@oFTPjaCCe3&uP|dEuGnKGEV1gzJeb2XYLjBZKH26Ad-V@rLwhE)Qi$oB z`=?$!n0`9AWNOM7Cn5RUvo~wjqbMi z@al+ymN;4j1+{Yz0`IOv{w#vq_Ds*=PC0*cMf8nE@uv2F(q<_F%LQn}4;x+FSza}D zA<0rT(zA09y6JXh^w3%gbMIlX0|W=h9ZplS96^d)<)=d}bjuo!FYWl}%*|2=zdG)V z7=LpC*>F9^tbFH!lJZs|JTV}_nO(BKsM&Wm%grFX2S4ccTkJ*(YoNl#^5(wR-7ao4 z+=>eoYzo-DhqdV-k}^gA+oH&a0AT}5Y9uWDvyFs_5fPQ&<#<%A0W>Uak|7;OfXU4) zvW`2#%`BlU>u{jq+Oh2k5v{F2N@tNv6oth2q4%iEW# zCLy9;$Am2TE?=ko$cd}!H%qkHM|n%KCwMU|mw$iertZ&535jl&nQS}kaN;uv;UW<#+xr$p7YeSvu&u28l(0})W|bLK)BF$=i~R8zWGiqTk}cHOgb|c%lEG? 
zRcOK$UDuA$WZ+92I6;Ha(y*34lcRgKAylMCcSHK`*g+5)!CSZ&d>F%)Cm>_J1o#63s{Rom?S zJ#Tv@21NfmH5@)FwV5QI``b@{%V!C~#P%5*{-I7m4(Kz*eyre7Y14AZ<0Fc z^H_y@iy0}MjQY9Fsj>`8(6uE)6+?Bqc6u679 zXq$(oNw!6RrKifYTuYJgtMRH&TTI$PO6_^#bdEZ(<+j^iSqsKe-TP}j4XSu~*dQ9J z^mqBO+^-LCXND{_if;2Y9~|W?9W8+hMt!l$X(G2!YP_ zyWYSbY-my1E7nCOw6Yecr8<8c(0(f?xMCWP$J+a|!38~}5;h&a?{Sd?&HCFtg;MwQ zTpbVIhv*s1!wck!Avg~=aW`Tng1@bd@#J!SNZ^UdLGk|#UB z@Wi;F+hNLR($?b7Rj7iP&jXIpz}vSat`rMRT=;zax;w=Hjti57HGY%U*`m#9DnOI` ztVKpg>@0+J+Jl)Tc)wwbQWl$rNvolgFnX|9eEL3j5Q$oIwK`(wp#~?^@*p4pbsZUz z@~$I3jH7sw6TVO4G*KMQ&hZOk^%*5RhD=~3*Xq05Tt8VzQ}w z5}!9l1RSTF>&~QR%$r@&( zMvR*|edkI`Ze?@xjGT-%MKS>%v7W^+>}0UA$Iya!?H2_n2qOA*l3s^9C7t{`RA_U% z;Bz8Y@^Y$q$Ub`n2`4yEm0kR+wHeJba zJ{oai<3&DN$M}&Vo35g}&EGFs#cjZY|Wg8WYz8GtvkMR zue@H`Ct?_mzqv=Ja{thq|A;gP6@n*m!&HC=T8ZdwkBH!Cb<%H=;9S!Otg_!B84g`a zCt*`-9@W~rvo+LFTnB@+L}fwck_y9>gh6{nyR*$|wz2@iu-(rw`;W1qOgdSy#oKO_ zIM1M6$X#?(ui%Zlw z@P5)m8-v3qmBo5^XFUy03J4nr?h%P0IY^nI@2uz~EqhGA5UZzk3+B3r?Z?m!J*!lT zO0-_a95!D4tKp`Daqp^N-3slh+-O6T6s)&ZQQsK3%vJ27Pk^@JoYujCKy^DZy|%kh zYmcs+iTUf5jaB2oGwN@gIz6({o&x&Jhs7CN=3hSwOU&q7@P3~m321dZ6{Ow8PN5UN z{RPgS?L2P)(BG+>C7A@72|XEX@EbbCSWCtnbA(f48Bpr?8mcFA8d%vh)oBrSaeR+Z z`H}gvt7*^H4Uhneb%hlnL$C~8H|D#!eV3gMagr+QyjorA0^vY~y!GqMO z6E#+WmH`|J(auWXrEa3Nt!{q*O*?30fFESHx^A<1a8232twAJ$-@2a8q?lsD!7#Jd4a-e)d474A!w$KzN&bx8s_9!&2sT0(z9oTHK1-=J$sR1 zW2Vi1?zl7Iv}NnQXfF!r@dK;Ou2=h$SES(miWh~=$b!LhwMeISd$^3g9#6Vokm450 zwQ*$QDA(WWg{lpJy(~|5D$>_6tWx$o>nKH21k2J(y?-e(ajE@6!HoGz5-|-V87V(8 zq9xVbDapzm4vTr}4&mW5Gz4@K2**tjUH=Av}95kya~ z9oIROynjjj6?D+fHi^{5W`K)LWmQ-kly+*VwFw9MoM@mt;?d_P-5rt$92~MKHtygK z>e;}9I@Wto`1ew5HYm|J(|2$#{R6O6h+`*sh6)E6z`jB%`EcTqxTnn({C3K> zAVo>(CGptMFcw!xR>e<&>DyIBt-5F@lc3d-13Zfex(?3#L)HctwG_vi)zT3QMYVrA zXnX&7yuVtC4^0q)93V72N+W8J(F^YPAH{o;UG*i)Ykr(5Cf+UYWN_npe71ZeZF5Q^ z9jV1i(5xv4kCnuHmEs9u@{Gb-T<-9?$CBaD+cFe0|M0;gflJl*Rm!lW4lcHMLcta= zuxDh8*0s#|UR><6pv*Y`hAeDa|Hn@~(w18NEY%rL$VVLtVB|MzY43JlpsU%8HmuO@ z#E9hfWF97Vv1#BGHm88fp_Oo!Yq(HbB#YyP&35U>Dg#;pl;UzK>D8%DfDGQ;j>fs~ zcJG8!>dpm4C*iu7CP?wI-k(haS&rVj%sw1dm!_X^6&p-L#0k(#;g<%tGHj7wn@t8l zmzRd`t`A-eLYC=|d7?$|_{{at}rlO0HX$~O$^XDl43SrhRjkj;y7b*QP(yA6y@MfU@MV@ zVm0(A(37y9WHaj?VgUZSh(_4yN6f1GHW7utA}}RA`(wKee3RVkrQ;cwV88^y$!K85 zv&|NYs=NQk-g`#F*{<)yi6}uxLXbp|DA9-LokU0wy#=F3?>%}%5IsfjA&4>~`ixPd z_voF8HW_Q?EUQh{J#Cayld?bW?5_Qx$di9=XIXPah#*GyNeG~=;Em=Pd5-7 ziE`=|O}aA6bFftXg^e}_QsURftwY*rCRe&^-O@ar&N>%?xc61b={W!+rahTH=CWvp ztDs#wS$3O$a>{kh#~3;Oxf=G_XvJiAUR^m}l(VvhxW~o;T4(Gmdtp6~yfv{~V5$s! 
zWGS1HtWoxC7uIam-L1o^1Z<>tls>1 zRJrKhdDyGcIOXA@^JCu->(>`nW)}XKg%>1|w4Ls?IA(O6aE*Nd5oV&ZJmY8P>4ufQ znv7V7OPZ4UvA>7sq8!JY(2dPFJiT#|HZ6*!-6$;$SGj}m*(|xP;{)QI?!!>^$-Ic0 zVB?%A)?MF)@plX8%LuhE)2x1KNURG)VI2q_*x&H(&K3eToUkne7vHacSGLZN0@l}1 zG2}zs-u1IY3K|LQZ0B8;6ChpP3n1ed8t|RjUOHTyaOi$SU1`%d+ib2*9iQLeK5;)k z9iku4Rge^IXUnx$8W*{ob#+3hfJa-xz94#CnG(b|K(_#Amf4Lg>z(7h&gN^KxxV7mbk9|#*PcF}=n1PJay6Ke+v?pF<2W-e&a;@a zwSzZPnrB7k?;C)2llHn+rnWzp3u%lGhJB5eJo0csSBvr_X-IkD_4dZ@idCQXhypM@ z5%XLc50*n3^|6DYezVRlKqcPJ^pvx7?&%rGdE|Knz{CjuyArO74{)1mwvZ1$H+lU8 zac|f_r#z|5I39lrr*I94Y}r0k6*5iYKcAsx(SvBS-y(COs?uW}IKZ?PvU66-fazh^yBu z0#{xJFT8vA&m!b1*0|2ZU;nt!Ji_>ZQ=PzDItjq3)WnbeD>s}}IJ<}w1s z18m+?!1haUMl^7t&Y#=)n4zs-it=YG> z1riF}8MvbJf4z(rXKlQ^5)`&GXX;2Z3p9<l(a1x&Add)T7rVD?*{(B963etq;x+;P6Dke{=T=R6QY!h{A z#rO)@Q!nz6{mtio?)a^@md`NnzMV4+XmE{}TPN}le}N|fyJ)8@GT-3LbMI{DgJBm} z9jLp2Bj_LA_(cN8&3c5#W*DOGd+E8Nw=9xvD-^qx%M>uA6?*mKkALWZuOD6&!iNnC zKw@eDHYApQ^|QV)JT~U$!e0ozzd$P44=*S{T~QYU*!8D94{{RpBu{o-rWaT*|IJSN zIl-i?u0OYc@Q1d?_Q%|H%*GY55`6B!#3bwaUo8fntPf+aIg&!kN4sUFQDP6t3z+WH z%NqaFliydu_JF#U;qQskkl<*CDy<%b`*ZZYf57oyPhY^@X8LMSQ@+C~50?j2D)Mow z^9zCX7wc;E-@ux8q<}%B)rv_Egq~Ncj+#S%YgZGKMUizghC<1!v3y z0flT{CZIe=rAcBOZIc%o|0?3oVvo6V-VHgp0qitAq254=5fMaxzWb}C0Uyo(+9R~8{{uJluwvK!ZXzH7b-@ub z@M`xsy3np-)W<~~$X>;V5KQ+wPCQ!9rR%N$lA2i$<;TJ{%sS%|vA%n;N5=(4oI876 z0L$9idIYMkA_@ddKIQ4d)&~2)NSYPwKNqa=MfH@FT%wq=>+6_P zD@{R+f=pz5|8ut6?5ID7)EB?=t+MOludVV!Mg86el>xUl*vV|x30m4CveQWZ6nBzhE*5*@U6DOD%t^~og)))nwiL2eR-h4c*G%gWi{@53GRRY^%wfjJJ z_a4N$KvC^+(h(r)+{lJlt0)kj{o2?S%A9s>(QLvKlj_lxBJ9#VlkSxdelLETpNF$Y zXo}CiT07{`5P-J0Q?>;N=m)R8Z3I%+m?!azHuFu}6md(=qym|pCnIQi1z4aM)S?dW z4#c{@N_mG?1rLuudJalQ7n8*8mvYodcc$_%@4U}J%vp0g39j&9U#`QA-bRb< zO0iz$M~_Y)_WF0Nc}(i8!?BI~6F_N`jb&mOTfj;d(LqyrnP?g_OX!2(e6VLVfs4~# z;dxL@Nlc5{QnmUYlP|y(fT(6SQvW(-S5=$8=3#T6? z&knP0@x6S?^6dz9a!TBTwp5aU=cQ}L`1-Go-L`e_XwR90)0Bj%ms$Xd*GlQ{Azlew z#8uFE*9Yeby%JCxUDZ}`aHmB@KP~v_nJ*?mx{zO{cDb}yf|paTyqzWO@w=yJiSC<5 zOr-gbkU$WmvfHk5#+cTBHZa(FHweo6Eb+bid5fDOh-I za52ZcJLwime!)odx>d>62a2Ir0f8l{q}!FHS}EH4{7tr1?e~Q{nZ)7wU&CvuH(+*9 zijx(ln#s%b9vb!jUVtL zKYcf~{EfKFe6Leac@5Dk;pubMW6W+eX0LAYWXBLoR9Z3Z z#m;NjF~2Ef--v#cGEzM#oVqrQcx zQo9AvSj`hI24a3mrt2_Z>Yr|Dn%A-6LZR{QT&*gm92&@#@$6C^i)@umG*?}ML+xpc zkD0!1`e~~q4wa6_slWZ?=@`zyC-JRmSJNaf(91v2|EI zqxD)0EkpP-Ch>4N4)Z{HijzXN)!nn3{*8}IjT&%V03}_@y(ztmna;TiRvZiXPw@X3 zT}(h?3-(m2-v(JfC9H(R5rCN_5|Ve~X3LScBm)_8U$E=$`ZRwMp6e>@EEjd}Vkrf% z6tSHiw*dch$+XDwy!k_3YyU%j?PNeuet@6aHvQ$SoXf)Zf)! 
zn@H)0E>3Z(#M3?lu2nDvM}ze+h9a8xeS1L4PaBamf8zwIkzl2jEV%ZD#A0i@A5lk3 z=qrn~p?;sEI_xc-jR+diaof_pVPnMjR#0+juL$oUOE1ecm%HKjbVy~?$pYAQM`*NB zzAG))D))pb0QJqJ9}GD0NZH{b*B{@4KE>*P!Wl`?vnQP-%bGJRHVFd2``85I#*mOl z@rC!?>SA^kT;-TpKiA z|8>_Gk^(xBX6cW;8c#Ue7g1;YsD6EtbgjEd&g{_y>oEVO51KxKlY-6hXm7CgI8=eA z7g1l0nf7kT^IosYO1Vs_&73aA%*I+ff(*E1QgDUFJyP`wd-fNbFNJkrsU0OMeM=>= zABUp?F-NoEJBmdTW{-jjADK@JiMwjHXH7_ngwrEyXmu63PF^*FQX zsoTuVc=qu=K@y~LoUJTa8rcyFLbU@?;V!Va?Ms!MQJi@6%dOew#eRG7Z{D@qy|jyf z0 z6Z!hA_5cE0XxJFCY~y>M%7r|lG$ zT`+Xu0F_5oTZ-d}Q?bktAg^CXmm<2t^@P&QthyIMJU@miBOULeu;{9MeOr86WG7d0 zjcic0ev8r~RtLeV)%%7T8hxrlN>p^R?03NDAnAxV%zODaVQ9mn>hbpXkKIUi!7k7Yb# zlCuKDZK7xW^hcLxjV!BBt{ctS3!U-FRD~~(Kj<(0P{$N@#@Ybhyl&|5?9*OviH8m6 zIi`_QTf-9@H*2_t4ZI<**47bgar`aMW$3$0d(Rqu>MT{c=kWn1w1nXLZOSnhWw9d< z08_t_3?!48wA>2pt{nZ=vSC98V?#KfY0+;{0fZ0%AdNujhJ-gkZp=YC79+U>?1Vbk zR|@NE#)`-*{RjJ6amT!hraW8iii`JN=zDGW9uO>4Oga<0sC>L{tnPfz&>TKexjqM{ z^nJIt+`F$cX_-Z`HhU2hyqC4gXED3k;y=w@q&s5C1i6cdu91bawn7-GPX&$i(c5g= zFW_2rb|@!+SyMvHVe1ejvhoEa!zRiZo*OIQB99T0vnUV-We8nITM1(N_EiaalSC68&VRwLuEjam!{ z3B<>X#)tT^RMiDd%gTt=4_Z12C zk&Lu$5S3+8g#73pJEVm1n`O{f*gRr_73LPQzxKL*T<}@K%^UGS>J_n9h$&!Yj||gm z%^H^XwzzGVG-XPA!Vt2O%5;y;^BTM>ho2^NQKBqYVOxt(#0F`HV!DX%4bAy+7OE%N zKKl5+PLBgHOognhJ+YJ0u@#j&>Og4HNb-0fM;{{vWJMA7=viG_-aI_s5=;IBB)*tG>>YN57t{3WL`wo8{90-Opy#G;U6VeKG|AGN$riBMmLZ~Y(AAthUX6(D} z8)NLYvY=w!A3So~sIU1Pu*sJvc=Yy3!i|H`h35I*IeO7)4dOpm4)vy(N5HAwhyWi1)QO$JWcE)T2Kgi8)FnHtZ?;|LVH>V%m$s| z*S<;;4nwQ7RO>lB5ck?Ah3tKM#O^VF+jrp|JI`ab)NL%ucRGr7e`JHo$^OK5ac<=H zSwMI;mUOezQZgAYA2BSXJ@zO~c~8@_25hQ|pqT135q`0mA@xktQxaZ5aY=1qKJ#`w zU%WPe*rxJiUDH&WYf+;8by{Z7<0y-r!8~WTTG`m*MvfYzh5Gr5KM8Cfnc5hTP^w%G zD7avWr|}TUc0u>Hrk3WpXzy`nMLN-S*nPz`6Z=$AF3ZJZCsQ1DqUJXrsM9}atq(jg z1G8x$Kg2#R5LkP3QF>NSl>K=2D?8qQr4L}5**&5%Oeoc>APPerf8E!=7cIJfb0FI#k>VZJDLJmbyh}Hy zOtR86_(GPqdYm?(TdRA` zWUuH+(9KdpaG2dtHagoFUJzoCIB+~_d8C2! 
z;e2POt_X_3)a25|XOyAIp5IdZ{qye;8YPW1hg0emuPmroiY_;icgs+v84izi7>?w& zFcd3miTHJBBR}xZy$SvJEuZX5bmhZAvs-b>&L&TxD3!H&21l#wS9NsRLhGI{Rr#oD zU)8M+EDJgtTxaUMo9FUR-YdT2lU9#R={0M^zQ)s37>l%6hY~G z6Wd%x-@$1KkZ>-`FnLHoAd|s8;zg?rdLUJ4=*>Is;zr+%7z8=8Lj@x$$d@$Zmu=_vjDcnj}5uXKH>TTJBlutXYd$$4II4hd|y0Qnwh=>H28^gj*n&NOC9s zl`Ac*f=Yy5$D{6YkKW>5&t@Q%oZMX>YD^^pFXCT?^9Z?92CaW~2+fWaWNNFr!R#o< z{SJuhX>UcClU;HF9`hx_FYk8dW@r--JbXYBTB2ds;^deS#aUPIo5H?FefI%A-f&as^lIP6zbZjzaiaV7(D zY9dk1AjImOC`0I*OZcQj%|%N)*C}94-Z2#9fLeX0(ao?q0V5=mTQ-9_nIoph)=5^V z*DGXMKj$;N%ITO-n{TwrCK3Q{gmBFtag?Ruf3)Ku`wW85_iq{u`?|c23|Yc`7-}5| zy-9W^n$s*F41U1!0$E7|E<1hSS$!sjJo}#MPwch3LPHci*AgKhs^cL`_G^Da9z4z9 zq1VoPtCs|LX&{(SSH=;vt|^7;}N|&A8oA3M=%@_l`AJ&NfRd*m5!Q!5PPl-yRS+DJoe3)G3{ zx+NP0iTMsryP?)RNwCB=HA6opcV1r=8cKE>c!s%Hs|2qK#J@@iIm(=akOmYu)SU&q z$_qGt&^lR9A(^iq;u0ABGL%BFH)&Z3k5ZtdO!c-?H}}97)yB;f_j!>jqlVY7Fx=>1 zl!Upj>eB)i-R8So`f>Lp6#=kc97OG>FKcOS=A9p8{<>e;(9|O({ z7g73O-tU6^dZx~VQ1X@#f2$~imlA*~YJlTF=cua(xH{^W}biqW+l(iI(>E!AAY(=KQgD{b*ZMK$%7V9%98BMp$X_$M(# zq!+T*yMyow8+rT(BJlCG4SUJ18-}vWWEf*N+pdsUuE(;?SB45m1JDfAbGC^TL?d4x zG|z(P`9Pt$w-Fz>GQ>d)>kA58!@4+9h5Ctq`@rZU?j2 z$JB;xp?-`g>o$}mDlR!N=$>=zh%nxjjd6&#N>}vQeQgD15i{~Ijdl-$9lXagA5>mj zRY;+&r5h&96hD7{HBW5^Gyb{}t~k3==s7r@(3jQGy!v5iOpZBTho)m;osC+XO|QzV z2V&ka$fJ;nSci4v*BFXcD8m)C1>{t`w>Oj%neFN^Uu5FPqpIrJdPDCuUxTrQ%^b94 zMJF1M#6ZTEXD$KI2h^j3hv04f!lFAoqpPI{(+gqCzM{J;#t;28-nyR;JCUyhF(kL4 zIg+WprVvl!*ihmwtjF}sy=>X5-C-RLwM!kuVRu^3mps`FwDOW4s|#w?vsvz;GQ3gN zeD%+=m%kg@rfEGJw+IA7Qboys`Koj@@t}1yr`j%7BVD5mXU*D-j_t7v^X9qunzzSq zN*vdR=CB8Y&CbpS)gSH;vVQCcrD!IDf$K^nJWnG9BRDh}U&0wb%_SL2H{NZ~-GZ1E z2&}sXO)Rwnc1F>-W(#P|U=fsibsva7;;*atd15d5cYF?AkXSk`u{UH-r|~x#Kyn*W zHR}lW3Y=f42m4NzjUq15(2Y@~awPEDJNXskW&-ID}4Jtr#Y2 zAo?wpXd=$c2#O~*SsKq5Y%+TNci!3n%?K$8dTrJ)y>AgffU7(BR|u~VNINcYKu?fC zAJB;nKmH_e)+NWBip&qz<3y}$Eqp`=gN05sYy&zS^tAMSSLMT%aJPbpLS06#aJi)K zqF+o%R!SqB?bZ8A98OyC87iTnpl0~l(&U#u)9#vsDXJTtBb)d3aLQmRQ;!s>SUV+lG9P_#xzF^BU)}2OJz2{P2mTnUMp4-W_>xl*q<<|^5MGGH zR?~K7%q5lg^$Q45xCp%Bm^;C3Ao7+I7`Qs@egaVmWP1&hF^@c%t}}dj?;2b~%RX4Q zzm`o+V{%&(nsq8S`JpX)a3v*97Ai}@G(+)pSg53x>KPK4^$$P_+PJv(0Vam zVWXvhfyJ9GKep|C&^Ox{(l#K0#JftYq%8nRN$q1^6)*-k4!Vsbk!V+q;w9ppd=!f4C1Exx_``C?<9W!dg{!={n^- zCe{1tpI>eC)C_I#CO=nKO%Aguf0>QdNlj>>~e-AK5Txxhd-Asynf+r@lRApz)+-AN}FZ}u%!~5kJgRfqeX)U z4I-vW(y|+8GIv|sv{`lDM}+JFgH&cRZe~L3`Hq3teyHqDGq2JDgQM^POe&+)Cq?Cl;5?#EYqz+_i;UFZ<(3R@ z2eg7Yo`Gq`x|<3%I6bE{I5Vf&Srej2fa9rq@ zW(`-4T0|Kg@z)q!kbf72c)FjCzht_X(i?y#(7RUSv)|zj&-ACB_QTk@G{{g9Ovz5& z(x12tJ}dnC8{FE)hztj3gY2#5FM|_zv^Adqu11-P4{OjUoOp2S?)6-az7=O6t`OW8 zW-U+T3^hW5DCiwodxj0M?u9nr=8sjdK(5K|M2mFt7|z`}p;Z=h6d|>d?(9bez$W7l zx#ROrw5w{5Q!LZTD>K#y&8^cnX$wJS4l^9LC5xHq8qs7QvoSxs-?Q+&yeJk4$j<0^ zB)DN3Fm!L?l#jW!$6w4T;@Bh?#yY0}B+TvR0b7^-atdK(LssW}cHlAuOX45I)!~GgUP~p_8o-3zG>rk z_sB9G>hodkJWgbv!2RIMEP-9LO{0f&8s8TFfDvNtwMnh!;F|kT!l+V`B_JiCX=gQt~Fd%vep>G#z+DunzKV8b26;(|Q5P`dRGA60_OW5+A3 z>~vF{s%%7xCs;vY_e9)(0!EDZ8ScJUSl(oaVmV@e0w7Pqeg}<)6p`DK&Su8E2g7>L zQbpWKoOe14-qG3QUIPSm59wBpzYkd3_C5Gw2>G&D=$QHN-H_5JIlCF5og8l27$LBq zr~d$uv}fD8pX!>%y=2We9CJe-r+5{km9ebkqt5qF%;BOg$x!Owtx-3?J7Mf|KHb-5@xicJ8$)rl9i(`dKwV z;USvo=HBX*XoUKGg#-g}gark~2|8k3ySzSIlbBz-Csl*4zY$u?W*R%FZQwiPik{}Z znY2Fket6Frvqp{Ey8a}NlgvnewW`+FE4h(WyeUSnogbxT9_c+z<#(JXJOgshFgmN+*IVm|=eLf54;_zOw^+^-zTsVYO$s10n!fu^ z-@uea`*QQ1%bFWQzd$7`x)BIEE`SB8dL$i7wjx|R4b7KI_HkXOymSJ5+ z?Q!RD&MrX2`&JXOpUrTc0ZjM0l@^Fw1Ju4tf+Yu%FJ~tGv4O@ubiQ56r48Q%^IzOy zid3|ZCVQUdwX*E^g-ZWr?U;RbtZc&a+F+sT0H5{X_s$3ACnV(O0pVMV%YkPsh)0tx z0ATmU(DtfiTIXBo@`GuA0d3t8mV@HLu;YX{&9|j~s?!Vcd%R)qV>yASzSqN(JX=)O 
zkrgp`rEboX3fIZKtSEL`$!@yK_65B9U>ajSygIfBh>s2CaSI11+#*tvbQ|AArr-vH zhP}*y96Y3j(*~u(ZyJO3$%!t5Dq@IQ3Bese7WfTCltNW-1VxPKjF3wH20+3C5jvkU z#lO!3nmpFE<#D!#W10-<-e*U5CF0NuTg~&bmF@Sb5rvLu|G{u&1?o#m2RQ&E%(l8* z57k40_o(@P)#mFRo^^P(LVQw#HuzSHSV6Ap?5c+EdhGr)8I+d z-OemXg6X|408sc92in8nAQIRK&%k{BrPtDgk*6R82bfRl+jw#78RyGsD$ohv+hPH` z?4=x{zMzXGd&|ci9-_vxj5bW3-p{L3BOm5y3ZDmhj?O8wuknb1I8AHF2yMCS&Wwlk zW6C!K0M|*!JTdI_8GDjwIDKl-Q>5ChP)~yA@BKq)Jh;i)c5Rmo+}l0gYZ$l@0bTvm z{kAd?s#1#=zg?ctyl{yiXoGyJ^Ze>?{a<)L=CP#)ToN9Pbl z0APIeClF5Nw=cwv9OiS7jF!D32*e!qg1V-$$V&s~P7CKVYxTaXwPCl_9NnmvPR>K< zHJE+kpb%R(ii8tun|3m)R#v@K@R9Q9=rdrFlz(QmK4mNbe2+PA?1SLr0|Ye^(3Rot zvKVS%>oWMp8G*AVrw26iYO!U|$~BmQk5lcknf{tHlkeNAgr~LjOz#&~j2opd-q)j6 zMaSC$DTy#z!Xgrt(ikb%0l(TjfR-xZn`+X`NXVrcE?Wn!`-_TQgiqBede)Aq^9akKU&WU8mPhf4@ROJI#X{uIDMs ze9ftwVmw|Zl6$9a35Tf9-PN!Tv+k|6>BV1ApMCQs3AF&7BC{?fIMd&F*gV{v~8Vdy{N)I;lICQB-{@s!7i}rKfJ^)`7bIuiaJ->^d z1M_rfaG`m?6|rDcF2>5sxwQV#J$ub2@TN~eil{?6Ra3hhn`3)NfoG|?v0w=_(Q}}% zKBxcyD z20hK4T#Po?XLK7SRXkP76sD^noAz0~rlsoji3&Ttc$dBEgIdhmN}TEI`#ud_<#3~b za_*IrR1ZC232Sxw#Mx&kRi(+n_iDpTDC^ekelzBPwaB?C%ylP9}7$^oLO zZ-{-tEjr%|)u^=E8#D~+}Cm)+A(;l_H`FXF;0IC?svMU zUAml;jPIfndqRB6qC({7n&}iET|m#gQ(h%%n9;lE7O88K(9O{a$kU_pzX|_=+4_b2 z+@AoD{{Miw{{!k?{vWFO=N;gGK;8cz0Ckm3V%;h^`x#5yfp!r2rf3>4BkYuF@3JnUd4s1#sO=9!rluG4VTc zv+|KfTCiKj%j3)<|J`AK{ zQ1QFQ|K1Z{9^sS^KW{9o(b8$lWm^r-5yznbbdzHM)dL1c&+q-y%^%q9W_JlL{yC-o zl)(~lAxl_+AHCm44l&BFKSE5*i{W5!2R^A_`WWhxHZea`pouU-ImdY zTc(?N<&E`7zDyT{zO=g%o`Mtqx-0oMf8p1mYs65WP&Ly39Oa*{zN?8Nj0cq_6?P4x zCMjSMA+!DO?l^CteCT->0ytNlVnER;0mu??nHZ{;#NUGRUrjk5hHGSPtd0=)@HqRv z<>2>02;x4D_S=Zx@%q2ueZP7xK#l&LI@0>P5r$&I9%09A0KTDMHGY%$zgl@dPhK$q z&x$N%1ZocgW*!URS$y})v-n@tU|~9MndQ49@!M<0If}`;@^cpdNF%#|Qy?4wO!c{% z+sQ!81+v%%^ZC!=`8l&$?xw3xCI$2NwUe zV*XZT#`^%D<{zm&zq%jb6R~#VoBp#}^V4U53*aA#Nar<;Ap`CWYDpyj$5_2f0LuPH zCeV3Jfo{AE&gP?z_`9;=pO)W!0F?cYyruIeROEq;MBYl^{}@au6QJyWWLceW(it%T zTPe2RmYedQN23WS`yXje=kr9072sB}Wse#CA447rpwIu2lyyEi{|xkdQx0~m!ZxS4 zf2k~Ico7bMQO)uqklOgO?g4r!(8!m^*URN!I^VqgXWHrz-gP&VX(aqp`TVy)H&X;a zo|VGiK)@)r0cN+qmYUQ(T<1_7cn?KCmD^u)OG*Ifov}6zX}^@06!E#A%sPvLjR*cx zzAm^IU^}Y`_5)8_zl~4`{=KL&h=7J^-kW4pe1gZ09i8#8YgvH1{A*E5X_CJshn$G{ z8=8jFYVRFd{@MHkhQfjR5q5cK6gs!|L4sGdok>K~dEhH9|Jpd-N=anK7`CiEsf^0$ z^l1IJQj5=(>cU?HtR0%fOcYrbK*5PPi0r@m;@X!R7Yz%YF%)(~pV-y&dh)LPHTTc+ zyV6Vbi|xv=Rc<(xuGvJagOld6x&Cz9UF2nx`7*BcQGSw$pU z`E@-!2jd4MFg@%H)jg~AazARO#^d}$k^EW(tm*t5(fUxGm1#oY`!|rFbbmAG9m$dE zowIM&x0$AXyF-9kb3(ZK-AG{ob)w!Kw3Jof3Yg4h(0(ZgXj8dy^ENdDo= zU<%*&3U;E#op$UKbbEhI;-6(*QZ-g+RYHD%{e)MozW!vMH^uy?H!lQeMoMwA+fKRr za-D-LRpvh=EMRkO9wBvy%;UCx_oV!}f_R6^Tob1o`VHg@j41$pgWd7{U+bbiUmm$1 z3xs3ecj{Bo$1^lg{yy0PI?frMS88xpL;^ZztPSJzfMk6@N`r2|(`TZz=BjuRqW-U4 zECxRSZd#Qr;CWWqB`cOsB7q0(0%fp@^78W&8Q@~D_?;dWbIIZS^hwAK?G zAZmjpxNrfB&7dmJ{3H6bIz=#fqxnb6)+a$#T#Dj*TB~8=`_E0n&muWn48ArQm^!?z zF|{34uvO)|?HyiYLPbGA@mQ4=r@sL!QQ*?mBRVoYftZLt5Im}eBWk9`YBr{6)?L(f z0j8B~UD8bFn^5v+=|jf})b=+mshlP*2b@cFXjJZ>73!2moSAneoF9_rfbf8|yXbL= z>|oFR%XeR0zyUs_1aQ}@RxJmh`l17X?`ELN&1L|YDu>EXMlxl>4|}J3Tdup@`2Fj*4Sz;V0{(TCrub<*bV5@6giM%csM`BpjyVhF`RJUpY#w+eM zsq>F7@%5?kcdW)M9ny(my5-if#1)reD{fk&T<4Xzfa3zaa8Wz5*RJ@L0;F>HNxkMA zdE@%8pGw`;yod+3ZpfA_N75tZW8;CD@Uz@{^1H}wsr=KgD)sz;HZm+cQ`d3LX)<<5 z;>OC8KX1;z@7UV>WM0>?+-e~BRe^QCM#Rr{U%(-|i_G7`WW>C(*`9AV@0}wUJZu}K z`Bk0Eci*MeuTjz1i?}Xb^H60(%J;@5{izOzR6T`z-nbeLG`~;n!S5fuc>3xUyEJp4 z^uwQT&bW2?n&Z$RBNZ`>?<}s0rT=w73v=+)z2BXlk3$6GYpZTSX5wk*muUq5uF}Pe zIM567{8ZN+HPz#*uK#N0U3MH?u#PbK9fDu4>Xv@>s-OiWe8Um2$UB#m1G@&!3xD%h z;hMN7uL=PEo~%EoZdu!@RjU_}3*h%auu%Z5*7(}BMhNS`HFf(vI+h=RS14|mIeq^% 
z(0n>TMQ*2y03U&+qJM@k92g?~(J|KF)zCVxMs6DLfm{X1)jxY@g(HI@EnUwpU4y9~4TSs{d3U+MWX;u#>IK5oVqxxcHU z_p>@)|BtHUKy-0LO`M8HVSm+l8v7#QXg%ZsUXYfdmDNr>=ZHd~%ih}6WQRJs)aNaM z{dlCu6T{}kK~!ya+^(aQi*f#v6q5E6IpxC38;GO|yNUTU|CQ#3#iNnwUU7R;4=p`o zxzY)D>SUc#KYpy&68x~n+R%e@`eRVfd+{hWd*SqKmIcFHN|Xq8x&ZSmH)^dLG+}Ew ztOKd^Tv&_l_KmK60pOn1rc#H$?)RjMBxF+>w(UO8usPZ2qB8WiXi-klXk;+bF24qJ zZ4eLU*BrTUkzK0ZjLF(HdFYh_)vEuvquYSlx>AlzIHho8BCkbav7|Ehf!OxzqXSc} zg}4b(f)h7|Qp|qxXvo%DDAlB`f&anBrjo_|j19xeFr0+Q+tsVSQHUlV-tAWW&M7?Q z>EW5O(AOM#vZs8*CwrSab0OiJe0A`>4|A*PTWcRx>)ob@4jAKNkBt%yh?8wFbkjzA z$s67slM+jb_?@L+r8Wl0uZ}S}vE&-MBRsbvug;m3zf`6ac@lbwef#d(j?N1Yk~BZ$ z_`qzhParc9$4vo~m@({|PNG6pM6R_H)aJnYgZFLM)ShlIJV-L~Xl&JI2@Y>n-0Jy8 z?9O{!X+AX+#|zP`wB95$?@FU{zd*qJ=G^sOS~aYs@O4b?kcwL;~|k~`ao#gu~e zlBU3%{lH8oLK*|JR%H5bx1*98qp9WnBxq?~jbBb24;us_;P-sn2R!o&DGMCHnE=nC6!S~u^UP1&>|`vJssxdMzLu>AFzXu!`|atmzt(+2c(!GBx^?Tx zy1jsb#X@z6=ZU9aQsp$3V`B#xM!rk$Nk(+rQ`~lGJ)`G}Q8PSL1|EjfDD++vz4#*U zNV@mu(njTIm7Ys;CRo~D#ojv8Ai`yRI)+X6$n_NB&r{GR!yco8r^*$k zjv9Nb?Ljp^0@SoEY9fHyX}0>tQRr8DyM6$BA-_a}4iQXA$kL*8sKtg6_Xe1?Uab#V zR|)nh?2{U`hfCfLN9Rw_Mp5yX6F)m}(?FliRi>DZmnVx~uC7b>kUZMz|YT{h~BRu#siNR;U^jTHfMJyZ}?l^S5Ayu{51F(%0!7a zwim*-;Y-GJB|I4T(uruf>cdNf;n%5OL>EzFxsh1!tV^dDE3ECNa8Wz45i~#(D9(y@z*P)uB2C2NF^j^ zPA;lDZJ6}Js>6p(D@Y^VvL8;R7H@UeV14_pP@mQ4H?v$Ba~~*oj&`${|MGwYUrk$F zAJIiCYBRd2s;bY$Si!z4#haE#tp6!908=@NjV6XC9v2^R_izOe-+8hl9QsRWE#+G@a@TTB~}W zSY^jKSYyK%KK^1yX3!2@bI&k|D)Zr7>zUYt;#yh`03%wup_I%VH$$WN+K3>#=OFEh zqE80Y2oSQ*H;-NLO3bD*94!ks&%JXt72Vgo>$o#aAl*IDXZU4a^ogE*nXFij&IS`@ zs37KnSCQoQD)nL?YWj+%X@__^(UtuvPnS~5=%a42RMD1o(-uYU-kRfDPvlm7B>ple z_b@;~_@XxDG;wp!-b}@eP({i8tju*^cUd*Wv&tmM*OAOAmvFx8Z)%C9vr+U@-Lf`C zFSHEYol0N28ZH_}9ya)FE^9AlS4wo$TwSZU&1D~r#h9L`kTb%jNSRM~gl`9;84&sK zxdsat&g&lI)~7E8^f2>pmqd4amX1{Vo`?BQxigfWC9x;i>x(4Eaq1-%t3E$+8q7Xv z@lNNSYr&F@cNNyP5QRQCydhGv-@`1+yKGZ?{EnC2g2HI$!?7=`J-+k4F4Q(%DdL)aR} z8G1b!a7*jiI@>$l`C(yrOR;Ke=grq2TLkMjJbTj;WM-(X?NF4d(G2@zpfkVXPX7I* zX|7Ld%P6pE4dhm;sPEP88M`#0J3}7tYt{EIX>t-@)aEQ7qxx-4o}W+R?|_LnThMMc6kx4jmIR%Id8)arNIXtnCX8)hJJ$ z*s8vA$7Huo0LQy&Z{01oB8-Rqm<%(W(eT@EH!7E&3rx(-gTH^)(zf-a8RMwd>uNtq58gOih?j0k7)^xN10wWJ81$ z{D~QEq`?Km$Vt_A2lH#E)OSX#;wR$-NBb6SWF*7DjP zJfd;hPO}_4N$937z5B5sC~nEOS!0-nK)-IHmy8(rxZ{@EI(g9hksKX#->#1+)~9FB z^!qDbY}#d6a?wcIFJ{n|K3D$>+aq#24=!paB@F_q=07*iX57oLFv9GU>N9B_naX`- zPS0II-(y!^w-pUuvyL03sI5zC(=LrGH|$yGx5J|p8d?+&B&;NkOkh~oS*Il&-r`5d z09~=`HO>%?xo7jm8dJQPO%;N?UBPKqMl3oMk}v-YM%BZ=Lk~8>cw9AmK3Dx zX05qQGW86XZXEvz6@BF?9MHCUn|c~ELV$vlK&4wM&TR|+_8nFYu|Wrqmc*)z z&66HJEe6%BAKg6pfm$3ryEG>=bOkoZqj?F2;Y^z}Ej}o!J-zNSlf^Y2vIU`YWJ>GW z$r#k(bFJw15LnO$3V=G&d;q`>E4s!0E7^C?sJPL=7P+WOa;v(exe;vo9<*Pd)d^Lo=*pVRw787Ua+q2O370_vhZ$0hs%2m)IjU?Nl=KFxo}0 z6pC8wR5lIgn&duq`=@z{uiw78l0;Tjzl**suGN{)sFW^Fouvx^T!!RwIwwmJ1i=+9 z@p<8`dGV9Cxwb8a)wxSO2SpT^cF~?Ek9Yl7=rbwt*v^ofB9ivRMK7~)um`|hh&N=T zM&E>ldgTpH_8KOQqPl*w5wLwhZpQt1pYsGsI9qu2sx@}TvC_80-5dDouPo5BV za*#bGFavo63PQ|ag#>2U;0u2B+065)Mb9EYZ!5#6D=P*uHP?>M@?`6WyxV2E`B8Vw z4c+A`Ex_(i2ANhbsY6~Va27#@QJWvWaeCMLNev{tQgG9E+6i22U^Xlt!UxhWy->_r z_qJyhILpvKnElcWsp3M&OeGI>Xf$Iz3jRO#-ZQGHw2L1dKtMsnktQI;f;0n45dkTJ z;0Q=lYG{HWz4wlaNOx3}U7=d-g7atW^&CcFf6&`o$3Cl}TMyLOVU5<{u^N*Xsejy-b^?RhMs_ zhkm!y#1mH^A`fplVq$k&m+`yGesvi7M(IWo2BUA`hwJ=lOcvC<`;8))o$h3dobypw zRq*(PkLQiDadEEVR0kr&eA6w z9Gw@$8hemcW-TESW4`bGmD)}Ha;&ubW>3Jy^w9&Jq!x%#MMy0Cln~JkCqDkxH;}*D z45qK|%|+73U+6dOWlbAX;Bn2}w(fm3uHsxve_Y}EZP{@d`#P>HxBh{$2==kzlj>8{ zco4Ki&em@2$a>UACe0?M`C+(MPT#De?&0#H)kA8t+|kTb?1C&@&0B-5W`sSwU>x~n zepr@AQmaPd+JbTU^Ld~4^hl!R@Jx#AekvzDyf@Tg~3->oRy;NZtEPCsJ*mA|D 
zZmuT3>;-O;5w)CgzMA-KIYi9m!>--l&cITi>{gX-n=(T2o*=7h_@(KbvJ~#%9j|h4 ziQpl59X6EnTBj4^+1d^)S+GoZotN2ln}CY%U$trrvWdvv2o`-(8&deYtEq4A{z|fH zNfR(OOz{(WZX=qJ#=)B=z7(W+-!oO&owc0;XUbpt?MeGDS3$1#KbomB7;AINTW((t zKFy@C*%dpsGmrL&x4q1r-f$-jA*+e6 zWQ;{y@4(H;^-=~f571End8OL98dlzzEK4KD8GkI%@U-tcgZ1>ymZ5<0$fS-?=sr_v zntuJX&mFz#QdKadtUn(O!Zm~?h8hD2CiP2DmPp9Y(lN0Jp{KuzmtNsfwfbatsZmx$ zR^E+!jJ!qfVZvae%s1}db(HiyJ(M{N;)8zH%>`^zL6q~7xg3=US9CC5ZvMGxj8azM z6v|eP<)GoBXP7j=8kYBs?ef!{4_=7#VDcg$4pvOEm_T@j|CY}=xZIwM= zkF;#!d*owWcmnp}0ynEgYt8NI5qKNIO{NpYX+4BEs)KQ1|8fxKQ2S8=gPi3kBp@HD&~%{^_ZD1??OD{4cU{Y)}tH8PlmK? zO_Wua00erd`q+>bF6GSyrDdIttG3k%1le0t@?MMGo$!IZ^Bhzwu_XD}q;N_Ra2V9F z=jMf!=Z%n0**CUGGMkn@I*)f3RExU`q0Ti2^W(e+L>YAHjjxgu5m0Xbg?`^Nmmun3 zUT^G1P9$aAGo`c0CurpIVKU^4Ztqc4@79~5)EqPvt#Z(u4=pqvpGmc?H?89!9YYnu z)Y87kR?oFSvNA&h7}Rq&fs;@=N|6*)K!@}m_9y!)dLsE=1dWdFuT@5L)bRue2A@XP z2X4K_cI4D`y|+c`@McBdM=rXphgY$G+WW12$C4+A#+CAdsrzv$3p( za9j`R37^rrhxGCMGoO6~H_2U^n8)r4;p`~%ays~g$2Zk|BP0bwKx;CF}AC> zCY8RsgZKA5-UdO~p1CzIV zTKporlm#gYH;c&CO;4*++LdlpLYr?gT^T$k2a?8=0?4_5;m+TZRqJzZUzHeoYK}4P zgK(hm!J>oHc{ejCWVcl~C=56-d^;P4z)b-;HK%5d91IG$*FJ^kZIAd#Ica= z71oPfI?VSa0!;E(bc7|qATRqB(N#p65>G(w2Fi<|kG}otp}AkzI_J02MloQe#K#vx zaHAhR3PYP&sb%-6(+I^IN>jFWF+v>;xzSok6qU0>m2veOpS@aDh_V{Qj@BgQ#tP=s zehPiRnsl5JMbe0IS7{rgB#s!@L|L+%UDmtDY7tD{<}MP38Jn;O1=oY(novdBu8G_j zJ#0C{p4x;?pK&yZK$F3MZKyHt2z6N7Fc9bG*)j0(S@9{U4Zxo=?jm;{qc$P#;s$aO zpV4({=l7XmjomF;skMX2mI~O7`^)Y+AbEOy?ZS!o{t75}I*ld5{zebx1e2 z51KT$5fg)Eg8Ys+_r7U4$}TCwxqoLaH-E#>D$2nQS!z>1jdda!wZgfUH3&~T!$lS` z_Rh$qtD5&_JA-ri_oDk6Tm87-vb##!SlDqT223o=4%7P_d}Eo}HpODXu_rKQm&RA2 zpSCyH{6EYS`J2Wd;RCnNj|16(4U*yy%-XQMoi4Di@#V}MWBJdE(u-{BHzMTQUhlUo zzr1#&BhUe=IK(eOli1wg-WHM(@W~{QOkO#sFy32t3zitNe}O&ch>890#mh!@d~eC} zM-iF5`=U{COI!Ks`JBOa+lc`gDTK}RWV%;)vg;2ZuDe}N1CH@$y7xPx+dqG6tBJlV$1g2*#6B z)LyhFSJIfZCL23l1}s{tF(1%h?!IViy3SrrMHH>SxajPu~;MNbV0GPwWdUeGi(g1 zE^SLanN87`t_5fZh0|uY#7g03Q}MmZ`1#6FAMa?9JNB+^DlLOTlTd_bz}F#ctc^P+ zDE54|h@mr(EL|z?kOoSIM+aY{A5Xd2V{|V|lpDiB)t58y@#uFqNVVENYDk1Z+1fnq zNwMv_825`3*#!i>1+oOC$WoEna=7J~0#X(K$a*ocmf!5Y{DJ=iXB~meO)8NC@q$>% zXabZckTw1evO!EPmgc#Sx*$aonx7ymb&oDBwHxIC8}MOMc|28Gwkj66fIr5$M@rho z6P+=@UmtN;6sgO}MPFJ5k)ay_rVAC>E+^)?nUTh(wQSz%lZwr-i@fHo*AYm7&Lxm)FN6iw`?&C9Q+FV~gYD6t7_R&~R9I_
A~GJIPlOTx-4IFUq{T|L4I)LdlD3j6+Byk_@zj!eIyxET-_SQAG-Un;Q^CHjqxq5>8In=~$ zE91~kBddMME0IRl%6LJox<&7`Nlu>T)VUxxW#+$Kl%! z^MISyjmSJRcZV~Wi>$eO!1b}#F8ooAd<3*9wO?5_EXN2RL(Wfj9*UBBl<&qR9`ebhU8MCFRa-K2qpKheVW<8E~V&vx!&oK%7WGBqp6F!QQ=%o9W9-w;ccHIxzcnYZoY)etva5pPB9By<;Bzoetq?km z15k>~wZ<50FaI7=?+H(}ub&P-UU;zKqYyia%}A9O7U{Qeb&xC7y)?e#1YwE~$ZryzFn`PFtGjREY09Q$s&H0?)OT= zI<0qJ8h{HmiWbzC*!uGn_tw>2+8E4;8Ju5f8yHO$@0nf z(fsN12Ppwl`nMy)J0EMS3nAuo+PGN`P`H6}O_aYsl!~n8U{epvg-XyT+fcO`Y>v8# zmv@2WK8PE)w`Yey;VjAQQdwNk_|qfaM)}cMK_%TzDHK?SSsv@e#i8tO;-jtv4p(Za z{h$dJDFs{Zz}0@7P|h2zaH{>}n^km_J`#41E*c2rY@JmT96?QuRuQmMHQI~2w_aN3 z=hzEaxW?n_mg{`Ti3a;SuohTok>TK!X7@w}Corn=rLv@>waR;T#R+b{ypk1DY?+#84 zGh(r7GDcZrnU76yrxgQ#>muVCuzJhgigRIzPWi=;Hn&y!ob%ff_Q9yu`SI zk}%ef1)czhy7+y2)%HL>Orhe`tYg`%An43@SeY%49~C!js^j(_J^o?0jHYXwt$&|R zrjAY*AKeMkc?g3gr%u`X8INEQXrYe&GUdr`AqJE)&w5PN{3t(@SK=vGJF1$8n%5sB zJ!&c5kkJ2Ap0mkD3xgI4ezrQ&+i;K}I$qck0DHBZ(LLJ{6z;D@klZlw4Z~R~fS3>V z3c5{_-Er1og93eU6M(+f7lb+Oy7MdPK6+$$~ z-=oWMx|6O|9FnL~`lBp=XTmo5iU@=HoTc|=d=|RL72>?!m= zm&X?dMpVR~sM3>bks~|&iQA9s_|rYPajXozON^R;Qv=??UD)eHSduJZy+w<8( zZ~RQ!h;w>%|MtC5Z)`|<7eshk(~7c0i7FerQ={ElmH2p|FXr98PgE_NX4Te!?VFMW zrjKZ_EhoI-dcNa6a(as1Q*v63Ac|Odt28EEcuNIOk2SM4>O-i{-VCzWDM(BZ+OIms z^>Uz(6iLY%WX7CXrLz36!?6%+Tit~2!itUe25)>)9?R#OkLt|Lt#ZgP!ADQM;sVnVlO7P5TTquJW$EBa5VYtL;z`tdJ13>ezY zH*RBA7^9mFWcOkm+C$4v+-}Ek(xL9T8$&7q+B(1dT4w zbUPG1V5czP^LP9apiTphN)Xnc*x6w;J7lR#haqZj2(N?wWpc|3vd7SbuBcG1??rV; zfVuemydg$tvSp6)R^3(M(5!Z8N}0M!1pawf1oo{;SW%w7ead}}8;?h=Ty9#q2|jRe z)F~=Y2p~n-yjZl9xoI_M_okSnz&uE#cBalCc+(l+ukqNBP&uQ__SQ7iDxfGs{x`EmOgj*%Y_KP#Y7IfSvFmr|7K`#&c&+x#u462=yJ@Byg^;>Ik^NRqgS<>Oc* zZ3B=CK(Cg2{2k+@q#eTYv_Q2t_T$iB*f_#Q&VnbI4XUq?z%IW(yW05=OA3^ z;lC~w4>5huho4pkAU+?8|F~1z=hHOR%aHu)O!V5_ltgetpN#n#aJcbsrFTpZ0c8)L z|Gy7V_Wv+^+yBXOlqr%DJKjk5X=KRbL05==jn`sLi++2d96anlfO-^1dYQgamFUBb zU$a7tFYH%HL9)Md)NhmtKqB>o3}-fvm{>n5lli{q61M^OT=W~hX8WCeivMCC)3Q(p z&^pit;4CWu9?P|ndGV_kB`QIb|PGQg= zIYjaO9S;tru-eu#r_wV>p>DQtj9~_;;P>_-)jDWBm6CF{Rk1Y1t!h~1%OB2 zQoHm0(c0i4MZalErk?{{o`k&qh9ZV44uIn6dvDci+}}}Cj(NH?hog+Bid~~} zo2HPyJS{1X2WFDTHudjIM$HvF~fMx=^(I0kh!ud~3M2bW~pp_CkckOzzRQ4V& zx6p4Zd`~PWW>SI|(|zM8FW_{w3y?cOZIWlQBf8d)RJdxx=J@z~+U_Vu&tfHfR*D?I z4ZS?Bi^`cC8`uaP0wZPq+9nCVzgFL%U7uqu{;)IV0Kk7akmLT}3b5@p@iqEW*;YUH zAyjdivf2?5w_VqPjif5^UK)N4w!J9?mI%E%R~T4|`5U(S8)b*lUotvIV=@dUjwm1| zQ?6L4;#73bV!wWQ*}+allpKpWV>)&I!9P!)qGpBWr%DsPnQBCw;EHL0AMW)49NL0s za;l$xYD^?IFDF2L6f>#-2&jL8%R*U%(|KMWR9f8Zj*E~5$2sIzZ+YP5v{EO=;tM|M zyjTB8s_S7YY>W0MhP}kg=akl3D3Tf=$xR0^JUQz7YJLlIr#7y1yg#$t`=z+jM5}!1 zVo?kJ)Cd4EpAKnbK;5fjs9bca+IR_`0Fta?|7{QuD1kT<+U9-t-sc>Pwzw9_ z`wd6Ly%!C$pg0}R z!n2xGo}&~I{`g%}9=#_7GEW5-=aEQ?gc?0t>uNzXFlArCs5jj+&ScQ7M|C1;;BMK- z)zy=O$G?C6N!wSU47tq4f{hE9g1cv&y+B^z1QJ(FI#gn%B;!`SH;L>h+c5?2;WSh1=+hIhZV%Xo(GPU@(I-df!gV&KN%|zR0@AFyq&nuG59a`R2s?pX z?Fb&Vp%yfPgvUQgtm^Xk=MA}v0>$qq5;qA7%L*kkWjazn#9RT0`SG+hZCe)OT;)#W zb!E|r?;!Nw!lVs(%`I4rxs9*V_I>O95$v7-@A6Uz)}S^*wh$RK9phkX4Y-=QiO<5R3pDCqn6@u!XXu_&;PR{-9uS8|$g{)Zy^ z<=O)^u)e!_k01X8x&P$LBPRt6z{Z$d8Tfe~;FoKkUjgfst4T}ybCZ90wf5nGgeuG8 z_#ZO*%eCi%faDr!Crt$u2uU$1E#qDdD#T`&Jav7evvw_n}7 z^S`dW27=p)nIE_KgO@za^2~F-Jy&NT%Xe%`rG(=aEXdk>7NJM|@S^ z-jTybCfTH>!^2Oa!L{2n;?bhDzQij&zX(n~0%#L>(-04k?e3dLxWVWGaog_9#vYLm z-x_At`?Rz!P2E^Kby~{#&322ze$TiHAqYmIrT#}jhWQ^Jn^8^+I`mc9*|rY5A*!&M zzPi?Wh<_FDy58rxJStcAhPFd%ylOUG9JE;VnGE*<{>1w7*jR-FRs>x8Mb0rwKrynm z>J3JK4qEKu&4*q`E|?RhH>f-zl&6*9Z00)=-l*1gsXWHWvqy!gEv5A_@NZu;%5T-r zWd$Sp^~KdMmEE*q3~GHPIMX3DCNb3#FXd2LQ>laHrK7Z?(@4Jj@r`?{pbc<0BJD{d zE_GXSNru-VirXkdC5YaSBR2Tq&e-cwM~B!fOmmq~^``X+8F 
z9$OO;{D~{S>NDd*GAK#s1bx_9D6;wd{H)F3yZeX3y*~FBhGrX-n%N9lCpo!NynqT$ z%c6giC!(fKAxg@cIdWabY9p=hOoTPSnKZ9`oRb{NdQJed&?*_lJ!I`M`84=X-hgK1 z6Hhs9W==~#&xK^JBy=hq1g`ZPnDd!EYq&>k;^98ACb_#f3MQWc@D564nVy_)8QoC- zQcHel54c_4I)T-FnzF9}Ij)z*{7UY7z=#$NHwa=%2Xw*b*7$+u3DE<&`u8HZybb%S zpS9nnz=CmY&d%uhT$)NY6W6f$-W>JE(;FtPpc7##=%5R6JG#~`9$)pXfu5<&H&oGz z8guRz!~@rha4_)L9C`>`#bH$RENVBf)+VD@mr?{y2Fqx~pL*zG7X$KAZcQwz@LGXi zSpXQ$BK_W2To-7_<0w+QHF+}{ZmBoVwmB8AFzQJdfDabN1_x`>p_ot4Hw=gxow;mu z;|--WQos|nP*{Xh!QIO20X-z$BXvj;$UX9Mh*VMW`>5*t+c*OQXD@_j8!Uo51t~0< zQ97iX13Y5O@%pgCId3U7Zk{ekytg=~%Lg}0cE(=?B^L;aq}SbiSr6a+6D=jq3yI|%rd$<{KP(8MwntLRMTTX_kG)sg|9O$Y;rbSxuvug?2m#2@I zoL7yxOy1c@1xwU|?{@;JXcXiAX)urSYTp6o-F`vekW^vU_vrBkNb!IHX?+zgW21p! zPghE3(e>Y=(ad@dmF3cc<1bQ;TWeNZh24BO8D3nPD(*az>p|`Xqvt>~g1-hoU8>t# zbxYzs*jqad>^Kok5CQXay7;f|wwdo+?yu)5@Fhwn&?-lqh=tapTcDtx;vZNW+$Y38 z-pdN=%^?vInp5C;WN7%F!Y_`49xHIW#a#LNPQMKwDwlq_0`Wei?k7^Y8KzOOd zT=S+E7eaI3{&A-AE5UXl!iDn>fCxm2ir|GLY7=eVueO0X5SH>=^Q7ZOArSW}b2vs5SLPkL=j1QKBXNr>6R}6k-YY9E!GQGse~zhx+Cu3+WReO#GTL z*d8>;tv_0dO2mej(S(vzq+X&8J*PtO2Y9osO)C&*Fd(mH4a_V|0mFRG1v~i%EllQ5 z^*G=x9y3@P_>jBF%>!q%4*)|)o>VG-cqkb#d}AAPV4+uv28Q~Mc*d8T@ZJ4y5elch z9(EYer7?9}%Ujz3r;$_Lrka~(?|-Cbi+Q-Zh_Cjk6`~gj*`7{{?Sw&OT>LZa6D!%w ze9xw9qQYa(;hOn?3;31nxaN`GeY+LuG1IToJ&xN=a!pf{{xg|N41CnU*KA$ z8+f0w4#+|`O!>M;4jOUD)++&;%TiwKiA830HkB0_Y)<*)sto#m7^y>sP;8}d<~RKz zBO0ffEt@j&t%fuz{+O0Aj_@*IwlmF&9`UtLBO;b-PJkZir_ z$6!pPwXv7Bsa~3HQy%XO+C{9WRAz?bk1OTbU|? z4G(tv42!ntz$Lhth=sG+|DLB66 z6WcKHnu9YJx|GyA5y8Xlv%Q`X0F-|RgB0$$VPKC6TN8fl!S6!;bp?p$N)l-iHy+c3 z>qjTqZQc#1+HNq1za3)mI=+ySQaUGj#{KhIH;Y7mktNqlxm2O- z{<f&WF^@6T`f85eIWiLdeNiY-h=o3pbuw8w!n-RaWLA#b8;Toc*tqh zkp#?pT?$pf3>~NJt`F!NI~md1$+}Uc3pmiU^l)HmVYOR#_NS?>HYzdud6LL@7z_;H<+-#4gEq{Tj*jf;db~{k_^o{zgZ3ynG7M z+iOgBFsCjMUsU0iNGZYi+5zd^a-1Av5i-fg$w?ObOJpzr=%B&`lDk!+?TS(+gvJ zTSv~=shpJ$O87#FyZ6UxgFGUP1vGlLlsFs;^osP|9NsA5Vfs)f1PYpJY-H;w<^^+) zex!(yaZ@h?1z$Sbyfp8q7M!l!f~p@0?^CVE!PWASmgfyLQI!iI$~XwOKRz~>n0YB8 z(a&|;iiv?;tkIm0I6UJGg?MVRntJy5?g_+}zeVq@^xdmMpLPc0HyJixy4zWC2W_bG$y8PAHvacmYyY}#nPmBs*StBl1e3oO!R{;-WvaB9wjV{#>6)t}FNC(>> zHR_xFvCM(Xi$x!62vxct6{K0JhYyxa%s{6IRUBub?)GkB%caS$s@AUKOz=;^+E>Ykzd0fwhnR5TNuiBZ5-V_Kgj~}Hx;D0Jg zf@DS^EX6$y{kcDU4R{Vlx*afW7)bYuL;(R?_vN(>csGn}j$KujF%2~j-kzpz+0f3TrM61v+k=i~Tuy`q8a<1??+;@q3rA6}q&h2`BR zHdPp7=Lel`XUFM^)Uyn3O+69B$?PtccYxNfPK+&I37a!pp{t#L7feK52H< z^-=XIhajGabh7-B?lbTRTMeec~_DcF*2iK96+#u+nS;yyP|rUAUmXt|?JPcu%^T;nz3@2 z2LJ%k)rjsbFE?uX{hD)%0(lHUOZ{pg(+ffgP`_vEMlp+f2Wi4;@Aso>v zY#*F;0072Q_X}I@oaKN^_&KV&rdO0F7{r$2mznIPb3WGf?=&R>C1(6NUozR;b0wFR z*M8-0esq4N`rm|~+YWp5D4Q?Iq*q}NMMy>0S2Z!Htfnz3@M1RUUPx4?aK8AOy3vQv z2vA^l6@05(#98-*Qd#Ov-wxfM%#c%tcaJuP7W5~m<#%?g6bqyX63aQ; zD`87PpsoCyFT;z0SM^$lbc^nn0H*&ifT?s%AYfyRINJCn_Itpz>@z-Wn6m(SBs9RH zGIei0>kA}gZ;Gd*q?*Vf3WUU-%j@&m2vlk#$?M~c_jIokQpATUb|l-i6SBaaqwid~ z#Rdn5$~Kq_3&N8*V+NPVP{f;Ug89L4=Jd@RFJ^j_65D<-Qi^1#ur*GVeYp=pD5);d z5f({s?~EKUI6d7pIzyvkoTt4)B~mv6tA;ll=_Y?=rG7&v3sluu6d)-X7`>>bc*(t4N}y*$Ulm@yz&IYgJtAQ8(Yk%_lVQX zbdq^xYBQ3q48tdR598zqT9HNOF{cmRzR1Cp`@~px8!p#(0+rG*>oD0E=|+$VT28nN zPJBo}^B<`rzwW+45nO-CIfqFACEKfAtjo32Wcu+H6w}nYT#`4dT<&kDf1T6p38hE~ zual1g$GagO@ghUeL=ckiSi_;b2)e2x8K)db`)l*{_sL3irQp>JHPA|ZqkR=BZEFX&Y3g-D7SD& zc3jAF5g1XJa}1Fn>RD$9u=meB34S~Cd7Q^5m%3VQvEY=2T`?r&#gS?!L1eJ4j%}(o zy?RZEWMrs0MsAR?f6dQ!MEmq~aK;nLlEZY%VZ=A)MJ!XWNltf#hR?eVQy2^=m@m+k zum&`t-?cT1?gs#GJPI-H4CFZ#^XpIkC?p?-8G<02loM(Q7s4>K^JrOv zmeN-b=Ol3~>fSfZ7k*JQepl+OkE()RW|`66m4J%t=1J#Bk0h7_QDG?yE73NdyWc)B zGDYLfNy=U}ka2fLGq^P7oH|ul-Npd)V{~ia6yK14D?Hny(ev%%dsJzVK}hkHRuU9e6jq4SS|Su*hn>~d2^e-~Uz;!5bnliz5+YRQiG 
zUU{&+Ch7Za+}B$jt2VIUU94ZQ8I;{^t`LRd12{KN?v3SSS%;izE#fbwmFI^^n3NPj zEDPFj4g|5ToA0%g9abc)|n8{~T#+jehs`^%`3P|BvvOH~`@S^|4tuu(aHU%O+H z7k0oT_0+BE*fGxi*=nr}`6?qdPRDVGFWAO*5ap*C7G6O}R3@4~J;60>5h&utuNN)q zldI{(#!>13!YRqTbbtGj;clNaO5r=kFy3Csf_EY5b=*e?hp&0qd#Dm53a5=XH6W$# z%o3h*I$!oLMXS6#J{Po{*zIq3#6-p%xY;$;MKth*Q(FHK?=Vx-(U^dA-hQdKIR{pG z>MS03w%)%Z^;o-4OK}jIMS+Df!|66uwrd}SCDPMX;j)Nwbk7}+nPLRq>a>ZNzA3cj zB}R*&1+I181L*k$?aw*%-+(K^@65FZB(%kd;9=oW82 z0ef_TWNC&M?PutNc&j!@ENIu{n1s&KJ7OT>I~pyfCXyvDN5%J1f6%>>V12oyZv+Ux zVl6mM7k(?4AkH9PT86_!wTT)XMJYY@CR`)G=9478ULgSx{Mm+%cASv={}M*uk3Sm%O$oROr;uU*EttItYG)MCK0T?ULD zAWn(Gu&zgOIvLdA7`QQ_HHc)Fox8tHcV~4Rxx+yH+J6b06;5HeN0A3zMV#`+PW)OA zvpcp3;McAeOPZxx zR3i>C=VA?Jx=t1>VboOMdiQ$svtQA>#v*eQwP79wq*&wTC+U4xcC20|cxYAxB~q3I zvaQa|FYvbK;aLeygW4csXOwp%R=U5_k(Laay~9d%D+;Ozc1jqLMoe0 z4`}{ankco=b=W1}Ivpg94){{;NZ~P`P0NKAc=oglB*rWdA7{A+QcMcQdF z;?3zNf`}6+W{ZL*ed(q;3yr`0I+%Yd&(7nL z?hcT_d9nqy!o=k82FrtR!Nz;f9zbn&x@H54uS`prr9$@-Gl?EBw+-B^GD*^P-!dAK z=v!!5pI|8cO|krr&?0?;CDlf?Rj1@emw~N5=J{>g*9So?GW^>#uMOy3IxH4gsgv#b z79!$^GQT7y6+!74oTZg4x|c+Axo)MIbFETh#b>0*pcIv=y}P+Y&YN_Op_5-lJn5zZ z8cxcd?6$jvi*4-hPxr*3xAf?fbSagL<|KEhstt^9 z6|jxVn1!>XsFc65xCu{Lw;r!7)Zv`Q*(+lc`f_`uz&gn|8UDUiX#Z|ii$7zLaD{^WYg5=2iloYp$ys%#CvyQ%{0Kcu zjg&-Z`o2?H&Wlh2$SB3N7?*p^dzW`y6;BZe{Q=N849MWXSFqf-iydHg7&Z~!#FW~rxc($FZJGIQh#cbYY(S>u_ zA3OWuU2!X4OPC7#c*MVylt$|@q%0=}n!fk^v)G~)5{zZ9u|ZxcZx>@QzA)#9xQCjG zlbwu)hPZmmmD1me^+1p}M`+Eha=d_%=kgx>=vXzKSctx%QzXZ*3a1~{{+91t0(GfD z7y*mO=~zU4NNI5VcUtR7CMjm&cmhc6G+YM>yU09D)XmvWh|B>;o2Kb0a#pvh`J)um z&Wvyj0}2BtfDt*j9f@@ji!Wg%0n^9DGD;I-9`%C^7dZIC>c4Dv**j=clE!Rq%{snx zaLJ=f?jaE*e;VzD;T5`PH2GL%7$x3qgJcW`ty|~hR*8~8EN4Cip&u@G59CSCFuxJJ zeyqkU_E-U_w%Jce|3O~Y$9MNXO+Fukmm1dhGb@-+P5MXGc8og?d%+R!{s^h3U9@8vzYp_jXZ>BV8hdzw|X2f zDwTVC`gTomg0V7DOUhkEEJ>qMGGpkI{k-F`eqFELmll^~W68jdHbML(q(evJ#cb_W z;GPo1(Yc=Nso#_{jJ(cYBGn`nP<)pWH0mGG0B7?c+*^Jw(vyOFsamvALfw$`#h# z$VbNc-!`b$OqL!(1sEH_5H0W8!$ri z7!@La0b6SRxNP{8u;e5Fu%7Q;_;9=Sw)ko+R)6-BHR+!cT8H9zDAkE)5z@ytjBRh! 
z_a7gn_;3?EjyFD(rjkeO!5@=D2&|bbH}X>1y9U}LCM=W#z8+rY{p|@0Vhf47=rT0n zAA}D8a!63Coj^&@VC{?Gs`Moi8-D?3KEDgS$lhHXNYr@4LFE9(A>H1{KZeqsF7~(P zI9LK{quYzO#9L-k7tfWQ`;SuEi%^gN)WKY^47MxlQRU=o21T5vFe=&|!piBt3d>=t zhmG-4;uwA*w&J)?7|3nREN9KpniQ4RqN~8vs*iu8et0FrKys%Kl&t9^cMeOVRu6SF zu79NZN#Mh$^76WlhtZ~hT)XTw58ubECPz#tZm|B<`10O`%MJs@H>Lj28#rt=3M#%) z3)WV@-fb%n@(~@gCTnWHF6r-7a+tJkXsAQnb@&H?UvK#vlajf8l=(R7Do|ZCkK4s* z4A%TSzWuLf-#@+DU^blJip{2|9X?{Dp=v2 zedblle_sLn_kVLEHC4%m)F7|a=e@>bOdW+qaC_(4`P_>KS$-7%@YFwldBoL&DLM~F zgJw;|4Q8u2KzqF-CJY*ezvORzf|H(K0p>0y?c@pC|6a0UCIIu5tbQ>3frR{Z<-Atl zd)Q^qo;mSL@cm;AihkhYv|kx$R{cu}^zZj8HXlNgyp^t<`7as)O00s5t!tJ}|LfZK z26s>mV6N#tB zL3JdFQ8g#i@fRoQFS;tSP-C>ML7mQx@lv=+8w6>>LQ?GqxUi(6KViEIiPtO}pP~Gw zK!j!sI@(%ea5^E(|F!(jiJ?!Kd&dAcq=PgT!V_TvDpN=CEDoM2z~mk=B{~E6WzgwY zJC)A(;R?s07XYpH$A6*KGHEF3kf4*bHK5k-q-Nc07Gt;m-}XtS01MHHE|z29zJ5(J zILXZ4?y!e1Wy$+>L-t>2aJZ1GV>qUBYZcEvf}H0b)E5gw$m%U{2ZSqNzOv00PlZt1f<` zF%d37S)Oq%2srl(aOL|t-a|5-qv%jzJn^S3MbPZkqZWMsl*y)OWNv&=U(HU^N+c!4D59D}ZlYNUf3X z-;aEt32+bcc~8I(QH7*dulMR1%a3pTLAB3sKnNV9c?|sEqKOHiMFPJI|62r}O9Qdm zd2-uujg5)#|sOBmq^IX5-kQyT+Pb&8}J%M0w;?q4w3$YD9|yOV^)6y4pR4;?I-@@ zQ4~i~o`*IR?<#QGjqU{(zn1aPFT6E+l@49G(2sXS+$2R2_{Iy9-|4Uba}x#M;ROAS z$CBm#MNbKau-A3__B0IS&g`Ih7nh5d_Cwfd5AAA%(GvBWk}4j+T!x51PpYU>ZX{oRU09*v2ldC7Bmi+&9+dbiNOkt2xEr7m*X>#j-nJ+V`4;3loRqpinK ztGyV~F|hGFkhrGQe#pbmlKh^4i$$?yCIcIowHSD#APAX+7i2#ZQXr*kn`h0 zk%AiuM^U=`ck+}dOFYjM{7Ymq=jk*emEf`p+}wZp{v7k51L-me-iOC)1@t0SavO@`L?-?sqB%TdiUm8v}Tk{lay9Mh%=Yfz~ z%5sjVQ$47jE&p?-yoc$Lmk1VKJ`|FZZc}m<{Z= zVZ~1-ML7#J8n5Gk+J5XetZD`4r3%|i?V(Jrda;Q=(f{9hBPF!{iJf@+%E9itujln- z;dlH+EWC};5q9$x!zot?25W8WuWe>Z%d<{IHD6!f_L+%YZl)oaJ}5ac^G;qpS>%Yx zjf_j8pd;bj&{^;kIq~j4_KM<&{kd;MMuk*|F;4x}T2>0OpR{NVMfpVPHnPQ=|T_@A;?2et&?g_)|4d}fj;=N zP+?Oo`|nuoA3SbrOi5CmdLRdF-hx((BA*Re{JHHex;k6X*HO<{d(}K9)izkK4}PkZ zl56Dkq1<@_{dq$eS_G&}2>crNG*tN3N3MVEpv}e0!fJ0t{5|s7H`m_2F!QUn{Q!T7 zH5%}Yrxb#wZ|%^E3#42rd(#V{tBi{@4)Y=?(wDg7-oFz89Yi~o#!nwbQ3_o8dF|yr2rG

BP~|0ce<8WUCCi7;Y6JL66D}$AW+PB)rv>rLkw2d~`0&9%akO%a(qS zU}lq^5BB9@?M~!&@Fer$zD!qpUEM9k^m#_v+;wz3J8oZWVm$&3$czC34iuNFdegt8 zpPd!u@!554@>vfb&;6oANc&awOSY9JffRl3Rib^XZ;RWcFzu7>e<{B_oaR*VInXB0 znoSH?zKZBp&=f!@^-=-@(5d$|>VRQizf*Z#bIq*CVGmweS*VIXb!cBFV*%S;EY_q3ec{}4^#8?ni*$i9Hd$4ROI>T+ zv~=HbzP`lxq&^>`ZeIBvU^vrgY$+;Ro$|r zHe(3+-HhS)bswNDd&{tA_RG`23H5vo`E#U0n7{ySE2-JW{r%MwpTl8u;qqhJN!JkB z?zhk*W*0M*K%67tZeTm;Bw%X;C;NM+4u2f|J>4%=3A_|K@={yzN?A+swVdzE_EDaQ zuY!KrI+(yHiO%AqIW2B0MVTmvZ&GJF3eKX#P?UxKpfXTyu6)^&*$ec2$XACo0RzI< zg2C}1i1;W#HZhHg{2kP{q_=C9anGM^-w}a8d`Kv&G3mExXd^vr;*v6X&T?aO6@*QyZrinc^%)$F${ksVd9xb} zV#(t{-cWc=Fceq?ktw-Qz$Z;nCBIrl{Pys^A-YH2@HREpjrv`>rn@gytKYv+d^~^V zLt^b+-RqL3bD!SLDH6lNja_E}gAKs5bAYidUmdjRtU1U1*ZU{E)zw^{)}n_DVMldfNhVH1B~ORCGCg5V2bvX$a9G=2$RP}*FPR(yEZ1z)+D+S)!|Y<+chtU_kB9Uv zGFzjwpkEG|qRf}w^`VnwQ70JCXqH3aJOdmh^`a$uWKOn_r5Z3rZ8QJLy87iq9%BAM z{wt2;8Ruz+C$wevEILghtxE$%Fu1uUj8gH{`db~=N;8^+0NfC(4l z=z=zJ$O|XhT94`1uHD(seF{Vl0h=~ISHTlD-X(H`_O@B;lc-f(wo8{_9tjoS+&8eh}&xjUqEo!5DAdv`v= z(5;CJwD{px&08Wg2ssa{-(5w1ZPeF~^m?mK?+OM&(ADp>C{NxeoUxq##C)d8dCW@U zT6*3$|5UvPzyX>1H;%!XoxAt4NWJg)@xB=a&Mi6pulepTnP9wdB zV{Xw8KzXCM?0@I-*gqwH1SSl8!o!@vdWnA`RpTwp>$4N86>lHerEZx{c~u%@opr#f z_1?+utb;ivU|-UmE8MC4=nvUsWA`)BJkVKVUw#mI#uN1&k`q+OJ~!TU!6v!UGFBBe zo|~=$EUrT%#uR{@dDhVX8wZNyLtA?FbV1j`q^SsM>RmqDs~tfTjR`kVh1^GY___s7 z-ijFj8opQCHXT4`hcwm)Zhvkw3}joTTyQN0hs^($2j(r)58#SndnNF%SI;4X9x3;aP@EUI4sDDAC z8{bbC6%g%z>Y_@x{B`H`{!ji;yjfeWwWanuGj#{HKU~diK8p;4+E|ci-4AYj1!Pc` zW`=H(=`2pV^HHrL&U3~O0ZbA+)R>BkbnbEm2fkbEdBM%ALxNsA05O4;`7(-dNIHoE z*duyLbUyX({63{r4$$qM<*eb=WrN#pLkTJ%NdTH2Jhq8P0H9h)D}o}Oq260(OBc-g zay-#}ejMJ#)2K{MCY+#a_g8*|26voE)N5EE$2PKur~m6FL1Z$ErOifFpQ9$0fJ@@K z;EIe}YVHKpn{~tGTK?+IesU)4w}^*tFgV(jt5*97Rw9UbgcYb(znY15ZO043R($n+6n;YN`b}9 z2qy2|;@OM86G=^Rn@)$HABTXrB$z7x&ba?=iT-@%NNrw2CsFjPq~V6C71UnbA?5(Q zdd0$-nlue=u5yWBx@C$U>ioGnurtF#U-QuVB0xkM3hW1Ve6GLio$9lD&mS)WhMofj z`@%90edt+7h=0}5&_6u zuDZ)_%M?>854h^31#TeJ-)s91#B+Z)EZMlZ6u<^y1E1QqVi6V~Je4iGdoWuOs}ejE zs7*y#f((|awf3TUm(>2{C%thYLP<-ZSMt>Tv7I-nxp6^N0ijj-eZk5P6t3_#`)Wio z!C%gq%#TLtq zSzYo(TkXW8-+b@V3jXWjE1o}CO(94Iqy>dEoJlVP5{P6UnttF7^(?;&`rrK~_Ad-6 z9w2_aVUH)IY4Jab-)u=-6c}PI#(i;1%$-`_bJA6POR&D0D*aY6E?Z*mKfuH9)33G! 
z>z`AlvDN#pUrl8;(r6Mq{4^l9C0H*?RdCrfJ+@oX=sS4mU;B7V;+j*14Mq2%cjIsU z%?BTWutDOObGB5eRAH;$asTt?ouzK{Lu%z>X<>`4q%=Mf$kTJiT~D?WHl{f6&_nOr zR#K`U0(6`x8x!YN!4qz2@bLdtLY~mQGh1GL&b*vfE6Xru^z^CgZe9*g&-*ArpkEZN zgTnQgZM7ga!(y*qpgkn~HL+-Ax70QrhZZT`x)JNJW$nhToML4~zG>M67*YnAaJ zW}ygkWTW_O^9kLxmmLN_BBaYpff_3>j0yyY!G(BhFDLOHIwc^%y)-rdh48_n!knxj z&sdyj%xH^10*SG;Ro?_JZ1d@3h_1JPd%+!A%%_|mtp0SB9kFak(a-Z6PL81FEU*QCk91A!reuW4Tfo!(3nG^_s zW2XYk27SDtdnpQB|6-wFP^BODvHd{Wn93vAj&=J-`ulIzwkPz#Pc4*#j2a@u*DiiP zE&k#5YnunfBf%hw?9-<^fynPNA_W4ia9!>Xf(8qNpYrHE;;ZAE?d1w!!7CALk4iCN9=v29y5|iwe1Bl`Ab#qg z1*Q{yG`sQ-emH8+LytUrQ+*RI1E>vx*}UZQ0i3(?KimkKxlDv)cJKOwxxkFfg|{+-IrMoF(&h*&JNrI!6a{!WDK}Pz zk;u`ynOFFiAjzIc0531{r8v{#uPT-|?V~|hseaq|E`|rsoIAZ>)p;_q08=1%yD+gu z3Z4a3YByr7fW@eVYN`xFiZ~s<<84K%b7LG6_l_UC_6^^25kBhDB4TemIlwkLZ5sFW z$_~ASol8aJ_mp{+;5cVWEAAw@S$DWO+1$&S^YG#E5HD=5o}N3pAg$$Xt82wYJiNre z@12x3UV4C@=dUR*%~?0Y{!5QL*&MzHIDP&i)hQVXZ^D+qZ8%o^bxRaWWGRn|GTxQq z67QX%!w>P;JT0@7EAJagzf{q<|N6c@H+09g3mR+)=p3QeiOV^)t*=0t5D}0n1JFT3w>V=b9*gltRUY3FLb@Vdw%GHtPWqTSX<5`6YTT2O*jMb#0PXR4dVY*#*r zsjz*CSk+B1d@$uRuj0AH<7&~g^V9IhgMB}?BUvqq&&;&FQ9fw|ZoehAoh~1Zaj6QE z|I#?{JqX_p6eN2xxbwnPe`dCb%UPbY()JcBE#B4{dF->8<(*5jbRnp!Wi|iWH}9Y) zdVqe~{f0*6AZB|vW}`#$P)-KN+a*Q#A;@1cA8Hg;sXjaMWtDEH@k%0BUoTuAVwq*{ zr+My6LYzfhEiGNryIcdB=Nn2FjI*q2 zJ9`V^xI4pkA9?nrACq(*sG7=&GA`@qLORYoUe#Oa)STRbWL+N8sC;1;k(tz5Q7$NS zn$*n@?2oNR@4z*6&8#>LiTz+9vnk6GDKWMRM2ci6i(@c=_I|)Xc}!@Y=`%6S z!RZJ~Ne$1M(crq6tdqaLr^R1OB=BT2XJBSs<(G{5#w{fj;XHj={=W9v%w9q~abC?5 zCRjJqSXVl{vML(7l5xJlQSB?h*|=i{n_q25x?Ovh;Gb};`?*MAhNaVS8pKSg&-H>U z#4vuIii!2^vQOyl4&(c3&_Je20g1 zz6p0$Son+>Mjz!F*83dhU_A_K6ktbD$v0M+ih`!OPj&m}-Z~3X+)K(O$gBC;d(7Kj z;4j)D#4GT9Yvmu^%S*@G2KCU`xz+K0PdAW@z)%``OIZC~EKi%pCl^|wm`BZ>E-$ap z8Lj2A!n1iZUfjU8#6q)=cnVERYnwQlU@c_YUd{@OOwC*+s5_IC_gsxD(-B=x)8IcC zZ?UV`m?B>=UK~=L5~FItPhn?!;7Pz{Tdnq(PHVp>tW34+87DTjV~-QEubw^NpP6^y zC?*G7+E|5FR#k<7YZDd4uJ%UgnCBZ$65l;PtXEg>Oza_;IqPpo&KmBSb6`yVj5; ziO$fP6&Ba!=6G_c&9U7(Goz9(b2w z+~hKCx!(EUeox!bwU8GOm-)q}u_!Op^X2`W5+an61k14^bm5?AK}y5!DN49?y-~~I zrD9H(d5$b0H#)q5c(gE3q1t--Gm&t)Vr9rZTD&Az+%aWBiy4`10;@F_7(|CACe+$H zye?yhb@;ih5G*Cz`C9K)B|aP>7VX3Z-K(K@5XzVpf?vE%gca%|7CayG=+?f>PIMZt z2w{Z_JT{`d&>BdJDWa%3OBrzDpg3N1z$o=>$(^Vr>zo=OUwDs9d82F#l?;DxA>;I3 zw>*bVjvkqNyY=F(z4@$SR%W%`0;f(rE!!t~T{8`XK7L!Q^+)3dE+q=n7UIIklTE@d zv7%SmJam~yM%HEujGSYm#WBUHfjD1%^tfp}Uk1x!n#b(elR3xI1Rb|J;ckhSaHDB# z=l%+TngsvLG`Ku+QHHcmjbu(>jDB+<~12NxM-RpQWnqrY{ z<=OM|nC%8tO;{c|d8H=&0(vYLCORT5I7Rjzzl&tPYMBEX^ zQEW}M$dix-;qX%@Z|1qQ`_Ld%Rf9M%l>UCDBBwE9kW2MRr)Oh5t{d{(i`j_I<6o6_ z%%0~v#)`&vW0zil4Tx&ip~xA{wa|YaJ=^KAIw>iXC)6H?rNP+@rdJUh7I4|Of3g77 z?;;P`s?&x%HxL^`VK=%tu?BAB$jxXJT&-jEv+-JvIdGM7m9z4ER=B{9?MMm98*8~~ zS(2qPg-3-?d}YzBIcMGCSpU~s?YU%OLdS7WdDZMveIu*uUcGUuIi*V+7Z(;UQ5Fig zmUM&XKkvtEN6N-wl~*Sx)_ydpG%3>JjVl)Ol!=sy4_X_%G47KzD_mA9T-K?B?eva^ zeq22~(ay3HX&a98aO+cfyr`KE4On>`O`+^`SM;pqRH~yDDp51dEhfDEq45cS?_{A^ zZXAllZfRD$TditL{?y!r{`}SY3EbMtl8*XW2Pr|@%)5|OA0w=LezX%(WW+MFHYE&N znP8fmT`Tf8$D?;N=QkgDZVl9U;Xo8TEx|wIqRTx?CwOV8syMFLqGW(8zbU6!y>_f) zrRwCAn-wIc0`SO>lCvRy1-r*(2}Q-J?Jv}xnILAQEx4u5uuc|}d8gTF5VxKOlmM3_ zQ?_dV24m$6eZP!)u@^$touL103-+r^UoxI)}3fUxb6^ zaaRZU$-Mqef^0Dcua)=o*a#(sOehG~qmrau5nl^5ILtSd_BV?JC-n#V;gacMQHnzgocDa3kL zX&g}W1Dnx+G!TT3j%cNr>1IpE&@`t#6IUE#g{Ld`4**92{rZvEKeB_;FL_*mVMJ2W^=b}4HD37OwP`0U=j zHy_(p@Y!XglO6%-Tz3CvjZ;{VJSAT_QaxP*`Dal)zOF=xa zIgtDsVoHQ95XS4%40MZ9oKZ2JJ={P8eFAy*lXa*oN5+=H^D)U}`J_O~g6Y?3b@mU2>jDM6GBN6hbIZ75Bh)XcMLUWjuV505R23mFS) z)zR)LcrTTpxS~HNxoCJVvuE!0Gse8x#qCKPP}b;$Fn;^+d{t-IT;i~EE6ZHLZCbok zxyGkdDP_pmjsQASV#2@*>MJtBK6<8fnPjM38J^L3A}j)XHPup;WmtwvmC_s{sp|jd+_Vp 
z!uZmla+Y0h-ly@>x4pLgi4#kKd0(tsb7g0$A9j83EUPS`VJAbBMt=YrqJ_ICSoZnwe)4c!EL~trs#AsWR^2kV9PPUqv z0lj+|N#+VUXrPnt`pA$KEG7p9K@(?}j$ErONP3qkw1nlfHDffN9k~vhxPFY0&Tn3t zJTEo~piX)!v4fGX%60K>4 zYI?6tb>t#3rs8$HTH13*#zXduM|U#>`F3Imx1&BR<>XOMXH%=fF~R+0kC)MQRyU__ zr2)C%oUW&J9a}M}NhBEo%Y*OI537<6NdwKy!kv<~sHOgMeT()9$O6Hl6!GpXVS084 zGyBj0Q53B?aAaumT5&casDcaRk7oJ`lb0oSAeGKhutv#-_r5DbVrXzp4L6*-!@Rzh zMO50Oy-pl5^}N$$ZA0>icA03zdyh82^rNyrzWXqU)s-t#OEtqdt~^Z68jqStzu9m4 zwZ-B_c!TM?RO~qdKx$-RZc+5eI_vy(iAVs3`N;d$`m+#=F}*Zv?njO1Vs9*KNGYaP zd(n{2^E$Y-3ssUv=#a`F7U3Gprb($@edp@XdYS9@&YqqGlZ6W`f_~(=LP_j1AJZc4 zNipWFc1*J;-+o!#Wy4G>6m>jZEb__4Fa_Y-bb)q9k6?Xn1HFysmLv61H-}cO7mvsf zWOXfOWSz(_S| za+$JR507(yqAgsQ-5F zk#Angf)sF47f2Z-eG8$onbdESKVByc6!5j8Rc4y_6^RsNp;j;pQld)XUW5ZQIr=>% zT7@$3N~FudDKvZLEJ|ot;}&|z!BBaq9roFI;`~aK>&ye^pB8WX%mi|+E5?M_f}n&n zt~tgAJ5{q)0Ee`orevi@iRG*bek8hv>D$vhm4?MBHlvD^Su(Kh9`hRk!kUn7Ov!H8 zjZS(eHnu#p$XrS#3>7gr8eEu@QejZe39swgi?gZanwTE6JL{PmGwv8_N-D0C>3Jvt zi+2i4amr^xN10?|BY?6Y6z)aH>aIxsK#e#pxHKYOUB)CbhoAD5{6LTFacf}PAV~m| z@u7`8g4w>>vX`I$P@C?9mHbDcin??bY6aJT_b0mA_OnK`N|eI)6bB|O$#Bwev-HT zGp0$JVHb4Y!@vtdCKZ(e%(romhxl*Ya*zRGlVEDp=ewz2!hkHQFc)<=H|dwD7I`m$h53CvspZC)3DIr2wI^6E2sWHKks;(ym6{2P%X z^1L|K9M7`HxWeC&UY9ei9U#9CRS=ui02RZ1M2#^N^Pk=v(4k%r zxKAbHaWUQX=J@Y`!3jOUSd)qGHb)Ss&zW8U27TD#@Vhnp`rm!(0%AZPqw4>Nh)@Oh zAqWN>Gi-ot+?Ss}Df1l!9S)+E9!<6TzWmw_0Meug-9a!5tA=ZcN#aQglT|M42HEm4Ea-2Q}G66zIMHV;z~)XG5+v= z`YqUS8?C_}Qj8?xq-yjjP{iWjWCKgyvGZLrnWFfuZN5O zc^7U$kcqnQXey&$)hoF@yq7NN3uva@PO}7!*AR}8hTz^;y!$*SsQWZvD31AEWqyOQ z@p((%gTX5uQef7NegXb7K>!BDRjH{{G11 zPb_wwHeQ@TP^yH*zr;$pldN-IIgWzH9VUzg0=M39GqQ?4)lAn`1R0E^gvYZZ%}<_( z%Z=Rsr{?~KN$GbFrhEW>LB13^ju=-2^Jdvmlv;~jvkxCRvcp%W4fV}iS=gd%Q7w=a z77r%X$gzO&bfggbr~QA|{QqZ%sgzcTkO61&Gg(OGcxRC@sC$rQKcH$K97ax@>Qben z6H3kUMHbemuhrG8FAV(OFM2|o8w#!JVAGrTd&1k2wfKa;J4;}Cs|#)3GKIUBFSXhm VX{QBL7;giAigKz~@mKEL{Vyi^2>t*7 literal 0 HcmV?d00001 diff --git a/docker/media/docker-image-submit-to-challenge.png b/docker/media/docker-image-submit-to-challenge.png new file mode 100644 index 0000000000000000000000000000000000000000..8bc2bade1666b2c9b0861e37fc3a51a25336cbd2 GIT binary patch literal 391642 zcmeFYby(Zawl546X^|QQiqp~p#oeu?KyfHi++BhMcPUW3Mv)M-xE9yoP%OcNQ;Gyl zfZ#!Hy7xY3|IT^Od7iuXzwb?+Z!(#gHM3@Bt(nhSYZ9ZWu1H2qLyUuiL#FiR^;;Yq z5>gx-Lgm|p*c@0S(IyTKsjP#XoTieT9FwMptF42x4GzwmnB;dv`q~4OnMR*K%G%@N ze}BJ(|KJ7g_xGc(P`8C&J!JZN|JKXiL{9cbN*zvfxp(r9%0g!zNW@v9NZ5%7oAs+W zT;IyVgVqDSxFRpl5yK(S42e@HJt))&=6-^@Fmye$toxq;_)t|HuVWpkO-*Vo;FOwzY;OnWI}(6~&> zel-r~PhUh)*vD9e-sk!J+f%IK^KTuoC61byIE$AH#G#*wqnXu-WRHp0wg?tHi*n}f z{mA>sdLJ)aA$mrFl{JOPow#RuR002oPS&T32=$9)LG5TEv5X@N&aAjHwsVrh*RvVa zH+JIJJ_7^aNYAU@>pqvGqhkgO4A1YCEoi>b3v1L!rFn4Y2TKnlzGRwI58HFOhjcs* zie!y`oT&o7S-9IjSI?qfIperz^GZBb?JO*~gExD>n<8ADYOQ9KX*$yPfea6mo<)s8 z25sJcr119`166{MOy+Or$rt`?1F?4OM`m{Wv&2EFb-c{n4=jm$HVDGs-~OQc#QWz^ zESc_1JjbxPJn8I*=X~R5BTo`nNFpxRF7#ifi}EU1n5BOa5j1lKMupPnbYRc|zr)wh zI}MxY3@+WcXDA>asb#)*!)O_W1*sU8Wn`s&%A$GVlXN!&@2_KefA;@=q~-m!i$P@F zj!4N*`vVwFY)N5t@IJ7SY5l@ColS;aoq^Rfb>L3S=l%#DB8yOhOI-Rl=4u2@Oysxc zFf^E7GJYCNzi{~85Cxi7nnx)-8}xs_PeVf7nRWl*<Wi~$ehBiZX6UXJ#;2tfna zFz45lzQpc#ZxhV!x4#xv7Tx8-l?;pj#keF^@?eE9=Zl+NZ1$T=?XN4@ z+? 
zy`SZ+J+L;bK87*ty6Boj>3(mz=94+p*N#Sy&>+ueWp<)>(dM)lHP7pBON4^E)h(#1 z^7NUrtyrsA%E%;Gdb>+^Vw}Cm{Ng_Kej)I}zu6b@w98K$29|m4dR;z64IwmAAbGeP z?*!#I-@twr3alh{6xKumgy8)0vUGFX|LMcM zhL3|;__Z2xCSf-cY81_ngSZEL{u@s$T7?Jy?3-qUMmhs;cuR^Sh7+0i2s?oo> zlVtSXQW)%Z_{aO~ujG#ITlL`mkjuDF_&Wig$>I&^=l9>4wm#hd#8mT<;0^WL#4^Qt zCZV6wYrG!BO`)DlJ|DTiQ~4zXzT(p%n0iH~rQ#_#D4S-_MiZ_6YGSO|{)u5cw>&&| zY7BF+5d@hvNhPx}1dqGEoC>0?z+*UC#@pcMwMQZKuQ z`9SVYqu#%_LVUMx)ty_KA}fNY6TT{OK-fyr9_6!gbtZzBY%W{H?0bLa{lRA>?ZXetTrBxQ+`@DXt6>Tr?83D6=$oEC`OXK^-FntY4rClazmb)L{Jd^rNKM?mw z>G#J&rRU$hzAt_!|6ckXFi!N`=DF2#)_mD~>*qOdKfR3{=gwbMewUp2B8a8*3)fGE zwKx1pk`b-(tqBXN5LF~gNP)d(8Jp=>QDqhzX&bKmM2{$stV0C@gIS$f6EQ=BY)K8j z@g^q|QgqpqN?v#vfnx$ZM|6mKLR7>vA1C>Ay-*DQav7dmHCC+$_;oG-2d&XZPbdwW%N!F7yF zmfrU`;Y|Wv!gHDyL2mma`_F=0Lbn9{(tFd5gfyI_3oHZ$XyW?vxw2jIz&Tz{59-Ql zXAIK}Z43iy=jtx%gzCU`M9D|>(!>ENQC#Jmw}f=5XPe&+)GC#%m$-rq8>bx1e}OQ8%gCrqU8t%TrZZhw>P8 zzYS#bu=n&8n+xWs)T5<$)bcgoH(qE9`NGu# zqM6a_W^-lx6Y9tEDvw40j1y*(DYaVGchK)bdpa{T8Mj*3TRH(kf`7NowoLT*M6YV^ zp?|`MQxGkSr_>WsX#)!*fm?`8))R@f)6DhG^bzr`q;sP4l#2!;d%_-~vU|Rd?P*dO z^l4`p*B%NAY+Ca^5f${6-{-1-0vf8YIE`+M9KP#7rh6xW%!KiRZCF^9ZkjGJD*AmT zX*;9EgD3I!@%W#1eYL_edsr` z*Qj6Rx6#ANhb%0U&ssH(G&nVO>16ybcMnoNAAP}BQXBZb_I_<|;1)R^OIhZq)R>Ls zKo96k8H-w$r$zPk+U`JqL6O>us@TU@Z@7_0f6X%bT#LOZ}C()aTA4LWAJU`6t!n$sfcE{Z&n*>dffamolT*)CQdf;o*#jE)Sgscs^I)p~45l z?@c@`piiyfGZAfbiQ0?r709F@q`Vd;&l&*F@()>0UmBRyq}4PGzufo^bx3m(Ztw-1 zwr2ZDAhb8^MsC`E{wQkQ+zam^80n4bMaMGUJUmiyiE)f+T$_Fciv&@w#No%yQ6-tQ z0uCD9nr`*-_gBYO^Qbhca9@jW!0X!Ya;q85L596%`__BbyVgbCot4cPRA`y%U;Eme z4Myo6>E@R%Rb*CQyov<&76^S6`YZ%?>Yf%Z7cgrGTueEp-f`8r(4Brqt;1E}-yYVm zvaPV3-~}QTNp`uYHK;m2`IgN^HuY_a!YIawq+WXQTh-ozYy7ff{f;Zo{IyrKn<4_5 z3N9A{2nn^2dUKV^J3X&!Yhgt~s{2DUw824(-~7QB)otdL)+lt)3k(2xZ*kcp)IA%S z-P>6rS1XrK_NF^N>bft!4_ffOAhG*Nv{AYg=pMbhX!UB*z}TSGT>m%vLOLF4Y2V!a zo(`q40f_ciAwz?zLz9pH<Uw%8o)Ou+j2d&DBoYaZ#-rgpgVIj8 z%F<@r?7{T8%aH73N4Jj{O1horD*My|q1e|C%z7vzg^}DN*&3Rw6gC63M+UdZqzZ_# z{r(_Ut$|CbC&)z>3YWmiX2F$>KnM)tpPy^!-9Jy$jp_*LIuyt)YsAweYG2O7{DO~g z;hX>>`gw{fg&%~nvE#TSap)zy8}EXrtP#o%Pr85|8#9%~V#U9fADyS6qZ>m2VLDUP zmkZY<+9QF%03d}Q)};7mqpxJEriR0gP2a{LzrTx$J zTU@q()WO5SiFUxj|3@7|?DwD7N9=>8{>S&N&rvu;*k2E^&--k=|ENtuntkg((uB&` zGMty%a!N|rZ*40N8yf)7-qq8FVf7DJcii3>0C8|ApZ|GqmEJzv$HBpyb{;tT+a`$|3iTMcn+`cF2`Q>MRF@pO`U zs;{QWB_ z>7S1N^YwQ=ZG0X6*%JWxk6~d4$n&R!hmV_==YJyebg=zj$o`c4o$PPp`g?bhe;O0l zbnvxt)_?8bf}LtCYtsCp{GyV7>*xPd^iNLzPO1yE@sM+M!BTok|8ueaLHyUk|4#U~ zJ`MiaC!a7M?_c};OVM9Q{|rL>tqsuC+3U|F>H-`*rTHa!{-^B!PG#^Lxtf6e{x6dE25*omGI zsG98zW`tZ`YUh!>QB^=lU_YL$8;@vcfmz+B$^unF&Nxx89qt{x35kQZbGBY9nxe+f zuQ$#XcJ8!7u9xkguEF<~82|Pp83%-{83y>aubp5b;f0eB=@acCt^PoIHsgv6^i>SR z>41@^vVFOGWd)sS^9v&!*KzEDpbCe@AyKywM0e=?WcE+HQ7a)Kfyc>hiD&C}3_Ld; zP@dM1!+@JVE7FPS#+&>C}-X-et3YhuBm{d`c?h#B-`fUH2 zZN8mRQK5QJLSn6raRIR<{jEmMYYnVBZt~^gHd81L-oNB^c@ zw!^K@v+Tpa+-;~VlH@*b2Bz`>*J~VH{QI*1(g%|jE?$&CftCEfN{H>n!z?s(Wg#TN z{>i^FV(LFk^U-rW{Wm5WgNILwxORF?^59<^?4OzWsgGqkeoOzszdThqVGC4D=Ax%& z%!dEYD)nKRwo~no{THR`&-AQ8aB#buZfM3{{Tr*qPl;vPN?h;X5sp)W+mE4F@DG&# z&MN)DGEF`4-Y)Q{n&n zN&EQ!RQO-mrvIM`|KIO>KmY%y!oN}%TrIZ%E3wI?;9GQ3Cs#dT&1l1V`;B2BBTO17 zn|oISiweS=Z*Lk4ZY&-|E*%-z9f@Sr5Rd#Myf2%LgL|Cjx#WRSC(waiSRT(Fb)WO> zOz!?uKAGgha*+ZpR~D!Kwy`7MY5unjUG~B4uM*BG4|bzX@bMXfc9rbZn{m1EbMcwd zUgLz#;3FG%GH2@Z$sP#Oaj??L;e>_C8jFbH3)5m@wTKA{|89RZpK$m;0g&tyd`~j| z#hs+c8E{QWg;|M)>dSv}^?pkuXwQjzHA2t+jcgQPkXUsNxNd`YxcL3e;OXAV%xRbK04hr>mErhmaf1o zm_@w6N*+@Ker(>dF&$=@%gZq0pZxuo)LH@dxRXC>-hAb^fa^!x$L!Yl!Bo~8Tb=sO zeZhZ=DWr^{YnBD_S#MAtbx4&2Eos>8)wN4GE3&3W>8eC@QnT-^%Yz%&r6_(bv zd%BsqdVGG}i9wE7lb8K(-Tnio%Ko^0pVt_Fcgt3G8|bJ}W3kFo<9Iyx)lgB^R9RK$ 
[GIT binary patch: base85-encoded PNG image data omitted — not human-readable]
z0uF&a+gVzd_5NavPSje5bg8<%`o>5;2KD8Etft`U3rlV*6ZMDOQ;lv^pKw-DE`Y8w z^woLY=xM6M^?5s9Ws#+72+u$aN2cTOIgAvL;bxjp(ot#72q^o zl4ibEd^fwX#WXD#0{V?#+VN%Its^J*jw#__GwZ-8qJ~OQlNn|vvWl-6K&md;Df)an zP6J3#h{`&G=Ku1fa*=_)9w3b3F_x8=>?u(Qy5B&oF#^?RkL#~I7^_{E7Fd}2aBs7 z?E2Z(nxpLY=6SZizez-TV1^h5Y6HRCs|KwLz*M}>P};IQW-+_>D8r=%?TD&C;$ALw zY%h*?3X;?JHNUr3p73}w%x*-EKFUu z$p$~V01ev_xO@$9uL$T5?E2BeHw7QWQw$;FR9$UMrq#X36NIOFX~;G)YuE9Q)54xz~*V3 zHaPxXF^J?B12{c0)7)78xr6{EfaV}G)b=(74^aol{VTP~=tSwrpHHLp1QZp!=-Z1L zIECah2*$wmIV@SJPCQus>s>6f1fjDFAY6KXn!Teg?Bw|kxhRFBntqBn$!I4s{0!+c zKS$0gTGRTZhKc_Z1sk z@1ZDrR`T1o+=36nf%6$kS?k^1nRvGu1-MGi$swOf2%hHI={*;>X+YXMjP)n4$y~BG zh<-@T?@J0ssjhYJYgx0V8G8U|VWXiPsZW_D^ttw`G}PQ?U?SCcg^lcO%_PntYYF}V z_%Qd$hH7>#{e9(H$QwLB>BK(Gu0Gf61Yr)}oD78BPc5ATsOR|l7fmTx$PH43=ZRBt zq_ten-}}j1bD3Vx7sfKa^tp^#K^?p6MnzQXzHT(Ttn;DPHUOXf2P^9((!IDR^dZ0H z>EnZVb`ur6R@88GcEWul=Wi#TXS@9E>e7F&!qS*N?L1x+zbEfOp_o!>rtFiQF4M@i zOB_&10HHVr2dH)WdyaQAg>QIBLwmh2bVknuh8uKYKIkus3E7RC8F$rtWV^uTw`KN6 zJXy@>fCNjCD;ExFtEU3XFSg8of$BqO$$5-=Ufox~BPPp6-N!20_j#?4l}OStva`7l zD3HS49u8Z2c$Ez!O?zri>KT{qbeP68>c6DA4 zsndL}Ap<2yzaLP3J%8&#UGmasV;=!FTOx1LVbf?({f!GGnyDmPl0mQ>Ps8;H9K_!v zwGXB5Hwy;15eF;a?#9*oj-8Sliq+GJ9f+#4do|sD z$_J9TsY7m5aJL{s?r>5ZyQu3X^1=2l z5u6EX_L6bR4Sfj|_|%!!ehm+MD{eTJ#Q_Vhz~))$Ps2@fCmJh-9e6XFJd0iEdsv&r zswU+;3625gB!2|SyUwErxh8HwY;_q`6^z>Zj#s!bz5809Kt0QmusXt{r_M8zRj)km zWF2Zp$1FvZX$)L`6t?Bk!2YF6#Xnm);M&@Gu_{qTB{#3(_w}RBbWKf1rgx}|T<9B~ zBXtAmJx1js95IX+m8H9Lq}7SX+JkIS-aS}-bq9LaY-r*}(Zz)_Tg@ItQWm#|9MKOR zK*gDRZNsN`98bQS*M#ps!`Od)L}Q(W&#@yHdUfe8N3~Yo=@Q~7y0!%LT+Ycy<)(fD zraaP)N(0l^{rb=W?7Ea4YTjLTCU%Ku)xvk+BEhv2zV~&8x!4B>lnzIGni!u6P^0s- z3fp!dhDB!&WE4Fhet9L()1TKqfr1(j4*~Ec?bJCbO;v{dPh1?hRpV>J<2#!a3NMve zRIIp#HdIWc!D6`$hzHw!MPi9)k$I7pf**cj9x)(@aWV_&lj1|?k@3Wvrx|c3M9&11 zR$2Per8^j^k=Md$?66m>8FXLO^|93rzaA1YiN^vJXo1p< z)&@dR2Z^R1@1I2kQlwtRuz%U#Y_@*3U5(tsyZziwaC9YR!;)-2SWC@*zOFv=Qo5T_QiX!(Ee$KpWPgNIqs^vT*LAsS zuvZCy^l{803<%M89QUzQ9S{fmaaOtPOa?DS-3T8#Hnc7(2vjjFxgukMG?OLE8cBcW zTDH7e*xtI>+!#oCUUGjKRl5+AYJ=T=cdwhFyTpYrdIusVAd4o3@A}N?Ud_@X9%$m`5q>6+pN$Ix%%Yq0IpKTtod%Iu%I7T6 zFk7CK0MoO!;bJ|>%p`hCc_)g}ohAu;DpuLTzz;{ef+WBWGvlQy__vazgUpBCd6pp$ zyQNX_vWBoeMm2}n{jBF55HF__d$abUNfy|2Hv9|_{`X`j?&Ci!ZC)5P5IhK*hb3V? 
zo9FeV685D&3>Hhzj8MX=+A?Q*%El#RJ)G2|H(l^B# zjX&G(XJfR6=M|Z2u?qBDc-Flb?;!fjHUcVKp|^=P`hCjK@zsD^f7bKQPKH#D{p7gluneA1+1k&sKYJzK3Ir%+S z!*NkY9cT3fSFEh_vjnm{I1tuuQRn@<@l>`_P35tmxKuGnVYM2(fPF>C35@98dRHiz zA?IRG7X{BC;yt`cK=Xx!Vt@b{ZE`3Xtrw_@ELYB#Eo)`qC?5RrIX>LPS~YWZy40^@ zgJ61AEz3D%FJgN@oBvettrA|~s~DTH5)=(YXXAzCkIQ}hyTD7X1<@dof!>k39&c)etBL_m1W4wYZ{KTG@-n>rh%OX} z&dAw3*s^mv>nC#90tWf1?Q~h7XgLcLldhsMn`Dt>@nd9(>!O#7ykoIO1^PEI%9JM2 zF69wGPi>78At^n*`TPC+OpKz4A`t}8`{J6CQ;L}B5+jSy=?@-580BT4YuYMZ3`ES> z6m*AM-gBw@?CL6z;@RL7nksyE_XF%nWvya|RmY7Em)TBsOy~2A&E;C6)J|s*-AbhH zp72F(cX&>5S?d03bLe)(X*Z->cUs``J)qB9)C_|fojDMnBi(dAt9$ZFQdAhW)OP0c z=kj8;>HzD##szL}Ei2nFv{i}5v8Na-;KtdGb6)>8{p?6RAOQi%LblYK?gls@6a38U z0NJ}lCWkyyu$8>nX27q zCTzXmg~q{}z*Zm)^;jE<_l>So+grybVY2AfGv$Ice0B`Dn z4(m*n^lGk2k+fc+F(HDqcms+YWScYt7Ok{NX5RCG-KUi`=G z^oX?MaN}a<;6OJw7#+M_FSqfdh0@ezYyJ?V2x3!pyFt=-ovo3pF)?5t;MB||D*o4c(YS9lf2 z)M`wl36+bY1d`ysLWjzu=;%sUrGpsNy_9-)4eP_?rD>0+y1L(r*IYB$M0LV#8eq0# znVG_!(dYMlmaT8@SbJq#16IS4>#-R{Oj`a@_N;f?npf!gvk5Htg5FxUm9Wn*>-HEL zZ~D~*`I18!@qylEHxvb<#(6y_6fYFX@aPqK>j_-rneoB%moaLVcz}4A&Q#p= zAwoBiiqs)&DD=2C+X+ZU+bxE6S&y-9XG@04`NU;YSzbw0VM^n{KK;$M4@z192f$ye zj5P0nyAH!QWTHjcUj95GU?eP)qU;tme6LSoMki3!l$pn``+5M#oKmiCKN(cy7dZIg z?#j2Zj@6lt^tIl#@Fzq5beT=M9bixnx7_-H-Mu3Ups=+VZYul49xa!BL*>WD0x#H^ z0vIorQyC?lMK_lTr?cA5!Movpf+I$Ix;N~+8fOy1pN7tE!*sckYn!<%+a+qG-fTCZ zcx~2lhz}MI_Ni_n zC>BGZ4E*G#4L)mKR^u~f0f0*=k6OtPe8?CRUmdP=6)?bM*0~OyZy^_wR{;)qF2QZ8 zQxwH;>gwS%s|~!bewIROwlV4}P!4t-#xO*{$16Q!lj`oVE5)aaXexV5)KR)!3bRvY z)iaD#C&s@QlC`SlEbQ2}42PGdb{kk0EPUq-t>GMF<#yWEwT*!^uU&%&#&{=mE#0vt z3?1?eNIRT5Y#a~2%fc6``$bklAzWn!=*04)r*x47v|LN#jbm@V0p*5a0@<2gGV;PILbhj9Lj-4cynjG{A7uD${i&f|P*9g5 zNx&8MJvWUxR}Cff3I1%De;?O0`Tg>?qL8&h1u`O4!ZeMJi^-gQ1n+$#6FFN z9Gj-QV8QXz>jjVvGDmI8Ln-Kn6v)EhmG5hQGP#G84-$2!Pq-+v;s$%fUaprw59Mia z7w*(vK@FTqIXQY}L4RE1ADRkAL`ni_ijc;OxE*SGcqW$Ow$2>!3L{%FMS&0~H{R@2 z1AH>~{s>>iB{&-yd5=p<(CMaNp`6>s&$j0YOGvTGM*ph#V!%9_TzGOn( zSD|^H`&ddZf5-y5EE#CM_+7_OCl!bB#N_PW*Ri#w+^khyuLxRdHty#HIX8wGy5sosWzo0gW!4Tpt_?9Rp9kYgLkt)-caS7Y zjvzybC|ilQJHi9TE@nJWOSwVXI&bO@+Kj&qYN=@Fq5W zQ$x1=aKQCkJsxZM`lj-Os{WOrEajZ}e%Gp=^y_{p+d(^$j+?n4ybJ|ZIX8w4Sm|bB z5I3nC$*BoiJ73%A&}BS^1Aa*5c4M6QoD!az=ud^=zjYVM$S_-;(FcbJk z*D%msG%)=`GXV{^?=rkZ?`1eVXk%XQTY!>!e=`RtR1F*{i#l4#@I8i6`2;OqrW786 zw+ytrV`8#K-hFUejSk(U8!=4BI2R%~+TNOE3uMx7wzXv#T+)qZLicd%_`pP))B=_S zdyDoCtLDrjP@u?VE?gzTCQ zZdd@0d*%9L58QsVTYXSBP!sEHoo0$xpcBcj2D%I8lPhbp1I*MZ&L3^jo}SHn8%g-Y z^Yf#Xg_f%L#*g2#vTxs=t%i@XCJc2mRz?>8rUor~k*+ zdk0b-|9{^lB~*$~W_Fp$-X$wD*;(0pJNBW>jxEVNWMpsI&XJKlGmgDEIL6}7}ekx)e)=Dmfl%c_XgA?PI# z+8jI56e-yR|DE+@331ld^m+A!3XZOuEscaDLbc0|c(Z6b3}7}}9$}pM5u|N@YXaw6 z9!ZjENLFc#^B$V4-T&yY0XPEEdTNgkj^SBb03Z43`sn-z80@J z(6%7@^!50i(pt^G6!W7t?t-0(g@Mx+`=h>ypwf8>QojW%N5qdEHog{#1?Yl_N7Bnq z%L`nvVU96Z<^b$J^+ocW;>m^lVZQnWzmi?>I6mOjeN;^oF6|ehCxq+)*{N!-(eX=AS<$0^1zR29AlI_4_rU`^nb%oAsl$1Bk;C7$e zq-#qViPQjQ^S@L}8TAa&UJ(W-Rtex4PB zjD6RBQIzYC_5UuwkicKJ1{UBisq&;g*rxpOCm^A{lTMJfu!vN{TV-obEV-7s|2@c8 zA1q$Zd_@vcENfQ@J010*C?_c2C^2nrx}Y#pSe+@?fS)qWX_*zk%*v1j2A3|MCZmh< z{Y+XH;8u0Ja~7EaZ0CzoI$;KFqn8}?Xv40jjV@~_b|Lgnfi#;qlD$BFw}#HGH@e@Li%+;3r5LJELzSxS>do zP*Th87-bBgKKR{UIXXn)vfV1TC?HWE zi`(jB)=XLd^d;hl-Fs4PCP;Db}UQL5!?PX?ZZdB+sXBY)qQoWDwQJ)@Nv zy2APEblq1UYwhn;xq;6`+}^o0D8LpGLHZa#&2q zW()wwHU;3?-0FmLJbj@}@RxLAf$9%b3b!L5Ti0~;v>;p+Wr#hAw*y@4d#U-v^fUFu z62=rF`lh)%+T;&J65BgT?lHbl(?PVT-MkmZSW_%sAoExTJ21w7yZZ67qj1T`KbwGL zUG=>O&nO=!l@)s~l?SOIZQ-9jPc@eoQ(|UnXQA5Fme#M;DULw1_kk-Z`YM!o z^{uR&GtuPdVlMyLUXCns^87vN-FsX+Ma2`NvZLY^EfcIdb|1SxWD6DemYmvu#2?e| zC_{&^ZHi$CVHxkNkMMw02|B0OZS$q@9K$xP+b^}tMhts>`o8a$RhZ-ST2xe2 
zj>>pvsV6m{!+HG83AkSkK^Oe*HHkS^|`0a_cqU)cj5l?@j0rLA4#Ys<3_@W zaWj($Q$+`HTm@i{0#6}RSDP^h1+(@~LYCflhCj!K4dvQ8<^JcNy!e&GFv@6W8vCUA zTbY1bl1Yty@zNHoYf}Bncg-Bc|*|S%p3B&K1bScNPe`AZIi0+^5mUdsc%vT4N)AAWlWSF?3l~ zEl2SDr7{x_F(~mfF&YQ{(kq0 zOkZcl^tWv`NK0)J`C|+Tq}|bdnkB|ywy(QqJ;aZH3= z&H<=z2m(8{S|p;dyEnaqh2jBEv8miIJ+|QT;&Is)>>?+Mw=Q_HV{ZCC;+iv=x5XEU zM4@;f4dI(SJXA>ldqLZk`u%7X{a1%cv`m&Fx42ErPF>#o2wf&_ROh@%hl3`f%?}0> z`O|2lTIa+@(3A?(#fy95n-#ilHQbjAf>vje*yAb4f?8EXbFGWnW5sshnyc5=KXN^b$R`&+JupN%Ps*>3 z+o+TTW2dg})iyI?Pc5;1DJ~g0({UU)j3cJ$vI1^5)D!&6A-b4`DE-*%WRX{3y+5hy z?G_*WDC{&qb`#}R4T5uR(mQCi)|MjRu`2!SPvsM75G7U-ogqyA=>9Mp=%>D99w%=Z zoBha2ovdNe7bwa1rL)xi0m}rtAJvg%L%&x3Mx|bx#QqFHgWSfo3yAfdu>QfB`-XD} zIp-`Fc#`~+-YAb(?ZqBe)$MI(#0WtsXUDQ7$*>?n$2-4Ln?d3roSw4_rYk1Z#Wo?h zUH$K}I{oaroz?Q(0^(B;3A;~7qMu3jx6;d(<)??;OAh6Ylf_Mb1q!*s;QP(>MdEgq zG$lA+8-2!^Vab}-U56MQsl68L<;sA01O%3ZKMcjJME|_E@JVwz2sILQ?+iJ(i#+&=e(b)7e}vyWd+^0D&5Pgdgp6rInKBYeN0X5(1OM&Kiq?$?red$auI{D zFrnQ{snm*~7`>toI%JOwCyFmk%?Xpq>VJy(9+O3;Ctyl@V@ zy6~1bXEdr0E^cdH8y01$^Nr@%IRVk(fp?mTp;m z_yLXJ^R0YO8SGX;)>+>XdKOVnXY@qED>_v|1WB3`djY(6uTp?5&2J(wa||cF-XUK| z(uAF?JzEot8xH!oSah}mpCwo{lw&KQb8WB@eO29;rEaDBj;+(nUx+Z?j1Nb_N-u+l zy4sFDPQH~c(L)+>v>$Y;pr)!4{R1PDIi^8dOX&~1+R*>1@`!kM;w8%GywU!T{lS47 zt$2(CGP^z1w>&5C!q`}|(YURC@$71^lz*mJhf6|bz!xksDiuc!2rHM*6qw1B2!{vT zP|z6_BzQ(-@uam?X{i+D?yOm$PLI4i2w2=N#(K8&pu9_cK~$xh&}y)?P^X#@ktJy*`s^4Lb1$Y?q_V{={E|N z6)TC3l#2YBD9rhmBK=yV`g-AZSe;h;!1C(3`6RH7gNIrA=pbB;?Fky@;-9szrVrQ7 zrw<&F-WY!aX4dJ8(al->ROm86-)-yfPbOAb;=9`6R{nZw=&C~lJMd-5t22sgm|=~g zs!tO)_0~@`Twr#9^roU2q}=B6)uN~U*j$e^6=hWFq0zP#2VhB;VnYRxBz zFUOLl)XXDtQx6P(0+@&2Wd!8YW0;lZl7G1ty9)a#0;0=nrrZpyng$hCAI78%scgJlaVU)3qep>S?xexrMncCe-BL0n5teDT>if+zU* z82S{563#@u8{TcSNiTfm-uqpklFmuO({}gnz_F)XCuQzI7+Ya<=w_x$VHY&?so79z zR=@$If&?N-F<(Hgz(!6UK5ec|9PYILtyAc1DjQs9KcTGH)71G$`T|1&LLU`(JRm-F zCxv`u%TpGl5wzW0Ep#Venyx7Q;iIx*W2pTKh*b+%ba}Fv6&dZVrErg0GBa@~NnE#7 z8|d(3J4eR>@1Hmb)MLX2k;o}sFgaM|-bPsH&qjIFB!oD`Yg*pI!X&S0Ztj(no-O5! zf%FGSPxgwUCHRU_s`UKopSM1)#@nH-zjMC%@Zlt13-$zXY^(HZrCZh z2H41w9r!r03rsZr21=5e@~pB!eQpG}a?-w+_Tx5~M$u^p?DrRo_)xnhb`hYwR;ew{oL;@WJfq|90?QpMme)iNEutFT?qD zwImMj2r~;2Qk}z@igmK!Y%kt;=4_xGJ@^VZyciZk)g-`Z*5 zoULN+l7^jOkUNfU$mEJM7K~rn&TOuSZ6*sIL(TR?x66vlWBcA}k=t~g)oP(Q4l}~3 z{IXP)yaZ7S$Ht`6R9w;S%WF@k$_$H~J=bpCo@svTFxKxTKh&dyb}`h9F(g*`p?Gy? 
zd{iux)u;CD(9E{Q`A=03StLvK>F}lYr{7&WB5(gqd5I6 z7dt`9nqkVSZHx6n>Ata37oLDwLf2=TpeSIi0 z+62`0(n>ywKF`yZfP~MXScR+QtAoy~*bi9n#X6Vi)Nf1P^oaaUpYN0hQ3IIi4k#u*01`LmZ^c8fU%Mm0>IBOxH~2MK5H3B2Nz(`?Y%b4Ir)W zfZgwQXF7}%GR>FdRdUU86tw#{?u!Nujpc49^E}Z7`(5C`xQlL%!}vdP>IG*lK>Dp* z6sU%JVJ^p{qbcjs>kg*WN2~femB}(a%-6O20^`ue-cCy;(mwajbT4b-)Ats+#(uVy z!`;q+taI9!y2QL8I^B5@J|9glF_~-~H3sIoG>AVLqDoeFWpmSNQCgSQlK|=@SmJQd z0MM)6*W{f5&P-rUC#30h$@{TI#V%T9gm>!b3xVJg=7Z#sCzP(Nr#lo2x0OC`_(+mQU`r;5Cch0DQzl=$I!|N%tjp zLOuP(*i>3 zq){q`w^H;|^{^EL5Dz-(xmqQ3of*>n4-0^_NgDX6tMJF+09iqTtb50*$#W}>%6s;8 z{lDrId-#?$wPb}cP_nn9AT7AvpjC8X{MJd(cBwT^$*Q>xKjH&I!566JQ8c!uq`>(> z)nZekRp1P7HSL@oY6N)T#A0Ub8}HPM_ARYg9Y@Y6o(D1@5o( zO^9v0FPU5$@IR_`m|!gnV!_T7RjF9HOp466SUD7?m+@)wl9pJwR!5;xU!Q6{Z5DO@ zLk+FQqVv3El5#04XDf}f>!?>$5!O?3eHU=|&)gN2pRMYn z;oy4P^=?ezNVdDz`6#zLXL~|=zq87HGAZOBC^K-@`JV`1@@kc-hPR9hSaRVUwUeug znjb4k*cX4uu0KCNy-@kE`BNxyY>4k~4M~PJ(D4Cr#8I^S=8uz)v2gRp_vo?o$EpJX zy!yfw{XtGM}{^#?xUJK((sUWt&3++N9_`-)@YL?=cv1^*2=ovTDA5< zi*YC0^~gT9FGch^CAziLW72#!J#vJ%+X6QVEF`=f;|wKrR-TE;_I=S>Df{`g9rf90 zP$7n9J@Bv(3y4(G`LvaDp#UgBU#^q$%|b=%rG1*nuy3SRyNU#>t=c%TUVE!@Y>hMX zvXm1IU&Zk>Dc?Oe{1ZOYz)78wALl-0Ci6i9VKF?!UtJoO6|iThiqknBG+g;p4niNr z!oLmt2UD=c-(zU?I&B%?@VHOxjNon7Rw?cUN+>4>L}7gJq@my;HArk0m@e6qc&sJ9 z-wX(C7`XupRm(x{NLq2v!rhYutxwJmT57BZ4~o4C#2>gvX`?+V=phAcO||oyYyr|c zl^reIeUGZUsC7293CqXn-27R4Jy$s`Ehmg#)SLGn?PjGUYd9QcGIJ| zNo|twiAdatT!jfy+Pg$JDC16k18=L}PleTTpaqI4kdOM2;E@wcJ!@we`@QXCh4f0R z%O=xTSGB5APZunPDyAy#OPlhWX@%YIp2yyd3<@q{8Il+)>unAVvZb zhQ>FKM88n~eqvI-&r64If*4+)5`J^Xr}I(VXNh8igPJ1~Dc${Ca1pw1JNV zn?7=6hT`mwkBrfGc1wc6?~dGa7OOtYm(PW=JzQw8^z3BPcW$c{Urc`wx zS5Wc%xlb|lgDY7}QCIiSMa!`6?7ma`jEejW%i9G3y)1vv!cD=}qX;Lx%AvC#4<#CS z`TCRS>(_4KoOY>_KI7Zdv%VHvr>qwH4-sF0x2&LxH3zf?siXMOa*x5ROA)DAl;9Pz zX;Xw`)}kz+rvdNb)S%>h(WTb5J#W=4&DJyDs20_=kvPne`e>EC`9Wv)ZD^+$X>yv1 zi+|s})8SUt6j5p2#+H0N*SVOGD(4H+Hdb%n`S4B8#^nvF`xbXgLe`IW=eowrH3tcy zg{D_~9*K_*6h?E2JT?0HDR+`#8A-KcH|N#I0!phYh2@q^vW>j*e8js9^A1D>{mSe#h~%vhH?C=|H}H>;I7ey9|&tG5AXN3T@+pfA*?%&hP&2Cd?vVl*33Uu^*glQ|p{&X}~~57#%ssYI#pQ)%P}A8u0Wl#*=LS zTrE4WH9gv%=npuawlR4m;J$WOkEJv+x3|?CBPI@6g41V#boqxYn1_7Q3y~rq| z>R#=Hi77R&(WWSPVY^9rHh8$QX`K^_P|A1Qn_5_r6iQbm#LhYgm>sRvE%35DY9>6O z!I~(&mIMG&W2FWHirQKe*Yd_WaOc+y78grZu6)WD%-jJW+&cRi43ZXCC(8u$9gEpe6!wn8R6|o#?(Eh z9U7ahr7rf@?V-%Sfc?L6z)VMGeHn07wNiVHCBSx4#5H%%LO3!v*%b4;8T^eb;e`Wv zi7uSe(=9DT&O57X0AT5*$h>Eb1p7`)QCc#oZ?*bbV=a9PkXCI?HBdrVzKIA#-zo)+;-~GgNlrRw=Is10s;wQ@!EM~L|tcP0kPJlw^Kv%0Ut=RBPcGCvmLGy?`xIh1XKr7p*=pGF9IbdZOwqwa+V@b>#0&d-jW{Ea3pdg+|FbsvrdT=Ff`iT{K z*&*5kL{_Oy6GZ3><}8SgUrrRSox|VP`29|ZC6V$#{K{`RqK&D@`6D~gw-2HqpS#@p zDv0)iaZ>RaQ;K8k5d&L#(2t|5azhjDArGoEQNJmv%q;2yp)?yit7oY-v1o^lEBM^N ziCZQrmJ>Hud9oAr!g-+*)SG|dWZueFZGzrJU-&MpjIal$x3Y4=-oR`r3P0s9?Nb`< z@~9LrB`))GF~~ePO)H^Y3*=YCBz`TT{|FXK4GoWlqY}NB1}y()+)IoIMi}e(d`Gp2A}1 zyarSk_K?ktB8`L^SIvh!{)QXXII0h#WhPR{l^lDZxdHV=S7q41B3Yj$yl(S1uI1p;fKqLw`m6*FGR{EIem3Y^f`iL9S$_~RtK@HB@C81L^Z@EP42#XXhJ+=?RmPem{u zrpE3tr}08JTd6Je@1ME@FKT8IebktL{rudHiZbF&q`X&Q)Y#6Ix#xMbtpjfXQfGTS zMASez+a^|Q;|Z-`TP`d*qNc6q%X1)*g{MRepR@rAeFpwc8V7(7;lZJO%uP*r!4q=yj`#?VI~8^%m+-fxsuf__s93Q1KK=V`sYta@{ZnhNli^7 zSH^9c^sk@v5xGB~sS=p`-rr}=DLwPy7h^Fx3@YeNYs2L!FnKhET>LxD92xL_!Rn< zdFUlSI%+q-dZy0Ny!y)R-Z5d1CZ#yB8qG!uyO7-zXII=07DuVolo0S=)2k&9Y*)l; zfZa!*)3%ZrIj{Ydlec6qWzHps10>Kkroxb~Y~_+a)312)c<2-M#|>If4~9$Tv(-7T829r5Wunf_p@9AwlcsnI7kL_w7$&9 z)zO^()t`tsEi@8C?&8~0r2WhB{-Bhbx>4-+jSv>i(PszixSD}LsJkkJwe!-|Gj{(+ za;r{FhQ*{x|M1}w20DOsiH#;7B|=H&dF$cMAFA?wT^Ht|P3MDSQAFYQui6WDn?E8W z5>`3V*_iNdxGpgla`g>PUy_H(Bf7^b zcjTEHy?A@?J$;(GQQglg0%WNQG(KGbLwG8!OUsLKv-fI*_){P)P%!tDr&q1l!wap^ 
zShCH+v<;jf8+Ys6<@Cw@db34T^-B;EDunFWRn)l(VCz`vvnPP#aaq7YCT96F`PZL< zME3PVjToS@Fb3qk_TBIa$$A$3AkFewNaXfmF=+O-4m}Bp_zLKQ?59RE6w8LY|K(B$ zjzzxZ%&FV4alKr>N^6RHKV2@e%|kIS`0)^7+*62cTwE(@Kp6^j=tX!Na@^E1ppmlV z-4Ux7F-I*F2;5X!eWvF?0Ms+`Zu$2Sz_8C_sJts2pA2?JWUsnzjwG|ZV^O&09d`kw zubzL)PsPkPD~4{sDQ{Q)^oTJUL}RbMv8kv6pHJ>Q&s!o}>wpZH)@b+^9}VP;Bc~hO za+~wjp);{u+dt!tM&F6^MKkVRt$sjF21b}wm{c4dqa|9VL-XtWt4JK@QdpeT3WHy> z6APnJwS+wYq~P8fiK$Wbn)hO4Y3ZiS`;%AaH9sRHc5DGU$m$s*hWQhc>+30CI|ltx zJ%krLJNtnUUK=Szh0=!*Ehdya6Z8p~Uf1I18(jmyUYNi;4wAM^YkL$f@r8+IHnWH_W6jgp1iQ^(gJEwX(|8x`M>i08Eo8sYIjdBwld_H-z9ME z%E{`uf^@Kqj7;d39|&Y@*9{h!?`acxGCxx8ITllo}UbmKILmV7oB?=7Oksps0v z*0%dO(|BOd?Q#*#2wnywcSnMnAC&c#fVp+wQ0oy3k81=T9k1t^`FEu5z}f@*9iZOV5rM z>Q3)m#nCzs!)E2h&WcCM$Xd;Z#IjrM(06C3MbAwbgvEn`cd0~M9tU{#V@|LcWQlFO zhQ*-Xq`6dzkJ@_H`0{7SgCQNGq62ri+IhQlp;@Wz7CxNKwPD48g_ufYiBzoi#-2y! z=`6qj4z7%5TV=U~*LX4TD5)7k`$7<&7nQj#izlm>YYA1xU+Va7ZeS!_J<3~s>4xG`lBcl7XY z#`eN)b_9c9dWMXjpWAwp-i9B%r{9~q;!;~fgcou}v=O7CkXnY?pn*V~sFzFonu20X zg(twjr2qe3HgFV@WPZN9lyFL0)K)P!Bkb^2l%%#R_|Q|PO1<&|JW|aIZWs~%O(XUj$Z8u6cO z6*42{B|3rZXL5Q|t(O}Dx{yx(|z(P`Y2}dgTR2JEP}9- z{PfOSqrdY}EO<2F?o2iTW6m?}hV7)WLxS#Q#AMPGDi|v>XkdBR%QL?Fsj!uN;0{Z@ zTMl@%UNQEwZhskFn^PsDd-~%Wv0qTk;S)8xC&Trh8&8ELR_S@b(fqJ{kx^sl0EcMZoUA}p7u)*-Xm1hv2)f7y5bw}@XoeH;j zC2d6=Ben?em=?twQFs0Z!0%mga~swTMAL0uxsQ|}zQ9Y#Rh3^mbOSXA7G`+~DNz~6 zsjE50wxhQVK#(t9`7!rsB@e;(C7lY>I(ZY5bf?+6BVt2<_iY0tM62t?H?+;ohwI zqM%-~p97Tlsc&j5p^}I*Sy-tOu-?v0L^Hto$^Xi0ofWETs|rx7yY%E@6ZX4ul1~9V z&Cz<|4P#7V(Ov%gkzvCUy8#{m-y}UzU3?n6x%5qiR|x2%enD~0!NK^v*y$Hq=PUk6 zanxH=NH2f))*n3+u}p8nr=l=oqN&(|3fD*~0BX47Vg6(9te-Epwe(JUV#7HwTJr4E*3-`aD_?ZgMSSQ5OJIu(E~e!kF3<&WPOoEE)s_?-#I5dfZrZ; zj*~im3A|{vixy}Ye@zHqZL81u%CpmGL%Rr)Lyn6q>@9kSfedhJyDiwdU5PM;Db7MA zBsguM(kw#m_h!K4^ z9GM~$6gc^Okkr(KgY4%2VTkKx`PsS(zPk8cc>gUoT^K75aRDPZUJT*(>OLvN-lg#{ zuTsZtOuIlXp>1=tw(On42`WjZdKaFLkoNbDB37IIZEHCsvmamB;?4})`a(U})#@dk z!G|!0L?yjXU7 zZ?9T@zqNmJ{=Dh2)ndh1=Em0$T#VMMfC)9~tWT_eYFFE}+SQuZJf$}J?Xu0y8b=t` zL`{%vG~*?9YvoYY43LqG-i>tCr^Ff+uxH)^k(S$s2a|=mf9Ytt-}uC{i1Q8f^gMTP zhOfyXctleJl|F&b|J=%n%yVy%*J1efe#GKc9_gt%4dC;b(!aS`<-$h#EAsPI^6rP) ztzM^6b<5lIi=3GTaaA&j*DKkukGz87lMkq<#7G^ zt#`OH-ET4gg1-s5Q=XUx)l^R+lR1wE76e2=cS!D#iT(+C69l-S(Vb&evY^}Y(`VIB zA0xXs8Tr6N8Dbl$HkrcL6BvrYVOK>=c9!W4G_=o)!5>Cg^d`a6d8O^kEutBnlZ|!T zI)Fdoze?r#_xm1?ZjarpmvBrHpGi}fUyEKHpZ-f3%bBTxI`*FwQ#P+8uljwjc0@>GcG;t6cX!F8*7 zF-n?D5JU7`=_$*KC?V5d$~udjQYjxsa`a4zGQxnC75>}W2YSz6xL2FfO1PJA_#5h_ zr;9o>yaM*x@a3o$sP(W$>H!S4>9L9{QyPLeHmreUr1No`U!u!MU;-4Vxarg|C^rsAKCW?0b9uD zK)uB2Ncyi~PBdGo$=K}+kqjqwv1;$R#nK0xlaq6scljw|YUuY@dlgUa_wQ6wixCXH z)H9HXlj7^1BGrh!kZ6F4hz1>gzKEH7AHqbm(VQ7De;V^CqPf^=?9PKI4{|S3Oid6G zc5NvtH$yoD{dleVkdK35fvfl(ah?p_MJ=Ck4q8QBVj!8y<&ApZmAL==7H&A`2sOUP z6T$O!nbL;-X!9_GKRc8(b)LkjRXNPxNI@``dm&PJVz`pvnZys5jQ*yLp+qwF<*Oto~3DgNkyvFSaIgl7u*R1K*!vv8td2jvuTc$8% z0u=`xLuTM-+bR+B%pPkAA*!ogF)>DABF?~{&X_eVaTYp$giA!TrlE))vKpMtU*N^jaZAq?cUKgL}&sY@2b11ukI$~fC=CU@02I!a&Yv= z_m`{|=ElaZVgCtAt$bw(Sk;uZr1$55dVX82o}E*AQpo=-8v&T$5OV)|$EQ3r-$O9h zis<1Rg37iK7ZISj;EG2zRw;jdt|!*`aCWrQ?c!c!B~m>2+x4yI8#>uEkRMn~5H~Bs zr)hP(7!;J1m^ar8bfbk?A~d94$3vm$$W6eOv$|HJvq}oKM}5?E3s-6xh35ef=e);P z7zP+a1+OQ=Z->3aF&4yrLYXtQ4mJSXm*Ge;FJrE2AceofL{;DQdo0-5I}g~PV-0x5 zZga@i--+|Sjrqd%V63xG`I znu`2yOQir;0K7QZ-U)z^FFP)JA-`ms&JkI!k<(5lE8XU|_6(eKLxvIhtr%&WZ)>+| z`EuF;`F)!Vw{RBr zg$~#;O8gm9L&lUA8`WeTz2D|*9213$#6*PuF08u;Z0O=vsAYqMogb|C4^*1%o|P|i zUc>JzY`>`feb*f-r4HiW{PO&0!h=X}5$2!oxzBUY3-}XnFxYDNg}UsGBBo$(Q1!%c z6=%b@K;N2@ak49VVqJ9pJN%d#DC`m_IFxZ1*I-~iX0zKkfF9c$ zZWRbBHQp^zA)OHxt5ELDP_<3R(vT;z(-29NHw?r0+m%=yyJ;Dt1vo@k3=LUWh!(xd 
z($l;Z1I-ko`%jx|^wgQ>>dI&&f)AwMp>Gk7?t(fi$jE$$qpv2fB+9mbOz8ygOXER% zN8JvH^vG$kSX-U)$}3v_(Ex!X4i_EEV(AB)6MyL@f`WjGJ#WI{Qgjz4Xt$xWWpBs^ zD1Ms)878g}1Eb|FIP}=H+h{K(lI>MY@mdyPQY(<;9^e=fO}ju5U{NiLF?2Do!uJa@ z>PfsdYt7}-^8w;!klt|L@5MV;cTRn2{r<1pO z{)Z5aD4<)u^v?>kT3|c^4On=+Z@qwem^3$Br~`I9^WDtk-_orXucq$wKYY$fNZx8r znZ!VCF7ry@6}{Qy4}#qY?urz0n0@nLf1sH$AFF*(cMW);@`9 z{^2ZOW{*STx&;Q_9cOU{P!OA#f+u$S^6!y1QhIHsjG%rwsgoB@Gp~GNWP6tIoLQBS zegMTYn`4LehQv`kdZse+#Ip^%{XIpsU5l6*Hd6I0A%}SZj#_LD+T_XwMP~#Yys!pj zco^rr+u8LJA~37{cud1!fB$oKn&!5X?UIV%cjcAOpJOI`{xSME?#Fg=Zb!{$74~Hi zLMj2kU3-JbxY_#3FGxaYfcs{hyoZ_9Z;f?$W5DSX0k54W-T@h2Gh6&&f81aF&V3uv zDHnb{zRCy!uP>uWc%t(C?FLiC8v4c7r;8%}$xHY_UrEkTRU>_~$t<~4Sd5Mf*rZ?v zNyFtnb;vv_Z5((?={E1hwsDP|GhWUEpKmo~^wb($jHak3Z5{$pDKEx>(Oc{$o1*bQ z0}BclRPJXTqY!*d1*<^F8qYb>LG=b`+lCVu#Z%br@!S`;;h+VQWJ2Tv*XwvbXs>9^ z8W`YFmCUX_Lh$8Kq{$pIoL`#DXdYy`(zA?|TrockJX9 z`bu#>Gjj&&BQ}R4!PMeZ!MsP;<#)N4r2$(@hWa~)uUCM*Ax21W+?NG{XH2z5U!%-7 zQ?A(=gFyCrxw#9Kaooca*I*p$%~%WEXHwc-B{aIhY4t&ee-al?o<#&m0QqSkg7?w; zXacczidO-0OFMzT%mY`yjV9PjzNt)pmR-qn<2twa=y68M~h0&Rp|Eg%z(1_O1s8axu8uubY}wf@o@`#(b##HeIW(ruEA` zY%1*Su0PO_dz16j|9$0#`GtI>>u)D(lQcGlA-O2olUv+8}g7C9JX z@`c$7et03xHQjv-ww;kXC|^Kl{e#>4xTpqp*6g1cew5)=WNTeB;SZrHn$-xGX z;UGb`)kh0;+4-*fs3(v(i!ixa2SO{zZ~pUrUf(Wvn$ z8d3>LE1RuL?@Q`yUR7IVP3_tBQ8{eMlU}z`Wv@{y8k!A^v(DDx1j5rJ1gt-l2=<5H zv$-c?;hOHRxJoyt;LuSH874(D;;>&6L7tkW_5CwMKFseuUcTd!gP_+kZp{kxkZHUjmOCR`wHka zWw(VJqs`k@KkwDN`ryfhJt+VrsBMeCivd6?hs1s%zTXU-#|t&JkGo>hPyVg-0J}=A zyh4?r;H?GOCmIk1HlxHpFB0C*w;^1-(K9^Sm1#=gq5&5Eet?b=@0vs-Lo+*3qPUCj zN$O#%PiFB{VP}2)Erm^Y^M#n$;9v2~j$`Qdx0krejt6l%WvljEEtqwHGvTr$`sY9myhp|KzHv=M_~$NY~FMEO9^IK)5k8X=;){Erd*1yENEY*<1Fz z(v7;eE&JHM>ESi0Oa2U;p3q`tbP;0wgr~#AUK* z`+@ngc7m#@6uE96y+>AT+28{G$ERX_>|9iJ{sq-MX~M23)Fk{@!dD@Ys!C&NdaOIG z({BfQy+Y2LKkMp$3bJoaWzKCs-nEX|rrlp`RiPdGX)-7o6^5?s)QoK>H}kR|lQcb2 z9^i?vi4g+}s*_Wa&Vv87^!Zw-gSse4iTbI=-7mz2jNZVE;{K`!gD(``Kdb^G34v2z z`Et|5XWXdE^_kp^KWN{!Q|<9oB@~(zuXk9^RJl$^d;`ALyvn*y4Jt;+l&&jp9!KOX z$)QXaP|PyN{wzKaKS|yx+W#o{v9(XmGMh?r>aYGE&Bh5>*O#pKj&n?4;`y0aWz5PF z`_9pHCKaoPn13I?J_ckKEL>qqZVC0xdc;#x|g7rfXLqL1JqF9|I8 zC)%vk4JI-0za@gYIFyd6)kxMxpGBt}-|9kTx7K0@D)mjCT3@~8NVaR)OvjyUHyj^4 z8H(2pR~jv0loAG3!O;s3szEv37lD`Z!UxwjKwvBSqokZ{O*s~9?fw(Wx+hw$>%BZX zZxh>Y7Sk9MQdj0Vg6+l$&^;0nzIh|Lt(b8Bmy!y#ZjP@?m&>Rci|sP`S3gHx=I;G+(_N=wxw=W_S7}e}}~g%rV)&MO!vwZ@CAV;E?6! 
z?Jdg$^u*CTyt+E%;Ti$?@Z0Vymw;~W(*wK)fabBzJ>bcl zrA|ai;bC)sX%DPwjoz6QBwN47fgHq!R4DTufG2rwA-^4`!XKOx;h5jQbGF;T5)V2h zSo7XlCy<-=I!Z3LG=+-c@O~Y1tP3>Nl9UE?vzWTh?a_NPCeU{U_hlYdKSRcLbK_ag z*tY7;sP#3ogScOPkMV9f6~3Rd>~)CXTPR1%QHdXUVdV%ac6R5mXU` zXJ+jRF^$Gd7C73Zqn^}0Y^uX8-ZQr(bz9Mg#_nlTPkTzUE$2L|e>HaYy~m^XA&)d2 zY2FMW4*!oikk%ufh~`mKEhO#u7cR@^m<9mvi#X>>n?dgakuGtI5Pj%DDd-m>MxVeZeN7s-?IcWlfY1@{}s`9nbQ`8n2j7M!5pA6M@vv$L6 zkF6fCW_ouETXX{CTzHdPGDD8)OpE6>-TOd>-F|ERXkr1E!YwLnL6`J14LmWVEugTm zFL#bdrM)H3=`YKH?ak_>qivtQzGTjIw;Fbtsd9YhzP?iT`Ke#P3yjUZfk1~C8QPf- z-ONKMBalf8-}a1mpUFef3Eul;3zM_Pm4a5W1P{mNph^o5%Pq%#XuY~yg?!_dCjY7o z)GFabxNX+T(8FHoZ`^&BaIMb!{4{Su`GM$Mp-^@>*a`fbP5c)}aU*U`Ymc!1&}syl zwLd2`bTo@sLsFcSQ`Qh+<>*C4qXBp$ zD(61W>oglU9g`_z=7f*(1jCL2-?dOYYG~C3o;HBOWz1F`2;5K#w)%h_jD7GpG>+3E zreN%ev$gIGlc(X{?Isbf!9f6*oV*QS34Z)uV{B(Hj5NXl_@Y7=08h{}|9csGihUZ3 zJr&F6-O1=2nLapo{GLtmM*EIdYWi)!XY3r|linNafP9K8HyXYPXrdn%8R!=cdTN2% zmMOWav4A0#BKRay${du3zc7hiV^zpiv085YJf02X%0I|l6JpAXa^nH9qNypl(UTuZ z<&K``&+Aeq3YmnMyQNq7|3%3EZ#v5-T~hBP!af5O-z=xq@dj8uFPhoM02=E4HB7np z>Kx_5t2&@=ir0wip>{Z08~$h;-?81tHzEqOfaveqOm67nVYaFf{%yLoYYS)_{-paf zH>GKEAQLUIGU0JLUu1b3-KhFp`s|+S8Uk3uEyteZ!_BU!B_T>ikp?QjVCX;*SbI3$ zQPGL^>ktX7(A1+7<290TU%sGyND3PQ1at$hU(#pd{flae-yFf$7&Yu31_Z$;j^=T) zRs$v>_l77jZod&JdI&l?+q6}I4qQGMi|@|$u6U^aVZH74?N>#QDCPTat_MvQoAH8Y!hj4UlPmh0k_%BpJ0zjAd;4Y{@t8#Ism0H0jI#}HRJXWs+8 zUdQcpaH9@Vi4Ehfg!o3kt($il@Hzn0XV$Nrqb#6F6UkHp_Uk%LZ?&^c#=puO|Bk+J z>4D2T+`KGn_t-z(sKYTb{CL1r0ygnro(`wEcMdYB;a!i==d9awmY`>DuMmB~EBD

#hq#f4cxaOc$uo?3{$#KRD(bD7WHQ$`Hra)lksBC+POEzygoY zf&{G5A498uN09ja6w%`IX8-15aUTsqpl7rxMTe2e!-Qy$%WaZ=Gp#*T^n;oxeBH97 z&-H-qy&IZ=lt>y4UgT>3Zz=j}=ib%b+=~4YD$6l+uO5dLR9Xy#BW1exJ0v%5f=}*qlAX`F#63TM9SV_ zZE$Qw?Bgvoz+uD=l1g@luYff)xWrz#8CX$?b9xzhJ25oA+oKrJ^v!nQo&&G;@4jh*`bNt5ZLZZd)oYC%+(G1FFhfiY?>v; zOkJssj*1Gjt-JlDJO9JlE35=kX#dI}|NBu>mVtWltT!h9yNfbFJ1EL4t>%fYe~tXF zi~p}h;$OTgDR|{t^J6^?ppBL8G+Ly*2uZ$&_cy8jE06i>ce81MW`8bSQc@xfUtcE} zQgO?dbcYc1fX=yi=3pB>8){u+2m^m-Tef}nyAKlQV9Y?BhV%;eHf1xb7JbUT zBT$Yqd4b$~wNEv7NPc!t$=|*vKvmf<5Bm)Zpir=Ec?jJGPI)3m^4w1(6LSpe-P3J6 z%!1W<+S1WUVZnV5PWg|0u@Lo!`X<*!&|7x4_Bc?`%_ifNYcj6U-Bg7Xl;k(0-& zrx*@qGwmy4zlmX_23WK8*W};E^g#jg=5(OhG(Lt!h$&B54SqIuci&5(sjHjY z93cuk1{F`=spQCCF`4^OPcnw~bM`ktBu>o*92%ioa7zisP_6XQbxA(VzbglkBAQvj zxJ(ob6i&u>C04|RhK7DGr@SAYMpsC@`3CD3M<=J4$4bL@ux(Cr2W!7QLwC#!A(XzW zTaKysx$oLp@|WpBCg_0UU|#ZBa?}Ly5gUH^N%k@^zkA= zH`7NTA-ukGolWbQR${-;!Ks)C(x^&!?^*Mk$2@40cZf(4Ut-#094B2ERGy4!mUQYS za#t5T4Q^Myg~!ImChc#xD6Mk0Is4u^bJfj1-s}%*P$cau-nNd3mE4_Mv7=L60lzi6 z$ltX=3-Z#D}SiYI<_nj2UoQP5+=@#%IPLm8n!$oaP{ZENf>l&pgBM za+w|{)H-zV`O>+EBAQ!-TaAOC{8kfBYc z5_Z>#JkjEroG)L)2&}N40)E_-0S6N%714qn2q=-*DXidn{;Xa3`hYS^e+!=hclEa& z0d99)uAy3DZ~B(PwjfDseJovmhdi(x&ISxU;GF1`%*pI7-^6;+joNroE4;RhgfmWd7YfHyx z;AU3b6XiDS6BSv9Lyw7xjex@dZR_lO{j4dTH0XHJnIP~dZ~RYl310fkfF>FJEgzbR z*bhSd3>{}*zFhl?hix-cG-dl>*dg$E!j@+G&ioKPeSw_13>lT3_*ikc{cCj-5C*&} zo}IdYL-pr}{{Kmu>3#s;hiHfFqb8DOJDTJ*wCy_+1A^-J3HezqbGc|QseFnvUnh&&z*DCznqA^IyXGXs9{ol`8y9h3jxJ%dlb zBv?taNrWZ)@8;yMh=fQs0H&%OTsP+;_=k%Aud?!Q9w&_h4{XZ&NWO_%e{J%AyNkT} zO`tY7s}r#O|H{CB|1Mn^kg3>jwcT9*q>q35^`DWf|A@hx|A@hroBt7m|A+xl7ymbi zfrxSTnBrg5$W+(9g{A7j%DHH-QN74^qlwPKVhiy%h9<2g|h_VTG$We_U) z{ezf8AS3zUjb5DX&uHS`w00zcj}5I1*T|HMy=lA#fJ5YSG*1LYu<^2F{BbFQZgGG} zD`1I}sJZzv}IY97sp6M<(YE zC@5r$)$+=`Y?6OByGWG6p080(m(TNHg6Ym0dk+q)QkD!i)?Mb%&xp#+$)XJ2jln4C z5~vLI>U8HmF?-i5Ie|rHC&W1XJ7&OE)v^!(pWB-B0?>rDqk=R~Jip;Cg;h**&B(f7FQK66?V#Pd^w?8(>*0 zDdnkjX9Qc@ zvCUMk^LVa|{K)08kY$`i4f9?(cb93!*~r)>2z+}GZc$8BK07SM(5?jkr0Xtr)O;T# zH4^)l6Kx*Fw~~7`c)re{vw3$??$sP1N#s~JUgOH{J9qPrF=eD1aJrOwKHmR%p9@&0 z*Zml+@XfMuJ-FjJV8t{BWu?aLgJ#C#`^3zF5kkb)b!VWFVEgRCR{Mz*b`sijuLZ28 zNf}l=puWFF=AF36pb|H;)X7RMOtyHq5O0#ojdxGiq0#mFWJ&s%Rx~}N^I$~FNa$$g z)krYhBDPzBY-rT;tgE7nc_=5ImcpIK-1EiD$obv)VE0K5eA9!3$H>7DV>z zeG>8UCC2ejCCIHEq`~=EoJFL@G*7TnS6C_D=zk~~-H|K$pgZ^6F&;2?hE{{(kgEf^%U7Tp zLxEj|$Z6++M_xNdMFh8;*G3yxlqP>LEDNl)OgT?lpX_fgG9^G|5-OMV;bJ`rXBqBw zL(0p`%HnLTVh>!|+1NUVRCQ9?nH7nyQQi&|8RJ2y{k3?#0KVy{p6XtMCo--XmuzLY z$m1Au?v;lw>;&-T_*?DpwqrR(<@)deNV_bju zQov*Gc-iIMQb$2ucqe<57n-hGjr+m+ouM@HuRN&E0qWJLxlO; zsr`Z9pwA;jG5jy;@&9z2FZjJ7r75UfH(+nHrPc=MSJ^-ek65W* z$8#XlUZP~C4#m^am*O?!!QaNHSvLt1FCFU@J=4~nFB5l~rP+II%LH5RoP!dI(Ggj% zyL>zoTP>xl!f}4MYx7LvP*4ZOl^TBEiesoQMG$78?kb;+w% zoPiu8spWwt%hAHdP`jgx!wJ%o*K-e)tYXcLD*~{)o_gYNgpqI-zYFNjg%yzDL_P9c z-mKpMwWXlB5H6c7h`XX+Tv8&|;Tmk$mb}4}+kL)D3kaj!1{!R?O=UPT*voHSl`%Fh zRepKA+?8qrD&@PVuAab$PLG#0*sYtcIqNXzhjb-!C*lg7j3y(``S&BQD4@6s&T#Dp za^(6p(xxxXe-YB+pCs6+ZQQOX^6(hnjkq z*U1PlO+i~t4*WbJw=*D&Bs6@(h3f9w#YeM&&V{WkzRj-?Y5vxoa?fffWA5d+P@=U9 z@OE6P_wmr_505pzWuJ1Z@k&&&LdH~jWiD*;-Z=_NGRc~@=Sv&DIhGNat&(s4nL&r_ zK08waH@C(4QL}V1*OYXMKuew)ylEZYN)FYY;M(M+oK`ypG+CXO4gxN_348z;+WS^Y z8{=g%ARql;!%Q5o6kLV3$O)gV8~5>c*?Zx{td4%Fz0_!v3pcvGA_&(yvNFC^wgSLx zDVc}M9+P?_C(w#{#pxV~lx~q>c%Wl9YwUUv`{ACg@zlvdIKowMAm3tHa)K|gGf8mb zqwP7F(+ZU_2o)~H>Uwit8cwKmvhms=%xIhsCAxg5jTnjeh?N*$ujdobiIB*c_gu_@ zbeC_gCFRD(;cOt;qUjfTrbisQT~S*V1qC67J&)^}7)8>8?gZ6g-YvrEC;JP>YM&o6 zBEF39TlF0KgwA51*NkKUIV)5bHePO(2tPRF*DlJoX%WUk3hxKO=gYk%qD%H(jv@!r-NI%2hJk35&EA8MPX!i@b$Fks^I~l_iT(3nG&y?ezQB$*PtQ>z; 
zl1uzlP)DxeKm3A*Z{>S)#_){oNUim_SyqzaWm0qVyyxmC#`7&W)zWAD@I78KbcRbV zI{5YnhE899EWg}N&8=JPL%FKlazclNcM9|*rH_EdtM2qel%9G6=O;qQ!TX~;WFqe@ zZTnhthbkk0U9wM2Tsq5QC6wm>>@A%7np5};$&=#v&1~~%X!pmAHxdY#mV22iV&j5X zu3^#xX$;)KNAC|B<1cDab*?8DDfUq|^#@C{YrTu&)f;zTT+GhPbrOa%IL1BUv?P6o z35u}JsC4jV$3LOpAaKU1E@W^ei$!{@=lN{sO+~IK*{0a(p|b+ctFBW!-v-?b+N^{X@1D0DspBh4oQuIc& zCrdoL=O+l8rI1`0s~su!yOw$u+ZOkfc{8;X<XYsNOrKUUi4>!Ouqa-jM3tM{%E_xJLRoE9~?U zHt>V+AT_ntOMtvtkG*;uZo5I8Zzkc*WrsyB^c-FbI80PoM0jLUk|zMt&izH5nLt2Y zsO?>voP)1T)3#O-&&dk7r|c3DZWgK;_D%82I=9EXbuRp1we-{``pGPpm2|KNOEnvq z%#8)lj_|T+@eQos`8y`HKh|swan8e`*vWQXd7p$%{ehSKLtCPBeL>aX2^i5bZ5Zvr z6c!n~9U0Cydoi8;VE-}-juhWd^Up@GP)V`v_Ad(sW2ofSuD!m;%(z>M_4DDimv^pc zGFBQaV$Q_Ocy67Ei$3+&lBMuoc&b#OqwVb48cr5|b)9Ibip06hMF((nPb!AGS0#2a>Pj^;IkVD6C{?g*7f&}2xHacEEtC&ym zxTtl0&Y8J;A{pd1=enFwJ6`5EDQ=MfPC{R5TxHZsO^aOjgR>sAAORl-8%v-p1}>qU zSPYg;egY>o!u$9@)qcLK0uHO^p0>*+R@^chFzyk2fbe6-JDzsy2s0izIhcar`FZo3 z6*DBL_$CQijN};M0K+ZWhfTs2Ck3md|G~(}nnoHXcx4`_&Q37+fX`Y$KMQ~cI(Y5Y z_f)nl8dp14!0K{z9KwlN9Hhy-MjL_#Hk$n3b()zZBdim@Z4^mR5Uw?mMSN04UX(af zZ1cr#MoXPyqN@(ms<4A_6kL`wIU1mZYVR_pG~)hrQOOjzOQN%%dr z$6>v9nr6{U@}+LY)YlrsRooL|HvQ9k7t_Nf&xdBTjrLdBb)FC@!xjoQBb~RZEz~>0 z$$2O1?;eD@wd6K!r0i4yz%KTz1@@Mi$2V^Cw4o{A6KbP<(?$K}7qpv{|Mt(tbsy2| zSVw_ydlM}^mwBj+*F(kYw`koV9@=VZSSuC?h}n@3D%{Je7AoUvkTZRHxL&$^_Qo(` zlRNQnWBeY`U6rhL9g(tBqUl`aB42HBNQm>+*gOk`P#R_;W_R}EwBxS}7+$@d2c8y< zqV*vw$$sc@!#ZDj{r!g|s2WLZD!Vz%9N^jueh(=UuA<{qwunuag%J;=u*xKIupQS% z>IB>(K7C^#*?LV}1*TzCr?V60>AAD0YJ5I{)#<`QHwl}MuIt&Tb&IncD<0>3EWg~8 zh&d?8HFrlibeRj1?qa*wTlmR(Fn9aZN0?M7tq}Pokm43b^qRn6j`~z6iD3Vh(9(N} zu=0%rUsvX|?Cp6$XPm?4rnQW12V|lR$HsVR@jJT6A;scyo{`)VKnP5_ zNpX|7LAD{DN>yZt^T4f$oLbrV{V$FKf^%qDm;KrBlXh7zr0d?yVc^sow8I`DBBClt z=E`Rd?Gns^>d1l^NUy`>Q7}{nC0ia+3M+b>fR-Ia;YnjCSS1YG>>o?PVk)puMj;Y% z-c}<-2-680M$Wc0e9(@&$ox*K6-taZG`x8{C}Z?p>W{t|eX?M##BB*7P(nC$U-0Fz=7OXFlYL8VG4i;mC`?lP~!zuXb7l52* zqPny<0;NlZQ{6|D3iFS-HcwWD_U>L(4odWVlqD;8;mjJ1sG*MR zN3)e1{HVi@_ya-ql|L{Uiu`i<-mI>V-W#MOCCwL1C`{Y^0Kzqi_~y5TO`>JYFl#>E z4$18rwwhR9pYgt!s@d>&9wJawrexJnQriiCdbLvwciKjGRl3S_%JTUT6%w~(uT;q2 zlu-GWjSY)SYwCh%V&VN4scQzBG4Jw>yLKVBOCJhm{9ws4~?*$YtoZV>{D(; z4HERG9{g}%H(n6)nw`oQX>`_p)f^BJG+K_xaE8#R90}H}3Yee4tftDRu*YY!eD~Z0 zT`nRcf2l(#LmkYxO+g*uJZ>QFSCf0w+{2ahSP~Y1!D64*8@0jbt#)LXy*`c8q1V>w#cMnQJI-Dmz4NKk{1ANpAl;Y=O}XO8M# z>y*gqweX1psTJ)j%O%}>9ZpJw(|YdbW3*v_xNriP>r;j6@3?P-x25iEy%8NZ#}vFc zZ>11#Y}$_LJ&Hz%1iuO~1c}X~9ozr&^gyT>((@c*C)-&cUi&&HQ-fD*9Gg+n1jA)Dr7# zjUivFomwIENYxc=8pC_*Bo;h%j)v_H;b*UX(Q@WK&AcS+aEDaJ#yQS-+FTuKT>Cmc z42tVcWn!3#FZQ~(?iq<7Q=8`9&Q3d6#33+UmmQ<}wd1p9@?yyFR3Nd_dZ@dsSMj#YS;#fjk& zJ%OpRT5lQma{c0sdC!?@ZjP@AKJ?u1Dzi4%)Pp96C?-vn>Mu>NRUN9@ac`d=n`pIY zS8Ue$$QPk~3lneTPXxauuZLwdJQiyti?mt9z=`i@j4-aVhL z(cgb*9sq>&wci*{rUHQ zZW4|b+GYrUQqHbyPasB%HpeC9nN|IMz>j|8aARgU!iWc9>aVG)pvtand#I+F(LoYd zLBv>^3wQs1w)6>f8;a(cB#~>tPs&pS@yf>3d{zdUhhx7)KkR*hsF^#l2&E6p5!_iA zSbEP&Ef%Bbs53(FBwv}ev>lpAbndn3`o3}258XRVLxHn$-HXL^J%NAgX~(vF27<%` z_Yjc0RGz7(GOIeVS=II~(55nT>w4BU0c6HW+!XqTu`*3u_O&_mt` zkvIpaXC#uL`lXA$1Kk4bu0?!;!a5$V<$VXxkb1Z5sxtc18&|&pSG}AN;x`d$~xfaG^ zV+s}6`8*|eU8)}Vz(km^S^Vo8=E?;CE#7*uM{4a&Q>13OCFga_g$Ve{8mGmntM$ZF zDSt-7`(co5HAs~9b?tmQ`=!qFlSz?pp~PL8v4dqSG7}{x1@R|?>`T$S1!K%M?j!my zZv-g0T};{1>`%j$(CgiH*HxF}EDAOarsMc~n4!NIZVS+sGIw2^b`*4`@o$!Em%%iv zTCO`c$EtFw5Z%)O07&J#uU%3XLK#*zG-$U{sy2o-ABdJU;*wX^Wv^-vS+J*&sqUzt z%54Xrk=mr^4W7{R%mY#ALk_lAOYgXX^1NP2L_8_87!HCrtLo*?7(2H?6&sN$xJt#yj^YH4@3*&C~2~Hgj-ZPM&)5Q8>?M*Qn!UKGpNw|MK=Z zuyJp)K+)EsadZmiFwPEAK8No$Th|M<8FXPuBpgGzDhGkW7Y{Z`QTP{RYJ1>hpm<;o zIeU_wQ#tL5yp2+PKBNcgC3CSoQoYanEH;z<-&sCf&`bS#tAkR7fA}n-Z2AbQ>+;k{ 
zsm6tMVlh*_ATBF4dC9rH*uFmp%c5}-Zg<>)?pJ;OwV|QTv;OGS@$S+=V1(oBd_?SV zuIX4!ytmg`@N~mz#ocQXN<-UQ(`Qr84SFR`8+_Hed^ZH806NxC^$k>L zDBlA$j*s!2`%lgl|7{ch;`13}eWrfqmZx5&>*g`B(^JSiP~L};(Sx~~BNVEV7UvW1 zeMBI1&IB+!_;Q2Bj+H#~GMk+sue`9p2Z+ z-4fC!KWipTtfil^VRC6b94eFl{92zs&!Wx>5#yvbvLh(A$(=JwHeH%WFN26LGc;R3 z{{_!pxYF@aW`X9T%<0A39y(fmu&~SIt=T}1zaRyXt}_3$`C;o`{0GI>?{2px!yAa( z)Psy}|LwH--=*S@CLpq5jdSKc(Y}(rGr$Zm?wJ-`$}LchYuRGic~5YcoPC!nBcz}) z=J5cp@#&}nRCUYCX&2a?PB~#KE=~*OiYwo2RA6ic^*d0aNK|Jxi!Lm0weM_cmR_2u zAbl-o$f6Tn=elNXl@mhWx;8UW;pVO?%gQl;MNhnw(R$LM<=q8&^l2OBn~6_O{K|S| zyo;IJ<0~DDOJa+bBGQ0gr&fc|KDmPYIzxr!l)2mfniZ@T`c7eSWX2mcr6QIhG=JSY zvBIJH@tFQo?}u&oB)6^;*wpRZ*z5h?=d}7*PQVN*?KY@85b3_TA6`_A-5gFL3d0f9h@>%cyR?R3kKU82e)2QP`hn4ve@SPKjuItkd$+QsVWG+uA#b|B1w&noP z^XGhPV{g`<@C$h^y;}mf(ogbh_4J;y+^&7K%)CV^vlpB1ZcFA3|9a~qy?WCY^p(X> zu|Mw&7M)2?%G5^&&2Qa6sUf)|zxs4A=P&!u(jK=$TMBAVUS_oSzp%2~w0CDAxjR%G zl_j7LpwrEm4`KdnnNnjR#WUXZ(*m3R%fMdxcLBJG*g=_=b=}PN>t3d3F!#U&RN$l) z{YMrXoO8EcMwA2G)ny?rt^*oC+-UI}Wv_(P+^}99%fh8w=t^wWVaHTgrPJ-E+fN>S zw9LTo_=NfP%-*4p&fBX0XwcDNwTXj=bDli=@araL~$i|REE6qU5`SIN9QOr3^ z&iaZYPxh5=cWFdSmyG%r-}H~`ma*=H(-iI=IrUQ9I^M~`>o681_AF~;_HJfLqagYI0DEfneZ=1ZC- zGqnGSP~op%$yOB^V#d!o$rc#~=lujy-eL*C$ooUN@d~)YE2?ijc2`ZpbmnBD&053A z@9Yrn=8W@NuGB&=()hb*JA}%fj%d0~lh;*}*-TVLg9NPSpz9ny;Z5w#Sb7X$Ay;4B z=!yv$?gT${-1`vcsY1DBVwf%qK$1B^Wr4eBbp~J zYTj&9=p1M;Wu1}o8f&=_@lO}Kva^hiv(wznHXq24xW(=44pAbE@i6MPI3%n?BNIy; zW@@5s<=4qw>YF{0WpT-SU87^3GkMaqUum(gb|^ee7Gz=!m|(hk-M6?fBdUgL)Rnm7 z7MvlEyc{O)d3zR-mI08koP;~g6q37KKQz;-h(6bTabnWU)L1UsdeeAkim-b)@E0vp z-t^`dNxjp#2DydhLRz;i`R)o*>?>ZkzOI_cZDnu+XT^eGMb5H1-vl?R1*=@vZQZsy z$UUOGMyNB*f^D>$55ef(ZGC`tTopcr&gknoxqTHmh0l2_Wfb)k@!~Sd{LxMIm!oXj zM~OGW5+(8!k;OW?1xMd%Se;YyX$n#qVB=MHc~W{l29X({q0!1y?i?J*&B_)%{#o;> zq$KjG7xWs(k8@g#zHBJbO)w)TW@j^28zox=hYPk>wY@51Y?)J^Bb~2%QjqbFUt)<< z4NL_!Md1p&)Wg@>ySaW@f>tBJd33bB{0t?2BX}Nb!>s-U8LYWO2sDzpv zW?$RQk@k?PfEjN$crBFss+sES0st3B=!6dWR;S9@YuMbc+0 zZjnQCx{n{BepOp+r|i|(h1x`fje7zTVN-vit?Y9+DII^hvvBK+U3rp2cgFQ<$Q&vF z0b|XmHJ>A9H@-FY7H$586(*`0Dw4aYKZWQM;M3{r4kbG%Y+KUCGGC*x?2EzZj^OeT z*Fz-H*yl^LAZiwWP_+?&6O^8!xspWEjLqG$^} zTX8@sjqDyk2^%X2>4{~rdB($9SNrQZ@)GrprT)}_!E31_vDFZ`yXJ40>kep)QeYKK z(_BcmAzQ)+j4Gv)9}amRPw=I9A~(;*Kr>923aTrLwnE|h57S1m3S3U+s;@Z1)e8nJ z>%^+IP9v}>CqYonR;4X5oF^fcVhulV2r)yj3l(o`C&R^k(QFNTs7>^ar)5Z~jiXV~ z+Iqb4P0(SOAC+QrT`OeRex;0(W%3(1! zyt!+1fFSlS&nQ!PMb667zP|1HRf?Rii0qHzPNA zsG6^{5$F#B#~dxqO?>B_@jdL(^eU3f&x>P~Oyvo~Ga$UOhc6@cd>PKMt#hjibMhoC z++eF(Odd$Vj0PB5UzO^I34*l|23{GT#iN`6T9f2#qxh{l&8@AM8ycKvbK5>nR+`5? zbD-df7{7MA_^hz0NFb`IqJsyGR*JG%=%Q^Lzx>@#CD;BwGm&&Y@hsiDD925QQuBOa zA0K??*&LRObqMhc++MYF!{%g1f?v*O3M8<;t+$3QV11VJhSqNbNXgT=?JE#}=X}Gq z57v>+*ZT8*@tNarH|Krwlgb!)t&;E1E<3Nb7_|&>^F83j;weS87kC!RD$ERzB6RS7L%Uph++eAU4c z;iRk-jK{r`f(u^UfC5*gg~Iq3pHlkQ;u?Za!m~!IiKrguY<4XPYF{~+&AWPGTlPzgNb5{>}GH$S49diQ9N!9_$g5`(-66H zv-bQPWrN~#Sc&9y*-mQ%&9@0c);KsmQI9XxqWw{?eiT)+`XhkHL~E~l&OR}0Z`~1- zf9L`rM=dWS8W-`egdsTlxGaOUjuTuZMhLDFixK9i)g);!#pzJ_M&G4eUUD25Co`;2$kAOP$M^!LI}eaSZ~ICDck#>7`Eg9*#@mCr<$C z+`aN#s;VK9N-Uk3K3^? 
z8wr>czt$;QX6DhUqgCnd>))uQhvY@(ITqeVGkZQ;T53m0ivII4`ri%FGtArf!a4LY z{}sA@nuwkS|-l-FU-#02Q89thw@31vFtZj|5@JOX5c@|`)7*h|C4f_4dt+> z`I{LQNxu%*g-oVh=EI&LBZiBfzq<c?JeNOAA{S<_PW#7K> zZL(kP7QB;aK=PL?PqxTxsd6FiI@+a+(11Ze$>@PrG)C|j9EM2DYhRuuValRaziDW@ zdvuvE1vDC+pv=h%lYma|GA}?d^Dy7NCq-~+In|pBz=Lv*0g$y}uF@31fg1-P>sl9j zQfHE!G}cZh{SLP-XGJs&`c(k%e8^p}kiR3x3T zEU5r+HO6eTQ2+b>Y*14@)?LOYAR$)73!L25>DuaSwIYMUcVy=P3l%Vz+jpY2>TRyJ z!I}Zwu~`TqgR%MG7k%>)*v2CXPt~JCK$XP-(p&JcDn|>+Q~-3B2$+8wO3jDbKTE|j z8>WbtdRn;y#NDL^pR1tm^<*i80T#4EWYKhqjRzp~Oie6OTO%kaa&wbmZwly-n6)aO zEV!^6cbdNZNT&}RZ#}z(?{~KNyign5)m`9(IBxO|k@wo+lc;}ZW~`3+)TG1ooj*P7 z{HqUw03mc2I)#TGK)=h1lutuz#@sKno*J)fEeVO@vjDv2#sH=LT5l8(QatyxOEV}z`f`$&lWg4fp=F^nen7cE2ZX;>na$PMD@mKPr9_rh8;4D0 z2t7=?5{#PR05V?YI~?*|t-}+wQ9<>NoY#)VP>UK94@5NjRlZ}VX1i(x>Wu5;-QEI} z3SA3rQC%SR(yXfAO6Zg#>|NK#IR#`ZO32fVU;C+-b}J@FE`5;&=KiDd?6uv_c=oeR z45r+WCb589tRQ|+dNOvdMhM5^PrE92t~WqB0|+fa&c=pg%-k zHTo@(H7f_t$8>op$&sj>owfVQJiIkB$$Ue&Sg9b`$wi}C`C6aEKt)d%m4i@ts(Sq$ z0BYr`#>{6Q_Pk#gW;#D!0B{k-6A2v?=!u2*d&Ruec)+0!YSa-UK9qpmny~?KJ&*|+ zzkT3`D`XmgOJOaBY>dwSunRaf&m32p(Yq2j*)?K)2LJ|SF_o!*K4HKxI>CJo)7x6l zivt(4ZXXxj2q?P>0#TgT0;4?X)o%ZV$(FPQKKKioWDc_rfJDm%uk535c#hJdsl-V#U^8=cmV)Xr87cp0f>CNCro$ey#JlUj6L{3$pFh&?lvaiM1hY*rbN%k#e8O+%C8GEuP##jg0jeQ%8 z<^H;U*LB~|>$#ujb)M(-U%WKSaU9>z`d&gWy2*oE?z0Yh*7Bx{n+L!Eg^Ty{qF%-X z)cIMS4_IpV89JTKZoh+>N%roff0DEIGKx!m_|F#nxDPW?P%k%j4tSgVlyY@Ek`iV2 z7&|_67^Nxqq@Z>MaJqGF-pOUtSnO?8;qT#C1ycYDT{ypH%#R2gqC0))hbqfPU4z;~kR+1}E61_jFOTHb7YUL~}F7u)OoZ64arLGJ21p>UA(`H{g2iXcC{{Ey3de zLeeA3*9;*$D6O^_BvG{&s)q75wG19-QvB;R?EeX> zZ+StB$*Z~%3aJk&t6IhlfdO!bhThMmotYE~aF|!CvJGZClcXP@GIsB3rX3Fof=b@& z*KOGZBaf2i1;wQJ8s~9EKx&B2y5BptW@F)Tc(5;i8$F*7zI16?pHaoDxJm>mzS?tr zHzXTU4$(!~c!4Ck=h=MHF<(!la*2zQJ-ccxxfO zB>hVn(6`SR1Mn|M1{rE`!={biLXUHWC-Pv8+b0S9rxcaA0j z=%G7w-n+4z4y6nLeOeaNFMb}X3#+I2FX?H7);c?z6uxM=$ZB!<9~^vzu}jQvlWnD& z=;u8aeT%O>_c&h&TsXyRuME>r@Ad-u=NG!dc(I`C&?{nkl(9g1`%NtglV}a-z7AfR zkrkny*0nL$!$PeF!x%f5K+f}iFsO93F=WGUs<*f=9x5@zU%X=$bOeC^KQvq=XQ77B zUm|%DK?o=tY<$kN$5Sf^o|FK>d;c>?BkE6a{KAiL7UsM9C^^9DECwk*=TrxXe~VST zMpY`6CAN4G^X7;WsO6&CGDpxC=+rayUXntkZhA zZp?0JxFQ{~nrXvvNefgj7;LcH;@}ib9;iuX$&_6Z#sLraQ#4grH{#RH`oN;rM=7@H z@kwWKEU#NKROHj9wj`inn59$cIItQ2Jneh<7Cg9dGZ4}0}#h~vTh}9Kra_om$pB#W{b_WQ&`*=H( za~yK4PS^UDecqkRdJd`)`GsXHRjJauO9jV`VQ^TQ6Py7EXT-H6N)}a&1<<`mb@Q55 z1I;j&@-5i;%UWV~%uIcoz=b%!6brb{Rk}bo!a>XwE`hSm+iGOcj6HKjBo^op553$6 zpdbUkes$O}m&2o*0!)0%P&aEx!#@@CUQM+g<@`{C5SNZ* z*(yok*5&Za#Q!B51w1M*x&k8}2tr>+K>f?X4YmZfuv@I12@+kw`+>(_z+Hgd`)1cs z)PIjs@A=zskblLYe^+y6>tCT1pT!b_y%x-W0FI;IU{Ot5i$fCj zsz2@k4dEB@^%)T7M|R!3!>1@VED+zmtfx2bNh*baXYBmN%X(rXfUvEc_aXi(i(Dw5 zVZrmD8U&PrU8*GQz3fN;p15qV5)0~Je7u;Q<%x!?di_R>@#!TX9Q35H@-5`$uT;z= z+bvbEYfSLM`4lL7>s<*)^@{+;WjUZO{`{F~+apDTZN|`%Qzgqw{pgD6_*ly9uCh1G zV>i*Ivmn|FdI6R;X$M>SwrqvZfTcp-;NFp&YMN;fs>HYC;w_cf z1jkCAa_djQP{ti#PF>vgqCK|T$a}FQ)R;)!$f&1hVVc*x7-JZ^g&40765o?`9#bGq zOjo!=AuzGICsAAjw%rC@s)E=T%O*kXKmqy>3>c{VRt=|W5d8_T45Z4|_MZ!FcdUSVS zoLW#rJwC0}*GLoXxIJH+6OA%W_64*>nN;R6!VXmU7<#9$2F6t9*a&@@jlwU`)_x_Ec5-#l^vk>dn- z4ffy18t;(7`gCVRSINlCq|8LpWxAN)PlWgE`yTF-6ZFX;;5b^ibL61%b98gB;o{N! 
zPy;vQaHXg6U*_ZAUu$Mgs_XIbh796c2&Vif55WKUN-yvi+evtercUuW*?X)UvR5EY-OOFwiO2`dViFHd8a zL8VoIxvuo?GSmv33a6j*K%m2Fx;<_JMvGSq;tIsu$RBJ^eAt0d_hsvFzmOfj0lA6N ze);9jvxP#GNR{iHc1?of7x}|Im&fJ3DRERMP`7hXxpng`gT@Aqki&VOoEAgQ9#h77 zpilou)(3hd@uzVP!dkF{XO&&>?9E@%Tb-^|v ztbSYI9_E*KJ}?<4TW-vDAIPRDxBxY6dwsp*@I1Rw9MZD(_3;b(f+(NGDjfy0`!<(R2k|t&58EGX3)G1QMAZEwpS-QK&mjO&>TKhng^xi zPL!+3oX6qz1ic}r!Iu=|R@`P4@aPV&|NYnJYwRJNPvxG=-Ia){PsIZ#M`a!b0h^DDnR?yxRR zpImag&|5ORHJaCJbROQPo2x~<#ACEAf4h3gr z5kH2CZ5S?I?khxzdxO1dvq6Wj<{<_uou~c%-C4gXq0;(PNR?ieRvqqA{#p7O$g}$< zNqj->uv~q!2DHFN_I#zlY@UlTu;hrH`EK3)Rd{CV=f{Q=pjGO2C~pKwIb|3l0Dq6h zthfOv;E-u`{h}>Im|ILJmz3sBUV4Pj8xhV%6&lw8BA3_=Rp>Uf?K4l5ykX6PW~@*h zJ)Qf~@W2wLs_$s!yAC3EJgE8mtjpQeoCune$Ie7Mxm^$pX)-ua5C{@QT?)d z-bAPL^7|~UTo)hU88qYx>Ekon14Z)=V`hnE&=)`yB}62Pp>`MI6&rvoRb}^GMR5jh z4N-vN(!d`MxKcW$|El2m#_!*td`{UX&|Xp5qml2!=TD2NWOp6H2H6eCl8d9h_k9Sr zPf2IbDa{UennUohlaO@SRbydi@5e-DyF6X|ObkEV+pT`9(7QfndTHLRZpp{kzXl+I z;{p4pYT$5(Y^i83^^FO>=V$*!YKYH%XT_IB>uat5uIhul(8Zy++ZI zbjV0$hHaNs;>Y6VKfo5lp)0EIdxmXCJX0ReNuv^5X!=ahAjpr3R1E^YFyamdW$%A#Si^5T3Kur%H|FS4J5Dd`0&nt7Y~g z&vm_`l@YRiEMle;o&jKX8LBf88CH=?mtL=xXuG9Wx-S9p<^o1{d=a$M2>`rXaKDS+ zIAUTKJ<{IW%oPuo&^!AFZ6_Rtibdo-noiNP@8O4CC(d5D#5?G=c<5M2JzYt3#=?Z9 z-7;=IAJ_3v+(zcni{ExCF>BzG7uo~Z?T)$=H0m*Y(n`0K_K%zVS3>mPyrzl; zW11oio+y|KYR>S}oxH!8b1&swTLjLErntQ|RX^l%^?HQOU~zUH>%8X1DHTtX{nRlh zAAUXO;(XXS`OBB(e72o^awIc60mdUtp9t*;kNp_Tb^Dhmn4kcDCyJWye2z+|fDCFS zzq2M_qAueD>1bRa4^42yqoU6y@h+e;gfIb_t0mw(%i;!}&QN2%gJEbhg@Yl;5OI#Y zI5ZRDmW~#^z<^*QO$Bt1haFj^9NdJLp~0}l-SJm%woWt0IgTu2Y~_)$Zte_Fp5Ty^ zp59Jy!p*^C^fsMGLb?Y*-@MlX;mT=ghTixRB`1c`1Dzj*osLxZ^d=r76+e+GW7Qnk4#?WZt5iDkk}sRzCL)V$a< zWtot0v1tOwCPk$F6XywPql!(zJmwPRLtiZR`LpGC3cJUz&J?+Ir6>ta_8uHOA!|R* z!=5ULj!`sRF!D}q=Cy@4EjsOVf;jub2ziGeYIi*pYRrf9uRfDQpfURZit(UQ+~)Vq7czry10uTPQJCbc z3@!V7o|Slczk{)FqO!Rk>;TBEW2`RFp=l7*EW6Mk#&RfP##EY=Thx9l#^^aW8{ml~ zyF=gO5vh7@lo#f?yd|X%L=I__qfBFE2Int%$vKa`PGTfo4J%Dd$XFo(Yj-oC7sMyQ zGfx47c(Y}mj?d;orxyce*WO_U3(*K_5B#u*+f9EzBMWOpkLNGRw_6Y*$r12Y{T48? 
zvscd?r?63-xKCcEN=e2e{sc^;YFSO_^p=T*7OFn*xRAeGh3#ch>#mX?7H=54rQ1Y3 zvfM3F4%4zpxY7K6eT^zXvvGRhM-5f0#Kx>!R7>35kfcJNotgfd+NPs|l#2uFH9hM1 zY^P;>gZvo4#{f*x(HJiC>oLaQDCa&djl|2^vii_~Dg=qCFxaI_mXP(zF8{iX(C1kx zK2y+EgGe@fbN#of;7-F;triNfgn6T;8h!(!gWDRk^b-J&IiogX}oUL13CU2v2h zhrTIsg7EV_&co1$GJfszVRqC|u^_R6A-;Z{A%&N(EE28a;w*jks;CfJrI6~kmw2OX zC6kfPEi=N#f-}F2=@J0enm*R=qW$Xur86(x>@s%wju(LvjNd$3*gWz}M*~j>9vJ$4 z%M5be$!8JUw489lSgaSYgAxF`xgSlEuoum#b_&&Vq5kT-J+6Gja9R}IkHS+7?UKT5 zDHA+mTEjUnhG$BXE!%HI4njMC6Fz zV21Hyj@_l}npq&V4;eUatUd5{TNaKhdLd3-ZKQl+c@MB|gosTb20L}(D-ojuizz@R zz-^$7#c0|>_j=e~cIr^Q(sEqQ;m*+K^>^2q_zW7+hdP3~SY)$Xkwu%HpEpqNie8-I z`h4f<@aHYuGpXuZR)zi9i(MV-6JuE<>uw@n@*F^K$H z*C3=0HW6L;WmG@)dW(DH^=89|UI6U~>}bv2yq!1JPMWdW%5|sEqS}d7FuOZ;ab>*0 za>vRf7D1G-#%w|m8S;0rJb(!k6UyhH;z58GQyK@p@W5Mh&Y2AFEm-8^cD;gj0LQlj zJyNCiY&mgOuE*=V$?CX5K31)zgp?I zxjx5=sdtrImcy7Ve>xdCk{9THQaehab?=+Hy0R-P`4)z(I_pksXcMD8+_?5LNxCfI z#l{CZdP);yztpyox8lQ4S{8y=iVLD9F0gMfZg6BpD9l;(eY!>=wa6->d&CtxH_JOJ3@SdHK=pg z6^Des1}r9`sE**F8yv1yjY~B$6h>)vyzJeS7|G|j1R=m?Ss43n?9oOYT(eOhfm&j* z&%tV;jnQzA`%@F3qTWrAu=_(3%xHj}`@wR(z6MgWxg_z6sp}PGl9&nLm<{&kkL2t{ zETo#%&w0Y+ric`FAv)|eqhfKdg&P?_i=+cVFk*CY>$O4s7%uUqpFoL_XMJ5B>U-3G zYkt2aXwXxAf1eUBo@e-|9p~5oz-{m>0d5}oYyz@gDGu`cTQ&a#_uS=7@1W5D^|amu zr@5(+mcVsO-1aBf?flp{QQyRt#GUG~JL?th@D^xZe2o9E)ko`ot5XBw4_5jcS2LW` z`qNAt?0~cj=aBP_NKIs`tMzE#Mg@5Cl7fj;`#uAn200*G8 zvyhc6?A7KT1KzN;oB-{dfvyn63SNEK5F@ zw_4!EtmdA(0{y2!BDbivYR^k~^014W`=w9NO(SYcde4+{h!X_Kb*uOaZZBIXSpY7T zZd-m-UXP4;FJlR=bHAHDZ^+SRYWDG*X34z_2P^%okngRHkTW!f`35hYy;RQfl}o!X z8L_BAzfW>L2#y7!oZcyeJZ6ETkRJjD#Xt2n@#1{mDZSd)q_X+JzR4}A(#^F)23YZd zqC&j1{z$UfR&ICpMipBb(GG(Z|HI$~k?>ooquG+8v{O!|2jIa_i!Apsw&y@G*R*J< zBAg}CS4D-~vAX3#VL2ccm@BUz&GA|w5*T<@-z*UZwHo0Y^n=3ndVIm^FVF4Xwr0pV zwstV|S-d&oR2d->!(ppaU(NmS-e zE-I4u+axNCS1V2{dVFQ#C!i9W%=HDC;JK|j%m2VpLO#{;5K9}w!BQ$0$U%iu5ndc& zFERT2?u&I4q_UY$U`ny$h-U;Sg1=-$+&;I&az1!$vU7$_T;^u3K_lgh9W;7>VO?iVy_#X z{i@`kZ_g^=HrR-Mo$EL8u=!FGd4Wyav>uoXTIkY3^HRwjHFdT*L)hW_LU(eVr1YBf zp%Q%}QdSUSBC)I{Ze#Xdl(6}aEN@pN!I9H!{PLwV~h zqCI}!_OZS`#bca0+MZFD`J1mzAD>%|BWd8q3s0yFc6yg(-=?xp_o-(`e95%3U`py% z@7*p>d2oOflo5`#sQK+^R>?mAq{6}{Qsl6OzneeH?=kg%7;Tq;*ieKi--2!0*`2;`VGQ zll!a7Sj?7|K9o>*w@Zd$Q|r>Uu7|h*-@<-9TBR7$gyK1d8e3YmhvX1nHnUj{m%k*! zyzTSHd-MtUz${#1ly$SmHgupFYzpInL3;v+?{=LYBw5Hzv9}3W*S=^d8NYkP;&vHD zlH;DHKN>()L5Ld3GLqE^Y%s z=bO>w)sku=temurM)%r#Vj`wZe`R}NJzKX0;9;k>L z(>Svr!`J+Lx&)MzgU3&gK0cQ{NII&P@-y}$m-kpgEJAL%-0m&BnazFFF1d9-mIqd< zc5*bcE%Ja33*>o>Hz#r&qKEFxSP9Qz&zq@;CanXC^3)I^_i2Yoo;?u8bd$f7gZhzI z-I}+wyyWLO5IK{fUUpoGx@>us_g7Vu4H#bYV5jGIJ`LrR0Xq1%V8`YR^wHUzeMeR_j71ud-nwerTF+1=KUmzgRK;A zb6ujOX|aBsEnY#vJxjT;;AMaJwNCkRHbkN zX$m1(N0RySM?jbrA>k;*&!F%&@!1a!pY9v_lU#(#%4?kw(9NX^)0CI)`1zQ;2E=zd z8tCQ51TjeM56{#I`1FZKA*>YRokj^A`q76zfFFs3_SNq%B3KF_%F9cc~bV)HnZ*d*8(UE>pqa=XWx?tBMN^ORzQbk&Ei`b)D-HFk-|ZV zAUhUO>oFu1-v)!oeERw`jInGFup9TljE+?XHqVl zc00>(1}R0*M2@8|+^wrh4wzFGaN6YE7({&6Ogd^FPKBj+xRb`&{t|L=?Z@`>q03O6+CJzqemg^5zTYo2CK zBV<@t9-bgRAb`4vTCvQ}emmIMqO)+5`ZLEm6&ff2T7JH5JS-z>pHlvfgdN6MAfVGl=oM`VDG-AFNSudz3R*-wJFrr0Oo<2y5U z{1ILs{uq{2%ppN^PO|wuYi+4)rm)j!3jjxN+4qL9MoSfT`LZ+y8tC=Z`vIAN7x z0`i^bj!znFQ?#5z30vJgNjqV$^^}fxgKao6`p~zkU>dF~y}^=>Si)En`i$jKYRK&s zGlXivmy19*@2;k1((_B&0K1jd@w!~g3g|9vpMq%QymIyMJ`B9&d#}2P%u0*-!A6uH z+y?q>9eZWW+k(_SP%-im-(P@092>=89kI7;9hFG$5$YT{u!%=!_u;;6L(MB0ulI5e zPq{#kmnrVNDpge(EkYeL!Wj*G=pa3~QW;`I;aN$$Kpg;Oe5SsG8uB6nh@hBy^e2w1 zZZ)SI45z3=%*95cfW{=GMR3lbJA~iJa0W^7%$Jp3^gOKIob@fT8AXS4FzLx#IPH3J zK8F^=4EGK$rsvn#K^UsGfVAd?=48O^vYh=;wLedG`mz2V0iH1b-s>OAH_U7YhII2e zc5meYL|#TYIjr*tb(C|KG|plVf$&}%NP95L%?&8x`F)uouK7GdM<~s)Zy&}1N+;w! 
zjo-^Noo|`+&veqC0);OBRM9%!RUTXF;g*r*XhPw&uS#NMl^)=(-jGC4l z-YJsH=zDQ#s3x7Iq>;4_b(5~Ao_cehvtx0Fr-YGT-kgMKeiGKX+dJdYX1Zs9(lY-R z^V1;XDe+)YXCkHfx=1*c#idDA8$#V#SSBCBnK)G?bzOpP#=LAT> zjO}_#__@0##Zht_RNam#jJMq!hSM3qKefG3TQOE+jPu^rbd1&BLI{eZ zKdrbMTbDVf1-O-pJkiNy-4Awz68dO3>kR>QcyT{_pZ-wcNj_#N&^pg8uRMRRY!rPL zose9Frtt}%}^2kMf zv|ip--(TAIm7sOdtfEe(5_^jY6-K^z(-KF&TRcJ^K)4?ce`PVzHBfAW{|z9{?d{Aa z=V;H6hVKnct0%DbAim#uXUA*nUh2Mh`mg1OYgYPt#I7+uRHjo^+){c0K+G=x0gmJMSgGVw7$s;+o!6<{mulE&m-=m6y-Dw z1~ydWj*-u^r@9XSelFW)<-m)H(nj6j+=P9H%|HUIcVVGn|&oRVvp<4NQZF{fbo<+`9N=?(D^oJK7*jm0$YJPaEer zKErZP-@Bw|G!H$cC+)EU^N!5AmS%Trf;J7M8LA*$0yh?ZKq&MMYDi{Irz50T61vh90P>!75zrH<*`tb76Z*;9P2lB-^ z&zcV?JOC**`8`psdc``hdj1y;;}LVI4`bp4_Ea8f31biFn;VCR?}qxy2A#AG)CJxjrc}%%u?Wjw7J47+d4}Pt zc$O2Fy65o1a7CM*`PoydC4+Z0{OX|F4^8|*|06D=hQh|Tlu>Q$lV;O zLiEs`xVsb%jmr|gU%OWuZLN~yHmuE8;>x+CuRfjhDR6;;wo5DHu?aaUPDj)=4_$WAL@UxsY&#tb?5G`HiauaGdv)o6o=rRxhpf3;uo3 z?eZr3sRAc}7s$9LXDgosCazumTfFJxGfCNbrBT6z760?3&Zi3J|U4y z3DYGK(}OBm1-opRxo`oeF^cazlmM+2?ME4T__N_(qD?rMf1caw&JT!?I>i=-Qc|`{ zxntT`WJ9G1Vz3WNsB!Ib(a#aBZj^()^S*@$$_8ybfp_CW~3w+xrpU^2VbzjJ1vHn!1R>vFHmc}3nxmaWpwx~ds}vPB((qt z^R-%+FHt>Rvg8Vft(n6E!M`v@*1vL`fs1{A=0>2vVdcuFT35cT-Xq%N)G(dEIT-%7 z6S|P=cCl%_IfEHG+telD*ivKEp#az8l9jF=jgLzS?sVcdB+0cE$cYR(L97wL&0a00 zw8xkrZ_vx_WeI|3PkW&u*?VEBXl>rlBrx3qGe&~kTQK4K5tu7p zcPHNc1;2sj+-R0IFogDS1*Lt7wi+sNU~r8UveEuPZ)IC`uwo?El5oBfxRyD-O(M(f zZB8a5PS3|~aFV{28e~OFRUR!JNldKDchX9m< zT7n$>z_ZWuRc{J?3|r~)V&k*vcRPb5r|*mxLuQPiIORzGrju?ATpFKAPV<6QOVve} zE={&sCLm}}zRihmDC(amNy?_!3@_OV1SvH!B}Fh&#v{e0skpVL zOGddDG{yQyW^epB1dCT@`gF5G_-%mVGku z7Qn+ePdJt@*-B1r6b-Ll_BebX;=bfFkiP8ryI>+i#B$-@6>eW7JG=^5XC-gF` z^0kz$%tN*$4&)wfnFfRE(CS)ez4T%^R+{V3b6V29Gie<|H|4ai@rgL(z}FVT@9y}u zt7b(z|Dt(ephY^6I-GLE!mSW^;Y5^X{Q}9lJSI zUwVDHh7a0kn{O>96i6yyigwxO-w}K%OE<{F%*+G0!#dNlp(JUDjwM=NQT{c!${=sg*MEmz##Ev{BO_5xt8t80C^xW1=9>L_S5o?G3Vqb#Oc?O60 z*X<8cTo2Wm54H$VQ*%}+s)XX!$_9FA-RknChB}T=B|2_)zeJ-IjEkP6+;H8=daD%o z`{gKYV(~TRWIuGjTTf78B5TEdU&2(qs(^dv7p_w5c!uF`L^KIgp`zxIcvWpoYGp}! 
z3K*p#)am7S%OQ_BG#95`c_L-*fu!-CPL>aG8X9|kv(gUzS<+=jDlIW|3Ju5$Lyi?K zv~z>mJcAAN!#PV;R?=x3E`rrr_5I8b?lu;IGV=5BP~qsS1jV3L+5KL>d-uOObau8i zb9f5%uf-k8sKb+Dex?$L&ezva{Rv~v7SDAV7+72;UI@eUUB+_nYG8VMGt})oC;cgt zqKv}v_auCbgQbTmHh@9anQeK;E^zk6@YL6FVkzPI{}LBc-i9SmTZhQp3?-6oIU zM$lk*bKC4n^YaT_^!@fiGl$pIW*3?|eX(wiONBiVYX>1OSBo&1u--QZWy7vmBd;d> z($tx%ECCl?dTn5pb@LnHVz=(e;&B%d%`%g3Gnsc*v6D#iK# zYb>hpZ*RH(^9Ow_J@ORu3nxUzMjCH3Au(~X z0_n#Ck|~48l01!G_9aS9A6vvc7pOSZ4H&~<49YEQyvH=4dzO> z5j9{(yxMBy9mDvj^!S?s--)TvU{&xxVI2;B}mzc4V|fXZ35>K{gDZp>ZaTM4P@!# zSs1#^s?7m>!BSs|UHRZt#~k6|%uFKU(V)v#W`vxw z=$!MmXwDTUb@jzi>v2y`O+9(q{l|3nW5^-TPXbp|R6>zK1MCMkqJD4%(uL?x3waZk z<4TX|5KAadRca__OpyH`{LR?KC;|* z`opNQ>uM5Tu9;}$iq>q7xJIeJP&j0n`W@C&bMiT10P8-=sm%WIK&Hs5O|?rtY3qAL zZ;4j00`mI#Zq1vG^F0Cjbbl|=9X%BKY*bwS`Io~V!SC?%BOJfx8I~C*&0Q{HqP=5+ z8%E9}>*|B%-wtUt8L>5D=YQ*zO-CfZMle(gLkd|{w*?>N>By_eC?RBM{P|_O(IatW>Dq1s;*LkT`o|Q zz~ijqs`f5nHJKS1pWH<}TFN5Cbv(#o^9Q7%iW%&PV8;~ktE_TgnbniU+r1;RfcDC{ z_#%5VvBF09Ded1k^t+Z{dOB9CClMi0YFbYmTyh5p=EgsRXSmc#%jIYovJvyW-A#iF z$`PGWozRL(7bYfgrm!5At75h{v878Dl?`^X=u#49iOSe#ZYq`ZvYDM>l2Fz@_$Rv6 z0;Daj*O<`La)`we{siwz?D_o4I@U-#mvGFI+Z7srvHRD|)zw-P|9*P;PfYP2ym(pO zEWl?KPjHNfyAiYX{&WqZgI14Tsm}_yfD>lYn+N|%aJmNKlzr_Krz&h`=TPVeyOX8G4oM}PKzN4Ed(-}X`Y12pVUJE!Wo(%uRk?g5)-w!V73fs^2@ z_@9kB7BGHxy}S_i&%V6^+l8NRAI@8IWn|O(mk~V$XMNY6a6f)Caen363GmrcGfo!T zO1(b#XRmtfq&OE<2>YuK3x(?%jxmNMjwQm2yHX>B8}x@!3sq!e$FmND#m1R-W)uIq z#O_Czn0({s?mxT4{$eWE1O+vvT&-&!mDNo~(_T7ynoj=fie}0BzaP}BJM&LJ`2W84 z3dmcpP0MlYSNi^uVfX*ze)>yY1FvxInh5pZk7oa_;Q#$h%<~;T*|53!Ghg6uKPCVB z^B?=yoAw?DuYfrAk>~&Q`@j3pT?2*N3p^pHK;X2!{mDmNtmAH;zkXW1$PphZ-zyl} zb1-kHJ@_XE;s1GKt>%;G&k%&QG6HMo&wC&yd)82pAmn9PyKijeKmD!Kl4^NU#I`Sq2t4QE4CSj0VV@f7v)2=>`*+X(R~G?%zFIR6M3(R=D#XLu z#2{>fb7bGpPvLnk`!wYQ2k1kiaVdLb?)x5RHh-(D|NKVPK6-d8WITMAX;-gdMvM;L7=zy4_VUFC8azyInRC?G)bjTYe9y`LIE8*83V-^`U5 zgp^G5B8)q;dH(ZH3~q`R930sk)px$<^#f0~tCvfeY5n|IF10^*6g2)5R{#D#3l+4y zsSfv-{if0*kwl-=Ae@B=LfRISLF8 zVYcbdC%A;Xrr#PI&tEM+M}Tk!{fE;@+RPyhAqq z+oy-O$CX69`hMbxFIl9$R+=2aaomV9FWVWi?P^l^Z*GG4IjSox&5r0etf-_k6rJ$rmwsSKF}cPp>4;gMWUbujP+ls5L8RYTf`^UoqCA zSKGDp^=|{>M+WV`GEwb7R^i3=$~gKE$XDEwmNpzLwz&bwP9Fd}=dp%{ z2J}JTZe^@F_-jWtpyhDUP}$Nlqu72x+iPoq2QWe1rc48Q;tC5zu1Gj0swLj*ZuWdZX`OhWZY1*ctA0WOV^OYrg^s%%MlTGxMxq(szR!DgsoQeMprgt3zq*F@ z;9S_f^Z`KKcu_CF)B8fpz;(0YTteFl>SV*HFC|ZAwUOb8%d>GX#&tmio;v$!B0^eY z3m|`0kUmQAcC&yft7Oj~npeL0?X8CRY~o>`Vq&-fy7&8$ZkzRb+))hx1kNk~V*uCs zolB;41(23o>S#FbD6@Nh;|n?LwY2w0-c9DXTT~xjsdV2WcoVswOaG6{4Nz2UMk)N* ztynosY<1prc{wZU?d;Q(p`nKLXvlkeu)3<@#WKKB)41{F{b>2cX3BEk1Dgke{U+!B|17|qOP`M0LDC~vs0&88JFoP{mb@haac{scpLrOUujzaC|9Zg(g6?c0N#(`f;18DEf^d6752LT!i#Gt-4$F=f4l zIFl^dr_7;Q1?f)iHm@YLvo(h9+9#jckC`QDw;95#J2qd2g$nxD97T5Vli)zYCT?rQ zi6}AM(f=U&`(eEtfIx9S5wZ_3q{wsZ?Cjve_EwuYo?JJuySG@}^9=|Xp(7_Jz}}A%vR&_MR_q0>GM1C;Bg@4Wuj6OYY95M>V#YAUC1cE;|CH!+fr` zBNFXzz|7%Jbo~7-bQt{jWN}>4;YaYyfYw@>-z)WezLdQKoDoj*Fjeh;+Je8A*Hf>a zym)XEZ~e_*Plb2E(m@{cjY%I-m^j}lVJu>E1}ll|g_d9@Wg zhfp{8gX)yLW+xL;fS4`vmkG(dPg$u#+<1M}^A4uFBQBnR3*A3=vO!PWKYvT}B8PW( ze#gW*dk%{^Uc8ScL0XJMw~@LEEy+21md-&vpY*^M_o<(-i?Z*h^Mv~)=$MyxhO3_T zgCi}gN9g?c)h9=c(h76BbXyX0ieAQDd>Z&zb;3aov*~!QX0b65mr~j^qw!v~edc7l zfYbhj!~L2@HzUdZ42MzjrrndzC7O*+U3YID)qYxZM|bapF{;WLaFuS}@d0u{Rk34b z<>mKcM_5DDk1aGDsWkO{pB5Oda$o)?#TToCaVM!e7%{l?iyz&=UiFsxAiAje`YDI9 z5xw@Eei|2Lc2VmOVL?Gbox&ABbfa*y&rsFPt#o<%_uzZ=cT#sd;x z+;{t~q5HR<|9_S4{+r>iL-pI*{scTzhvV*ijDOoVUu>VYRikMMvv_pJBedwe0+I+1q_bmrL zdak?DzzKdm2XBO8bkk^MOr~b3F$$J zLApT^q>=6!y1N^c5QZ-4?(Q5)x`Lt z|k>1DM6{tL*R^ePD8xRyNs&5$Zb@L1k6-5bCYNPHg5aR%@t5{|kRZ1{ke?CdhY2nmbQ{9zEP{pMZfyel4=nV{lOfwK~?;IO$co2>YNp6eqv^J 
zQs4BG>7c6_^ipNiibr_S(cZ^GN0XzbB1P(zZ#rVm%817X_Tp!1->97a0=~7Z<0X*kB^88!Wh6GY!?VYD@DthIs%tC{XRtT7LNe zLH!9(IQxPA21dYR#@sPyw>R4!{|jT&BT_Y|M}PTk<+PvWV_7j$R&zvJY2};i zE!98cGGTjRB^*QY%)Ms!ny4%y{>yt-5R*-%o?3KGofFLo^69m92{36F;z{6Lpia^V2?-S}iF5-}5p$>5v#6xj zZi9nGOjkE!0q75DJ2#6m1rBOb#|e4x9DrsRU_J)EVK1K0O$2-BPdwhO^lbQljs^i{ z;LP^EayqtQsatV)yn5zwp(gm(k9)^Y=?i|X&_i>oMY*M9SLK(aME|1PipHBT+*AHI zC-eqw;A_74n&Z+iWj^coOBUN=!*6CfXLGe^zghk#X|6KaRF^DE0D*Plw{ahy8Vy0q z#I5zYV(rW_92*(f#Ez;u;_1vQ0{v*#(zzk>5#^hjhCQwFl?rw`(Hyu)T6&R@mS80O zZiRRHbsFEo$W!&%YdTTQ#IlpUqe2}|tK>6kG_(+yz5EoiQB9vv&j1-qOg}l>pd;JM zLoYGNvQiNgYA<6`tf$ZXPDQ?AJ-@#G6aN=GWrn<~z@Na8-S9Svq%usE#nzw1Zh?|# zKgz;2g^QBY$?;jJA|9UMvuwal;bw|G{n>T>ugQZX3*GE)1hg+`@~Rz(-g{2vsGR$( z0FzzmYSeXqr+*p)Fbe5{xp*}|ph^jNS`kkG`DN*+*suTeJF>q{GeULgr!Gz=T}YVD5-@=~LK zbm$xxA7ur`kD&rr$DXIrmfzlJ`E&j9?FWDQe+AqBSLYQ{r6*$9ADziI$95-EmtlNg z%Y@L~Ii6vxlwo+pF$ZILLEuUQuLvALXtjd$m|mpg_a7;8SxPaIMYA9i zrb{W4!bi=$woj>Le`%R)8dW{)pj67FM7I-9W<4!9HF6f3S0N{;l@@a`FZ~8to8`!2 zjyJN_53_``D6ijg3zcabyPBkcTAcS{=UQm>?bN30glh2&uX`5y;!IyQ_s@o^;# z`N}F!!(pNJ%8VAPt9}t`#Op=*YF%Bfpby{(ap?DfMqcx(7TJ#$k2n0%XLzVJXGxz zWLcXKpH+(CHRL~yZco7}Zw2&{WNR&>p|-NZ0o!NF8bDwjEg}uV0J$sK6Gj%T6$1P_ zTONSAYZ%N_8v($JdjYfsKa5W(0F=lGa3E?4aG2G~ zKLTXvy7VDd0R3sZ@o*qSJpSuH{^$8$(fS9HXQoNrQ~sIZL&bIu=&fqgJ)NrLxla5& zjn+C4YXa}g0sRwZI*akWncxtU;JI~ryEXxq20M$n(U*6H;+$+RWIiMn@TR1b_M@Mv z+eTW-O|ftehn}rDCSyLO&b{hWT1n55Ff6g3|J|TPE(BIY)V6TSo>-9_H4Tw7_tX(q z9D(L!EVx!rteEK0aBCWyU_LKClwK9h`1R2LarDufG|t(S=_Ij#tZk|I-8zEHwhS`O z`9+b?nYMq$gV54&t_OLy zdKaY~=?t7MthiXPNsL*)9uSyQbv@8*l`6LIpIJyxMoonOuWQMVW%$C57!N|E013QA zHFMNvcx$bfp^2Q9^q|DEHmx5rA1qcv*oGtSZc%hR7eF_@3S%x7rz#NYicZ|o3#%ID zm9hd}a4esy@E-6^YSKhbORmPIz>)I1Iy@*IxK;bo`Q6Vg8diRg_}H0sCkJ@u52*>E zzyJHiR$D_G+?WvY1|2G2Kym{BC z_2gF${k|g8svA+$FRpPc4`qs^oKcZ#S-B)}$dX6{G`{GJe#~)o&PIq!*57rQR!Nay z@2V;HJ4614%t8^!IVwjsle26+ALZ>w0^g< zrxvHEYOf1X+KX~Cms*zz(K1>!wU<(vEq(7CJ<*8#mTlRpG_8-S6Uln{B={LKQ!)|X z9giDxztpo38MR%Ju23!dC!tCf7A%yUvfN}nPr7)!{kj@lZNJ-6m6awPp@`e>zX>H= zi+vn%6Lx$SzvVoZEg=DMp0}*awuAdLJPGix+f-O6(-!jVd#8wriMbA7odG|gfmVV& z*($ZEiN683vd?IrHLv&y0|tulI?#Sy8)d>>oj_r(w;V0Y`~ID#5zu=W1E4T_0h&(q z&Nv~^l_|6hTsyy&k}BG0kh(_uW9p8TJAfQjq&xY5@s}&DlLgWd7O=nNI4Xl*bt?@b z{n}!kAL|faXe^O_lR0gy7FQ(9VUoyJL5Q@`qjA2lNst_mnlL?!C@r9o*)<$>$r4<3pV%4?uUBGxD}jo(K>s0?gplbixw&MPXV-lB)u2)o zHCdX)Q6o?IG-+!F1|!H^r8v+UA=pRcEIkoO-obkDDNSSvBVH!3SZ^(&oChj9;ZA91 zD(qshx>o6TI1?N;J#nYPQDy~=Me^=G35$rfy{si$-_u#&@*L~zl2t3t)FXJwWl?OZ z4>Nf=BvuUm>fbZmtk%_;d%`9U$N zF#zpcOvGkgx0?Kta!EV*zFw}XNMNStx*wn zuP7nPP1jmD0-&;f*DNz<%P9MTh)BqL%w^v{F~Mh;s3U(;D$tY6u3WXMFkoKY6w0Nl z1Kc9zch7D1`*+GM!WwFDHsMyoo%@DZjw;M7J&yM3T#*$cJ;*iO-Ro8V=5HCyM%-~u z_Ud(ao7lSx;RL0)h%#iNvPe=F7{(aIg^TvNCNv-==^d?UUnF-YM2Pb^)H5HK;}d4kx;K@OKtV4sI$P;qv&ol=I&L~< zY7-v2#X(h#4G9hle@hd;%x}!e!FZt|jJCVTw6>~)Bq2gy+iJAJWzhsI3SqSN{r%w; z0m^qy;Dd8LF=4G_m)Xy!8@Lyl0WFu5^pdYU--tAkROYtq`gITE0HH9(;YBsWsIyP+ zO^5e!PVbiF76Vh_>tQ0q1rcic*|v&t&ih?ED!U?`Ad1?iU|3T#T4gF0fX;?88pW7( zm*j?5n`~yi!r-8@>A>O2CX3Ms2Xr-vp0eBsBP5k4bBbi^!M`49L;I`31}6<5wz#sh z5}KG<91|S&e{G^QSx79rQ1k!Aa+~a7QQ)Ppgw__kDOGIJ5-6qoytrKlf!m}dYG8;I zr!|7-G2ZGAxAK7gQTc8y$zHeQlEQOcm_99NnzsDm{aGdp-BTy47+d*rg8NmG1dmEN zAHb3Gczo^?!9zl~u-bupOgMaWZE-k);cna6*)3~8>im(@c)h-(0wdH@>5&}_7BbZ3 zLWUVqb+#Az*b~HV!Kg>27pgELtr+UB!I@#Z%JTwatUjN+14^onx3K&IdwS0>kskZJ zDsM`4cGyluTSfAv8q${)Q54E9k*D}Jm)hsmj19vnRRVg{}4gr@d4 zT3ezSyKec*g3pJngf0o?=9WLC7u9O-SYFta!oIEPX%pP=j#^u#=E6)$BS2eYCTaQM z>*`C5q8gf1*2BL*pwhOu{OL__71Kj);q1pZp3yn1S570!k1T71tF`pHc64^3{1rs9 z!lQm&Fyk=BapSN}QAnsLB%EoEJC3WZVt@{9HJLNM^Vi4_|GxZPtdd=f9 
zoj=)cEbKH*3|)7}b@Ut7^{z~y`>#-&j}^W!1Y9f@8}@OiZBH zC-=T%a-|#n0F){~x5hR>`Ll4BJm7z5s?U5@XM$A`00_jz)3o(u z)w3}qo4hx#wmf+!i@}xb<*T%_Isu!re+v3RXifO2g7y2crYPp5w$SL#6yve(Ne^t_ z6qr14ai&?hy+V8C0y7Jwdn0)<4&gAZsGc8o#=^MvwZa7^QPx1mX%4rgexW3ZgLkD_ z$L9fPj>NeH;m}kl^ zmrTg{mXH!A(7ySgeaofd&hm8%F(hns?#{F)ovdFo89V9Gglj{=PA=K zr{TiCD#%yJndQ*-x=b!S`hYrZ-bB zb8E^wHrC|Cb89IM}qN8#rOf;9e0N1HlUSY7#rq{V@6k*di{xNOeF4~B&@H9HW=IhLni}%spQmG`$Wt6Q zgq#we_LD~e*zl$rryO}<%Ge)?ib6KFtNHb%_&lxO3K<=Q*U<4&r~ z)4L@XQiy8?B3`7leG-j((=>J#;ObzF(hE)*MS-gQe|nfcy*cH&*ll4YWw(O>^S{bB zCaR5G|1Q;2h=$4)X|OCImrCkfq~cm>&m;CX?x=$ zbQN}d;sM%);2;TLVUHV!Rx9N(BQ0_HewKx|qpv@@mvamTk$xZwI@|HU5%#mhWR>G0 z-F59yHl?N{{j^WHlwrm`xyjZZ1_^MgO`a zlRYhWdg~YcL!I^67wgd*+})if@BAr%KQ?nBfGT%^HT$(Mw^|M7Mf3;meF~C9GTFSk zSu77+8w(zZljg>fhz*Q<|+iOb3Vi_@V7JFXi!(x|y|IMqYUzcVEt-0GuaL+;8DS z#cOD#J*siKZgNNNV}S8w!=T<@VtxcHdtDKaG(xnEjH90T!*s;v@T3!WqO_K0e4nl_ zVvPN4XdqowSli}(+X_~NxTm74C6}@nKnGM#56 z@8(lXbJ_VAmlv3fd?+r#kMOTWX@ z>GQ?IBce%R#5L~jJ07-2>?9I^qPb%Shl`QxZWL`XA6^DEH*q*GvcDv+*oe^oVcSvq z$(4I+nw6uU@&%zjg^$MoMnovV%FIUvpLRBj;myd|tVzfEjb|GE_$vLLaT)6?FRA&A zYTdhv@sz=pwiYE-)`PTl1*&U$RZVCrT0oKUQKgpAF7nBBEzs6+7|nx%iAFol3o6zm z<~DS!@Wxecz&9%3F2W=8LR3PWVJS08*vGiet!`=o|Q{FQse{Yk1R zx-x1V#7Jw8C25tm6_SaGf1W@!POu^R2#3R|4fE8I-7iR}Y%rPo@{aPuu0&W7Mo*?{ zro=fu?yO5TrTlc>KoBC5QD}z!fkD=u%H7$uO6=b_MqWZ0jg-15&)35NaO>-ht`2oB0$Rc-RxkoUN3`#zK*W5Zbd**p1h;2Yu&dXBL(UxuWqq# z{sU73Ps3;@`T(3>;!^+|E8bvY;-%kE8(nRi06ZBEFvE{ck%*MRAY7s zP0Z>MREO#!w^xRB_&Z@-AV58Ge;$bMUETz~ z9uV6BDS3kGrG%nwjdyk6XAFf>^L7hldpHx`ZwFL*aZu$eCl5Jhh&csq1;8_O$IN<@ zw-cs5J<$DjoZ_%(;AfAD{y5i(gPjc#D+5U+0O#2E;}z$x<;K#vj_z{FhF{*9n)CEr zpSklcG_oZ0zF}ef8rBy!8d!OS+n`f(Zo3iHMBWH>UzzF?R{|uq1e)^b*jTo36p<*# z@C-MZ8@JFK(`G-!TTNL3fK8Ln0(rH|%=;fy+Kuw|9H<$H!@)SIqJywhwEWZPT|iTX z3q|$8bZ$bG4!OSlWb)eVG007kvL2+dGM~PY$IP8+9ki|&gv@=!5&8@`LMwYyd}^sH z=w-efp@BllfdlS=WHGcsgmzOSK8kZa$oD+)_#16`WM0BjrJ^Wk+ryy8u7tp$Qda*^ zGWzd31++PDD4Qke$tF471q>uV-1nqKU5wg^VulgC6PVA@Qq$WFA1k5sqGNBQH3PU| zg!{_HORK{VL4+Qp8D`o!c{ueZyG}()z`l6&VdW?QI#7!TV1rRpw#iPO_UzFk1rA{N ztu2$v)9^cu?lr71;_ffc%>{+wXd1G0!l;l~1s&jAx+x}cL*1>B?W#_AxK7%tqMBUM zqdF5UY9`wsI-Dg&(7ZW?T}ho{XGHzBr5s=*x!JP)%!AjnZZkp6@-n~7E%PkU8b z=t~lcmT1&3)ztx^$8ajMh7v6&s+Aj3FQNdA0BGxZmOO~*Lop*WLy3HVN0@I}#TB%%$y|3}it=>)bv*%vtU>-aO z2k8E&OsMYip~g}5FkD_&_e=A&ktu=1NbS4pi%0_WAkT)Vy6&We$3(*9+zCpWD-sA) ztG|&zGu-5rgWmB;@QE_8k*vp_R@j!2mj9jx)UJZ2aw=(PncrUhJ&>n@@OQFWso+^P z%?_ddOEG952GFt@W0k2Vto}su@hDT5e?y^1I`TPg>8cksv}IiZYMm>6tNMX1JbqS~c=O{YHpEj1(nz|=vG(A!|ZqMZcT zGM$ENnr&%T1jl>hu;1=0Mi<~K98esCaY{%=+^Mfmi-ORmssdIHHw2%a%stWP-O?&M z1eJUG-JBX`dr#xtgid+ifoGJ2UtE%LY>vVma`YFw_g2$OfM-$MarQ{11fJU{tI)n31E*mfhST*%j8n549XV=nrX?Og-SiA} z!bD3U6wUI>O0>Dg#)>*ns+Ft0^)kv*+!=Pz1*plGvwmF^L`BOS`f;j4R%v;xA>Z$j zceq5c_U?{$s}a6O)E9ElpQjMl9<}Ys|E*Yx_eY+V!@J5V2O}Dv^VHPH!WYx4g;hn` z@X}DKU2m0C!N`JR#ivIyx4tRF$rf4HK>>j?E6AQ$^r9i9PPAXL4bnt@I@z>QOXjFE z03``1fP!_$I9hSh52ua?GD>l4NJXEawL5dzFXrebv~d)lp5NLGd=H;97Z&)_?4yti$nM zF#-nWR-ClPTfrhUjFRRr0CBoxhJs8^(hzHAesY3&r zyhGD&9nl`$;y9CB#(t!0yYGaUPWy{`EU$Q4^nRTi z>xI>yg`jzKV#Z{y-TQ>z7jjlg6@(VJ7vqtIO$=otD{VU2Ym*HqCcz^1%d6TUqmkd8 z!q2=y1qDEcaup}KyAFDnz-9e$Z*5mWFkdfp$v&?W3{dkL6hs}Oabi2zE! 
z%6;J1zUa2*Z|tW^+jELKr!JwS(zXJj0x)cv=XPMIa;s>=kyjgn19i55FH}9+CeRxQ zru1yNc>h^+R$ZVF+MruIZVWTWec$fFB^VUPcLzx+S{b&!fxHSJ~hl=|eMfbH+<3j1MIe%`MwGwXqeFPH%1K;qky zF`^SC;gg(MVo2YndIV8lNx)xy5Z}%t>9jLdZBq_)-|uXzw3k@h|ucw>Aj*F2_0D4W15W3_IS_Kki2ZW7mS>>{${n)b^%JusV71{H*^5I)!+uN z6@ZBqTwfB#I-#oCm^guHP*os6=b(5DzJ6Lez4E#Z6&ii>p^T|-0|0^B8{W@PvCCYN z$akdKt%)kam`>(r$b%xkFJ7RT^tvs7wSkDq;Jo5BZ6~@ybp^XCs|=rC2#brMXallg zRCP zwM&UPKvbpCQ?GGy%1UJXiF?c7D}rI<_MA4GR`d}YrNCo(Oy*!~mC50QP#X4k3bWXb zPDSWultTL9isrR31^el$zJA$^7YLh0Tf{PcS-aB%=`9tVA?$cfgEe}5z_$UD=EXXt zU8haG@3Zv4?K9FSP^SGO6ANf2{}w6UO{-;@!iZXQb5csOpEY}2WZ3lkkZ&m4irFn5 z;MU_nA$D+cP+z6iH8WgkjoUs~Y z#E_R6=gIYw%bGLLzvloAk$9gpl`{z0a1&?N*siP>%m$8sN}6{iiH2RiA1fzztpTAX zXn*TO=y|@mUp4q8CCFr@IfyAsp|^eBglsd{*^ahmLvEb_T%cHhe~@P_{`(y1f1cbO z#l|ytomUp_dv8u$>sB(eiQ}O88z9FdSV#Up8C98P>24I3mDl=jRZSNe}dT&6a%e18~2)(hRNtYId zh*e#KW)&T8KdSZvPEab<2^lrnUNCZ_R|EkTy1ffqXLvuWwAgxrkq&4YP~pI zgTFd1GlQkFw;V_ug`ojrRB|AC+PS7YfcUJALn%=?Z@pngB{eWou)>z+V$SgFhEFVu zzt0hvzn~wr66yKK_17&0Q20-``S$M}8enNof%JB)z6eMY9D&cozRaesd6@U#H2KQe zozS4MBB|ct7~vqt8MZ!At^>>_7~PR7sX{0RTunow%SL^kas%pebaB3v|nCsm^0dx-wJ~CI##|_oAgreMen58jdK;q%$YU z_hk>0@9_O8j_LzD1^uHO1mEDR9LmSi0!8k<8G(-5X!|4~;g%(yrgR&QYrvoe?dkty z-TDghh(god>%+xha8u0D?|M=~C(f7D>J3Px>*f zzTp+so7Qg2oWvCwbmg0_AF+4jj7n`Q*9cx)`??KF-IbRgy!>%tu%rlemCNX-=N*5T zdlVYv8+gPPf%&$Bmkz&wZfW(vej-M`|SF`m^gplWfVmG~1M4=g(YBfw&>bJsy& z(ML7Gg8FZ7>f-e%sol59f7ol^Fp}oPgE7Z+KYop=;VCEL00BIMx<+h<*H-Fq999HC zlCC5Y3A-pjyEzNj(-kO86w$a_gZL?7+4+FoKzzOju42py)vhk`%Q3VQ-}+>Tgqc}s zYz)JyrzRTB^6U3pHI{);g>^MQfPs=^Xt&C0X&8Q|e~l4rc!uPvx|WEdJBBX3b2}It z_%THQ_PL3u3V-u>jYy#Bys;)WVF>7W!#neZ*)7|+^{uLbc%wkn7}W2A@&1hOS{bF3 z)j-(Imq<*#e?GV1XIK#E;;WAnD{2c-lo#ml0{3#aYik%?_7YLk5yECUo#S?*Ol`)(hv-3*TKH*Ib_`9+SxKb&8d3#FPn0%CE zCP_kijrSn?sarO)cFt;voL2oWPtD^O`|LbNn|(4d9Drlq&sCLi1W&~svCvU)@}60h z8qkjZ87dv?s2ahnVRH39(}{ofmGmA3gXX9db*N183n5zh&;)C&c;u7(c^SABy zF>vj5Q+LHGoMjg;!0CCv2F#c=jw<71Iy>odl_F$XrmbSRAMVgQ85H%)Jj6M<`vzY# z!Bh_@X-M40UwaTQH0iOG>@A zAJtEl9bhWB=>GPNH{{sDV0On_QW$G&{QWx*z>wp%vB3o~yB{@A!+!2u_HhvN#PRpc zoP~NeFGgu=68orh?RJ*{aox`7rW$wpjG1bm!j(~tU~bhKE( z7=PXXoSZuhtVp{F%*i(b%EdjP{V(J;xTZJZF_*=*l+Nar9tSS0Zm)=|O6y*Pt`?cl zUKhYB(Mk8o`Z6JOmu{jG^xc!i2F?Rdzfh{z>nMaVFtz1M6(n*1CLk>}@6N9Yxd8G! 
z4WDYp_v8Kq`NwL{HHEBCEe)wAm#Kx&t^POFnQ zeDdIA`>6`u0KKZI2S|_B&g4gym!bVdEqmXU8k6Q#f9n>R0jk_Hpdw+{{+?iPS~Oq8 z`2l&bu*-22&3;3mV!hV)IyE}qb+xJK=E8Kj75w#yG2r%7w66+q=oURXqG&F%aAH>^ zW}vA!K_O16d_6400lhljFNyJqddM8GklyXr?@|~gcle1VrY2hIutIr;{HhqA&hrQ* zP%m!&N@{So#bHhoUgL&|1wC>8(Bbu`#0#CVY~?jTabUM=_DfrzSMG%$%!bvw&y$2i z6Z*F4hw~n9ee7@8tpa{k;rb#Xx9uVF7j6o_N-rDFGm65{rG5jvcn-o zt54byg)$%F;3)7zx`W%eF|8o6sKZ$wr1!Pc@);^d0SaJh%Ql=ha3s5TBxrUga`1|p zpzSTKBJqpP`tF=S;bu1Y^PScoE$z0?E5I!9okQaaR!Q{BLcsb?S%QD%&rQOe&qA{SjP*0)@a#0P$51P*ssygO)OFHq0jM6;?%O>`Q-DS5Vi z#Jtv+Y(Bz%n7OZyvCRsS6F}0F5Y)e)9)Od@H(GD&K}(1^bg;1$)r-$9l3{B{yU1f8AI;w2>q(0Cn31pc;|oXg2x99VNsbb|0!Nt(K0AzusK(+$9WALYHgxb+drGUZcw&9F zMP}(V{XzD5hf|xFBsHm;&5t2`ZY~ZTVpx>J|lE}f`@lc*c z)~OJv(+Z1D5E?j>(>O9G-TgB2v?7_IZfvi0s-0Q#Ih)J`zcx+6hgaHk$~6}wUvN{w?8qH!r?B)_gTn{4xy~Nay(FlCNYclm&9vwFGSm8wd?vVtS z3-iZEln2ucTIZ}tD>oH~R|au3LR$^P%znTC@)n7T)s@69OhFF3wc!NoJ@aP={|v_w z=dSMcM%Mlj=dR#xDwlp$CZU7{cd{aUl{wH+VCU4?g(NT$?)Ww9GXV50F~M5V@{ic> zC9$jFuCL$7raMWWGtdFb?!~Vfr?lUF1d@QC0F#B1zHg=aVm08GAE{=7@Me1<>7m)P zt~fEI7PVC<-#U%80b|p6@{M&AL3Pm@!LW-m6zh@EV%{1yzjWL<=tTdnoh^o} z_JfSW(mdZ%LvMVYaYNrJS-A3R44UdSxI;wJU9aK?k`8|lF!&$8y~T14z~now?t&@YepnU-GRy3V^N`tSd13={cI3Lvr;PX!dQ zmA-1!<*f}rJ{Gjc4j}lXX*$c`{{sj8PZPHKt9wTnEud(SZ1%1Lt1ww%Gr#SZe3JrM z2Ns7VQq3OxYpeT^+nk@FQtk@?XX$@k+5J!a0jd$Ye+WVP+iEX1G$##9#o;miO5vBg zvYC572-E!+aO*+q(i`~8ho$|FxJDW47u^0g zv=3#_gfgKh6Z^x4?|)t1hAvpMU!z#>|AzL#3ruE0sXB50Y=iyR|Lnhf?wi$7<7^+t z`@iA!v|a`e>+Y?(e~~P9w3Le(V9gzvy0)o5x+?zuHaYX}7yMsd;XkkL|NVmhg)8LN z|Em*x%f>H#YMUQ%0z_w?PpV2mHx6!0sx{IY1jx6 zETx7?!lxS8MAju}>vdIH58f77sdky40*<|m({@wU58uvdDoPo?(k36dS3|s)l3f-( zCoHZZfIDvo(i`%?p!$amdh2eCHKrdzv~cX8k=$!O^0n5&c>Q>MdeoMu%e4OME|-Lc z_`$F2OYZ=zt`;;Un;t}4rF*wa02fi0`Z!h5f(5_b7}m2)qVCr2_ha^{*wbdWTBU1t z8V^>Aa4n~qKMp}_R|>Vxh2U?NcVmIFKX?+6n|HfME1y=WIx6eEKz00#^f8KyZ;qY3b?l#fxgBxr?VR3L=t-W< zCpsE&#|BWq8hAx*I-C=BJ;P01Svvhav|*$DMkTHHM7?lm^UtT!Smf`ywjM!mASj5F zBJZnRcKSWr;t0H8-GQmRd``LDOT!P+bnjhW2m}?AsNTowX<(o5ikq|sKWzYDxzE-8 zSkr~hnB?1g>WdY?nsfvj-dcX0XT=|z>eLuU53JMc?1`AZRLqg_u#gvSN*I@;5Z8S+ zgxTlUm+Ca|6SAfz7SCY~I~GML;`jvt%kR%j_W@kDeQk%Gv$*7~)Z$Ae3Qxz8oF+S6 zztJv%#s!;N!7-qiGsy}suc+b~FT$SZqnG)EhjZ~O;^f8i$PzoSbrykpZhVT~6GomN zF`>19mYm-nrz7EXKyUAKpzykwKd2IEBsO>I1@ijI2gj)l1ZXUP(60xWC08P_#C<@C zp9VsRH%B#o7S?XM7G#7Wsz%h6CY16sxOeT6n^@qvwAXM|C`ZYAqI#ETpVXa9@o|Y6 zmGbeHoiQPv;{|P2c$_H8ALf9ndXen$;*&#B{j~P&sW{9ka6BC?6s?+azMxs=WCQd5 z0^M_8pJ`~{*7aG=ov#2Mvk?PI5OJVQMO9kz(;xWLCo^TT9ZL!|aS~fCc_-zuIBD1M zq?4ncb)FUeP|SY3B>3g)<~PS-z$;zVZqBubE9u1vN3HUKNfN0>eBN_;h^;aXOuX>I z228<8FSGX4?nBwOw`=a$LBemeQ~mSp_8Y zaiPzEhl%5g=d_28Z{070pLGD@(lPfz)AVuyyM*tKyH%P?NZd6AD&LiEB@5fRlcWb{ z41k_(>8rgVB_1fE6{60|?7RAK7Z^9oBEb2a6{S0m&F^XeR>u91K+-)sSME-G5|0_RyPCliX+R zZ-TXNtKCK$-(NHO{bCxt z4CV|%ocbDdhBvDqENs-Xw;$ZV6Ot&$x;0z%$nTF-1PJ!yXeo@90pZcx`q$*CZ>O>d()`Uu2Y2pNp_(ilWs>;DH zo$Y!6P+o7xbzkV^F)629YXbeF~%@D7o&c6GXLxygTydg#XQFnQRe8ZQ=-(I_pRr2>< zC(OR1NqZ^=j3qpaip#>W({)~~lX0I)(w6h%IzH0ROM-azOBmG3@)z&041oixr+$;q zs{iJ0FWW$-)vfZ4#(^goF|}NH_0MqKuA)XEaLs%be>fy(gkk+0%d6=>3O8;F$7^O3^&fObd<{1LX{eBa zY0<#TnrZBSnA^TR_BrtA)Y?JL-xTH@#X2+LW#{Vy$)pzqv^R!vx0@6G)Zn|`uC|^H z8)APcwJwoM|L#i8xk$T6r_!yv>+jXXH-ei!Ms2R7COz*uTf=e586bjh!y-QWmhX3F zyWLmI3y0VI>IiSItF}wCn{p`Yx_c!WA9F{peO{$is&5%wJhIGGsf+X?JquxD@JodQ zziA$6?s@u~IZ zIiFROF)WbZ%P$aCIi{CN=XOUq?@UeI(e#)m7@5AN0IeoiXRF+ zy@&noBaHcJ@1%d>@2o z2U5~7-CV;<-YRMcpWju3BV$axmJa~@A z6aJ$d#b8%_X&T`>B8n}ha~WY00%6OV+j@PZTsI}jLW0H`GAxJJCe#btMGpBSv8j*o z9VIQoXEQOz|ZKGhZv2zNVX&k81174lp)8PH~z%N8B5#&9S;9A!o~6xIjOI zS&SB#wsHtWhFTe*(^GCb=qpA}SNPfSU1&YDjnkmyO8iF<0mk(w&>AfU(GloMtR(RH!e4rDr`6M2+E^Ebb)ptm5QJDM3 
z!v(0iU^PoA!JQrGq!QWNNg#d8Sw1 zT4~3#K(#EB1hz}lKQ@gxx#G#Px8U$jY6c+M8I3G(Q)`%Lj~et}Op z(|WznYotx}1DCp3+t)(bF{|#J#LNGF!_4djqlg$elH#` zy}};ZbdINky{bfs%usXj` zI-0VSB?O(GQ@TN0LE0Y(RPDVH`HsIOVSs(j8l{IydcZ1QMo)S289L9|Bpr)(i z>lePma{a>lU|mAu(z`3_1VJJJP2R~Le1X*#cxxizYaN})tF7JLRx*iEA;eE3*_4&} zs0euji0wc~2cfPvV%Ire`3{!`Yna8NB@e!e&AvGiKDk8qOZT}& z4=B%)-gbw1}ROnHnvXiG27aK5F$}SYF-=Vcms_K zvN1!pk9uQGO`iUT768+HGbUNisETK3$Ck=x+x{z&lYgzohf2{*ouF56O|#W~XOiUq z1!L8>)^xbYvq8$PPGR=BFd7j791_2e*>|FVBFb7Bz>k*9k0ZR}4r{pboe z&Q4XreK)G}tQ=>WnrZN{r)8-~wx&B)y=-UX>CU12*nUubk0c$nBc^hbj8+^Qw#RN? z%8j0nrhS+frbr|zM$s}AqTzM8+S|`ej0^e;Q=f9!+@*=8l?gVT<7M7{e_+s>$KUhL zO4xF^`^D#P&SQAX8NBQawjb=);9n1mBVGr)O~HDSzHf->$<5 zJ+0ZGJlhBJ{o<_Z!jI#wTP?GO1)G{=gi3V}mu=t1wRw!({Bdx#Ki|2xKs?frN>XMx zC10FXvHKCUbS$DUy68aZ<-Hfx-KAH@WVR2%j>hqG+uG8e$Z#K0khODunYh9hly%bZ zi^X>I!3ZqL=C<8-gVENR@2w_`ny_!rZS1lP>)IZ%mZ+1A%;A2~qAOqJ#hI#drz#n5 z*u1TFT_*Z_2Z`f*`82+_N=8Nfgq6-pv~WB~%dT4UL=R1_yhkV|S=it?I2&=T~c+(|6H1nbeMXf_^hx=j&s&;nCS_^4~F z{d&vn!Fd|^+JPlfQ6f;G`C7Qk+RNeVBMfxm?}wLqRD`A%z_Egs?ERYT*O#z+$!$U* zp+Eh4J~HNqdTUonfM8=flC`XO%)ZGYmwH#Xu4y#XYO1)_YI-=pw80T2siu^K6n;MA zy=c*)S)FGsH7shdvVIQzYz=Z$t2Gs`Sa*C{8kN0f7(X7^+Pn4S{aBpafP|1yQLAvR zcGIuLO7+AJ^u0vL&grntwHf+EuAs(kQ`|aAg2RC9cQf!jvZ%g(iEt?LCxuJ|`*HE} zn5p%hbBnIVCyT6!m*H1DJI%h>&qoS*g8Dc*U{m`oYgkEfQURzMxHjL=%aZ1p!_)u` z@$ltq4wr6FN?awLuMFrnu;{8QdCEpoatvGHZaUojHlTqL{B_RBiT2trtJiF=%Z&AU zuxVGMSJbQbDXgE+f;elOwIVS$gIEV zaMGlF2{re=ka$hhi8i(CMAk_wl@$K40Pabj7aIDct$Zn2W?0r{Q zlWVuFx@?Gw1`q`Wr3gxig{CwMNR!@M5Rn>+fB`}XC{m=0ROy0%ln`2|At+TqdI>d1 zFCmmb2oMP8W9|L#v-f${K70M=>RkMHJh=$)ee<1j%rVBC??+oLw?4*p-Y|3cY%DWG z3KL;FzHzg8i3;3#^bE^a&=Y!uJ?S3mK4>(MK%1%3gXTSVQ=Tpdc!r+J0b}?16p!W& zwU=o>-k(x**_Si6EJ!y(3ZZ`*bMPhWa>=bXGWW2ji{?cZVlL@H*)_-`D{9jQl-LTr zSiYl4gZd~jAG2O;4E~AW)UPEjgvN&920l^#73kT{9!$frhy80Yir&}GHj*|&(#no! zIC=i+Wr_uh=5|Zrxc;~7vMoNQfg9~oWnJk0o-SY?9{+-1Y|!BAa3jHz0pr}_>l$O# z=vv+VJlS4OCu-a4^F!sM2(QIwgy7i$9a@QR?h}He#PF+Vc}iPHkNgam2yY3+uT+$& zfq+0|94r~sg{1yb@M1_dX?u?^W`o-@AS;}xP14(2``ki}n){l{vq#9RH0Ixg9&tZs z;8}GyUoxuzSTmRlMYZ+NNgW7HCaM4ldJo<`n9yDNaqivVh0l$@62U&Kh54L88Tc~hiBH!}qHRl=xj80#rKsV2&&H=MX1EOXC88s0qf-jp zsa;q0W!^DMYsc$oXN7W?uGhN`*(3oAh;QalH4XNNa|d%k{3d3xxzN)CPTq!R&z~4D zq9sb0o0|n8e@|~`BGvOZ4WQn}5&tNC|5tKT<>I3|rXkPIHRSJb8!8yOM1x%cWE*n4 zfo|+~J{Zl*zIKd@0lpbf#u{J*#(;{h0-c`v)MaOKLG@fB^qCx=P$YVn8&c07ytz4B zBSFimtmpkDHT8i zyC%su)sq)WBbK81PNRWGWHG@-Ukmc*w{(fUzP*qc8A0Sf8qaoCH92!PC_^^b9@P!F zM;d}tRQ10vmd0Jyo4Uc^&j|ooPp3utA+F3+Abwzbc)OeN2=X|C@1CTA6ca!#R*uU? zH2!#b6YNKLpXo1G4)GyrvNO0f_C-8E!Gf;3DzU5tM7(r3(ZF8A-bK*OW>w0Q>vsb8 z4kTbU^rr_x0{GHP-Jb5)3y%Z4HFz$5JKlZ|rDj*(wqPCX%gY^mxjSj@GgfXy9P0eh z)6mcN;ECTLjDkvNdy_IC=iYUOroMRiatvR6gTm=%j+lWr?4JikgR^EWAP~L%hkQ|S z==avQcrLnW!V#XL5-mDPNfAXffU>cQP4)ng&diG%hG)dk8kv!h>L*EZgGjFO-P zG`HpD7qY`OnDBQeWVt5FWyUJ@2>CAaYTTZvQy4KR{Y^iAv2X^4ikjO8e`v-3_chweX|ZWy3z3ZW4e z6StplK&IBqw!JuelQ*(=GF+zAu%ZNB(ef!sa2G&qzTgsWYY3GffRj|Y;YZen&xoCm z@rA%+M;T|DfID%2b^@-r=6N9ZbF7~7Dny09Ftloou7F(VVV@yM)a|HkO8Cv95MHw7 zv~PnFSp%gw36zZfon-ukz)LouhqXlMY*j(5h^u_HVT)?MmOHhYwCZt_`r7)O^_Bum zf6L_Q|BD#$A7?w=_}IC8wMTos$2S#<8pQ~0$cqJ@UoIs^LvqHPTD_*=rLD<_D$)|Ketwix!q(z{N*4VJ_AdkAg27c)n225~x6x{x zyYxb;bg+v5jXp!hfTW&qE_zgDL&zX7*ze@rk~-=ALJhvV85<(FWRFwQS!@4$W4C|25V)l8^||+_WrjriNKmZQQq#%0a{E*}(=3e9j?x?Z`OnuB zjrKFk%AF32-r2c9Ry? 
zz9R(Q5Py*qJ`*J=6>f!TN@8^wMH>4#tQzUK@O@P;E!Y}azxl2b>%RnV3JsuyUB6hs zDi~-rByHc;7NLQWP}|FQb(A)1=c$`R8hfKMy?TBmZ8`qiD|;lMkxSTPd`KTb5V!D& zCi+6l4SG!AXPXvY7C%oHR;BwfV<*sOl@IY&`p0uUm-q!Q{8GtB9PVncLq4}46rCxGAt@eD*?B^#J6KTch3y(l{5;$#LPGQ`peUhwR=5` zb1=syZ#vG5DQ|smq0hjrj2PPszv!7O-5&{JY(e{ z7%%FKl2A`lC&scOcgZX{R_$@(_FCfr^!Kg+ct&|{3%O3cgEFy0;gJD7qd%4`FcZW$ zdWpeT%%h|^u;VE--~T0n_xYJB8iFn`89S)-Sy(FHmqh~Fd8p)ufQGAyg&gqvTX;J~ zIP1hAoo5nK%kb!-26)wd=mx6XITW=z{Lj~CnP z{%35rN9?>#(WZ@JplpcDctu~52{{9LaPo8jHum*~OFi-tF#>s$y#A)&usB;LBWAd= zybw+g2X!jeN@}(NbS-|ldTx3|R>?#TDeYUfj=D24G5@#n{ddp;9-W4u3>Y|m)G@R@ z9r%g zoW1A(NS&+b&kB(|JYVOJ(|PCI#U<;7dXM>h(|zGO?I+pQ$zY?G z0s}pr7R07L{6yiYFG^!At<#s!c=Y-UbpU*37cO=@LxW0w3V)yf@N z#4VZjjN${(kv!-$W2Ts#G-lZd8RPYj^%U-a#xTtmA)mY! z#0*FD^NZHF6H{3kx!!a?zPNEmO|8(;WcG_bI7}BhIg%`XzCl_{LiCc=m+k&?8OI52 z_L}oc3LjP4bgNyh=L^`52zE?I%5n0?ewSJKz z_am3Dm0 z-6?QKrJv-Ok1D^tbM=zf=eQ%)H?2RvD%K&CU!zQ3sklj^*e$DyXN{yBHYw;LaPJbD z&1$7;o^!&K3W!?IxKKP^PW^6=Zbc5eWP~x-hzl6Ur4u3+0kZuS?z&AHhUBJ~3UnbZ z1vZwokM!C^Blj73Lo5&Ui($8^VL9{O2h=cftMMOixZeK{R*FB-%kLB}fCV$Tc-9W- zosjkZ#Zd3J*{b~~4@m9XWy>g=AFeN-Ce^XJ8DD%4ZaA~*ZJ3)$nX?JFzWYg*1vD%T ztZK3Zw|fbw$pgDCl8nTxiE%dNnpXVpVr#Svw()$Dl~4W{S1#F>sMM>zHnC#W2F9bev4;(oM&o1EM~19 zEIedNZBx7yBB;{wqdE~xPK5^Cn84rOK?{$b$wdl{YRoqB%BrbF=vVSY0+HwArB)K$ zYk2he&oP3|Ok$y`ieTioLkbbJ@o3U0?q<=0<)Vf$VEO(k0qAPvwSz4wQ3htg8Rum- zyBl|HT5ur!zDo=WwHNMa*#i>Po^uacbyzueRU-RypXek-fGQgSN3j z12A*Uy-}y8ML_V({pM0R&Ve~UDpo(J0fDG~ZcelML;*#jG8^lSz~5qTqkEBqITd?L z4fhMt2ACFLtd@6hZ>{M_TAEcs(MZ^P=?!hOlC5LocrdT6ISo*1Q0+ryCl8$$J&L{v zTW6sGx};WHu%iYrqMyP_4eTMc+ZjzEg5CDnv!s{*Eev-ISB58WlIduecw8dVqq zt|+t0xOKz*XB6~r$UQICd9J^Eov1YQ>CqhryM8h@5$vZIBW-K+SicG!sXEK$b1QWg zBZvytFS2j{Nlwr5esa`IY_{!v25U~%fZ*9um|&9%JQcB01=w#0Zsx zVow`R)%!6EzA6w+qfYqBbt+`safYu01C=fT^6dIT^V1841{+!Z37b|y8!}hrKt8!> z?122*LSGzE;*6LlBReKsD`5I2%aUkoq$V9&SY>S*b{q$E^Tyu7>(7BjV%#nt=lrNY z^X|=Bj3==&?G)WqORVIHkgQk6?gN$(g?bCYH7#lQb=+pJ>iDgkWYD~0OL#&o(b+sT z;)^DMpej`X%{NQyn~Gx;9FSA7Ibj-?xiY-f*6>QLHY#xTr)pMXj*o+>OA~8U#hktL z9l%&eO~TpK zoI$Hk9$^P~02XUW06ju4TSyonIM?lqiuz*hn4ORxh3#G zQ<7N)(3A`;Yq>l3o94=o&C8_Z3!!WVmsPKU4C3z9*a)Z(8e7tCj`^z0as`f3c%f-} z)vvn*#puW@%5@D)7-n@uF3Z&&xOoo5hP<@ z;|raQdBC{sm(&(HdU3|xLf6`1dIx)7h_iWNiRBMK2-W|{8RABg&D$ObOImYgk>V;{ z%5+_3&tki`zY)BmHzLwBpc+BQLn?RYO#iK#e?1&PB+drC%Hm` zW>vFaKSIBX#)_Q>=7a-Ya~rS;W#&h_a2=53Xp#9 z0O*?9RDUY?Og`(``2lnC2Wz!s&92q~3bX!W77%in4QfoX)TP0VcE24|8FwH4teh}W z@In%tiaW309mN-zm%pQ}zF;#d`1rHD;^0hH{~5R5#zJu)$4*DHWn*o})Grmvf=^1O z)t{sS$q&?FSX?A+y8>7^bC2ll-5`rWC8 zj-#y*+b9)iMYV`Hvs)I%d^^^Z$X**%t~(O_D!eT(QdsfHYq)*w)(A9{H?e|BK%hqf zoxHR}00Cynv}WK}wX&X1oBHth0evgrbQZ&j?Q&AqTp}w^Tu3H19|JfYnv_K^_2U#d z;k@SsDbGH$o(;_hObv+`cNFlYE}WCf`oL~Lp2s8zf;0;-eXNL|wE)jG1~?ZD$W~0T zv9Z2}@!I&oQuNf2xkHhi1F{{(KZ$eNxCL_p`pB|toujXx5AQi#<6Cy_{}xr7BH8uc zEQ?lfrW_{NqOq&+P0U#PWRD>z;22vU@BG-f-bmxd(V#OYOXn21~^{UhvTJp z!K<_55-h-k{wt>#&?{B{HGr?@54rW@xxn$bJ9Yp%3Bi6Df77R?CB45~B7bC{KQocu zM!p2C%Do4&EC94zt=e9CR6NeW94Us2WtE6{rCv`4NVtT>08P^7lM@VzheD>L1ktKL zL~iQzk5)kOy%?k0E5vRUoSXit7AuFxu61w_cd!Bf`fr$-6 zj~E0i=1dq%yXoKU^5S)dSR>k8g*dt@^YM0d{p>Z#4<sKMTQ?eY<%u{cJ2|PS4Uk)rcu1i`=d86AEb;AAZF= z-{Px(AW(Si)TEa)@kMR%ZL>t>e&Mc_d+<4m+$iUYU?M+ArFC=~Frr*;@?FZ!NtQ;% zZ-{Uxh^$AZm>X~8TNOHx5(Y5aJ9IqEcBF5nn2^-`fpzgVwjVd!XPdn3l2znN{Gegb zs^z8x>&5o&Z37#7=gX+Aykq6{FOt&E;prRO(~&I|eD}&Z?@x2cCBMSpBP1|7Q-hh2 zvLlDCU9ElHE?3SRN6yKFb42Z>kqFk{098y&vmhSqjV`c`G_ zuyqn2B(R4)1GzpEtr4;HtoAK`Dbj?Yy$qSI`_ROIlot0ip?OPHvU$1Gxk|=lCvKM` zD{<$Wx5GXud9zD2golxuwEDE}z@YB-l#w8>LsEGAgTG^*d}q&~oAWYHoZ2LS+b5fv zI~j1xckE_J$@4QoA9*&-C-g0j&YnB||lhQK)ezjqS{*aaqU1uhGje6x{6_^?rx 
zT`kvKQ=dWinI{k5Ec|{S8X-5}T};AogUlZbvj57fi!|@E6su2hwHgv17E@jn)ki7+ zs#(lV8cE+a<&>i^&-;Ya##epV7}!~VggEa~h-^6w8Z%jgUgytU85Mi^`}@02ugy(( z0Mw85s@*ub{sx0UU#}n+C3CJIQV<+Q{%RaPjG>Y$Cftn=fZ?m?#3n&BcIuGyUPv8e zMfnm+b`@Qgaja)PIb7=pJL7h&$M|J^efuB22JDSAReXi>#I`BXUresNNj^gW{H(CC zrl5mgy31bmaz#I;xqxlxcEn(t*q|Ygp62b)l0` zOi`2@^@{P**S2!Nx*~ASk@H`Wy`gi+1i8F(@#a2Nb#I6TrRqXtgLJcvW$7^HglHAw zH<Qaw``9+{nGQ}o zA1omkuS*8~Wk2;T>{0H)ANfLJ#sA5-PUd|9lZDTzLWYkobp}|uSJhS4Y%#95)}Nm6 zOv)aM|FOlmp!reg#%I_N-=H-ZY$(2t&g@v1z2`Qb4zEWE|JF+tTS^lM3vc ztGO2W(lWrm{2EzlKfZ2{4>Xib$qY|&=R70X&d}RSUb8TA?AIHNHOlPwv?J~Hp6uc>b*Q3$5kfMT2mlFsl&Dh z?h{wzrAE>&Fpg@Apz6?Kje};~55r@-8&?1)n%e0GmX-)Mv(fS(THH#zG*e1G$!8hp zQcLTRQY}whN`+PBb|#5dw5J5c1~zAhYM`N`3g-izHn^DIUL*(QINMx+QXQ-sM>xpT zi;6?4?q+!x1W`pf6>dcd*mso&D;ru=*@${?uWb)&T96-_ zh7Xh5_mLL!DJTyx*suA7N^EK=BbX7syv)kO1x_6guiRH3}Xjv9(n5ABq*k zL*LDU>9Ib`NEvZ5SztZR|MG6Gr=awJhO#qeKwr!IAky(^)viD|`D2kT&RTC8TP z0PP!f`mvwjXMgxo4lsP9h|gP91=q$1tO6p&^V2=7S})}X%%(Ix%U{k(^|F^9wEmSa z38+yazPyRj3+eAOw&`hD_-%9IzWU2jfM${SOnhDR z_W1(Y2pyrKb0w(Z!q|zNP+T{8-$_)-&eF6QrxL zKh>sg)amCEfT;;zzkhYFG0_O5Y=ktl*E- z4t=q?6Ml`M2s1yrlObvc^+);$=*eZV>Hr==ou+l`QPU*YL7dA{LeA>p>z0S0ehtu#rx#pJLnAF(` z!1z>|OAdjWKM8eyl{|FU%t>gd(#UU8GnoS?od777<+3+widx-M?c~b97X=DakrmG1e5i~sXT)dfRuhU=F*b64|so){?v zieTCq{Mu&g0LQz0{Y3^1E6{!ABRLwjAn!x#s|e^yoFLCHMLZ~|wV}Yj{$8*_DspRh zVT{gG9M<^$cFoO04p3ZjrE+>K4KVf?N`3>6QyA1JA1{cr1P83wLl0)LO!2RU+8vO6 z$4`>e`H!Yc2g4s@{7>pT{N9Mye8k-b*l%{z^CCg&s>1xa(T=V7Z&Tpe^p)JNFrQX5Wn` zHm@o3r_3Ds0XGP-F}aldX*)uLx^l8guW3&mVTa519_MkbK7|5Q}}J zNyY1HCDZ9K6cj{`?O7Yjx~B29bu^bcN7MD@?xIPy4%kQ2hzfm4c!JW{5QE$h9-q97k1A zIkz=-48eQuT-^{Yj#6k4Sxe?Nd|f_6yJoPUj=rnTJtx#|k0vpN>j3NL?9!y>5-c;= z&axE3)WGG*fDw7UvV13=y;Qub%nkK<>%@EQ{g=di?>5p-2+zAIEyZiBwT)k(pXKX! z{j+zue6}Zh7>`WW7Qms?fyeBL;p5GHRaORh9TtgYL>21r>#lsjxMN4k<^2`%xdAvK zs%igb-isB>-XtgN4#@S{McCG=nrwJ&Eiax292#ZTHi5(IY%2;DPo$_NQzJe-yHVx( zbL$c+muiYu%GJNTHF60$dj%vz3oRBR!Iip1ync{&iRj2F|BZpdb}{(LuzRyye}|PU zzS;e{*a;aCUhy(ZQ#3|DY`@i<;ns4ObWT&nRH(LjI+4*gxOryacE{!{1sI6D0Dtp5 z)RL&17k0;-!FT2;3Yfc?L@gN4ZZ7umgEj_rg$ly3*H!23VA0zP1*M%K{+eHG)$Ler zI@^&6w!Ybh)Z2I4vp>;q8<@x5x)Cqmkkx1cl3U0b0RxrHI5+@Fo5AS%wP?#JfNz9h z6We$k1T@#++nLRuWe16$>-Yx~{{Y1*EnE!=U`2~~s&w;BRyLbR;`*9@Wh8^kYuWSd zEOk>q&gvPu-C{8t*;Tzv>{Asm4mXs1<-9@JH(1mne63B_wo?fggrAQ4Ai^TD>&j9X z&mbz^H&tdo(3aw}yO<0492?~C0b*~fDMUwJ4ua-2`!}|OWV6FfX0xhL)op*k6ChMQ zwhki{pI!C?DckOJ+uHna!=ZWM;zDT-8Xa&T+TnAn}?fiF5Y@j zEAWCUm9>#wr?L6TA4xwAAK1Fm9pCgflenm%VNp}XPTj9KRokpIuDCnT%(29F>tN+& zwr*IDYcOcWOf^-C4-{)tCVKRRSPJN)hqr8^qQZCSJte=usOV#O!wO9UsI${S>J#QG8Z)-P~n5d82RU?*d`5q{TDjL#{m$fVz$!{2JU`8k+ z2kiUzT=%L-!|(_|`?xK}UN*S6V+5^DofmcTb?RcbbIjN9&9pV!3Y%3MmwWoP_C@?) 
zTpIEI3uBYhe<`NHlph^1O&>&>9%vLEg!WL2>^iS5m2l3S``?}+&BV8xmv=Fi5B5IM z<+;kvWx;1*r+&PK@V1`%8LJmhHPwT8ivCpOQT=_e8 zjK9Y2ltAFCz4};nkN@4j=70Zd|7Q5#VSaQcSx(2*|1Z1a|7o22!rMo+H{QOeB)mDn z`tJYr8|c8$^L$s#-y1>r3m>MY^WoE!Fng@c@|AM~^q`+~-sL8@1&xd#8Sm zW2Xa&Y6hAY|6jjhMgw4t>CapLPOkXNxAL!00OIdop>P;~{|beFg~Gq*zkh|oze3^v zr%))8ugzun=b*x)bEkz8q*{>P6LO$iHLDLB#ccZpE;cLd-@LQZdTMNW^=5PIa@qP| zp+bwux)^^b|2z17o62>t%yf1b`{W2(?kC2~Q5c_o!ws>1^;{YIu?ydTHD?W&FN(&0 zzgQPt23@9o*QC;6zRHg9%$psirB+hab0n#%ODXXl*G@T2j%=BYirH#<$oUKZSo=_C zp72cre~tPk+~nm>Cw+c7Xe~2%2 zk-eu((3JsyVhhYr-__6Svo=msYZmIq3_HRB zS;GANC!ZZv2QiMBUf7Y%e3jcmM(N-G?K7 z7AKGW9VznPvFoE0P=eT`Aa(aa3(N6O=*oeJhvQ<~pS|cQa)@li`cSQ<{xylQdWziY z(sng2bx(U6Z-B#~@CXBQ%?lSb++>2T>WvooxGW}N9OvUq3-gm?ogBu>0Yd%?uoOgt z3)h159Mz-uI(m=qpXlnyZ^kG8i8|l9oesQm;;(`DUw?|>F6s{vta683nGv&Y=4ZtK2<@6)aC2J<@BbECcO7`SW}Evrt0OGpUB{beZDsr!ny`B z*hLa#+SS0GZ1klaer7RFcsr}pFigT-72ONIh#N8&wf#LPJv;T2c_XHWt1^PaX+?iz z`Zu@G{W^MyK-iH#149V}!2FpXtpChojy*nt!a#nwtz8M$Wt{7DnKt(xIajwGM`Z`EGI$gWnxip_BZTp&gZnI0yrR!RW&Pyk6 z+7|S5Aae|^&d_T>%=Ey`cat+2)YNhHK*9Kq#h=gb1%Ttl???vx6OF{rphpeOYNk1A z<-yYVbZWXepMO-E^ri-$I#jB|Xe{ZA<2WGSSD$LVVvbz7^ec&&2p-M-*p(Nx;2L~M zSf@{qS#t4x#kj$VyBoir(A~1V=M#ndeh6228{n-BeSArYjR!Xy@D<-<<)YF!5-dlGiW;3bdVSYu$lb~?3jq@pHHDY)-5+lTl zL)4z-W-bMW+Gauws$He4ygQJbza$j4lvfU8uMB_=eYG2|&Sm1qZu!xyx7#mH@3>jl z@YNJtMm}^b!9Yi>KbOG2Dt!Fu4xC0_C*c8@760R0Ok%_rG@Ys|${AyjX)T_D?tCJyC;c%E_0^7E&xGG!38o=0 z6)NP+X0D5SEWM%*PgARMxguRlZ$^MU_f{5+VXLIL(J6z%b#2u2c9uGKIp#9g{TAO9 z9OA%H!fKP|)YigP&6g+!?st5EFLyquHy!#}<)=8=eG|JSXCreO+~+)QE0im2E_)!) zVn!<78aPOO6RlXwcES!PTrr@&yFuw0GGGmMCX+I0{l(6d)kFe3O_Sshu z#UTwxam32Fb`Q6g8?$baY;D&jt$nHCL~78+`24|6MPEBJVPwmw9b;M?RQLR94sxrQ zwv~bhS+p||FmJzFaECD-9sn^upi9l3Q9AX1_P>99aR2;NGwyBU+uHMzgS8&BJ1%1` z8nVB4ufn|^OD4q>KBvv_P^XF!H9+!gb|JNz_{w&it6V#Y2M(=x<6QwSypmT;G4}0zgW<^yGfdrqdkuCNH{Tz}MF)>cw`b z4Hpc^{9%!wBqOTk=aha{(fKN#WwbVSB#Y!tnO_jXz1h1U0j)Ps$nxDokB)(a zU6Y`FHO@Uqt~>jo8sqK!%d5QE>ZKlZclSn*S>$=}(m~m_>~lX7TjVIemc)QfGzaIV zR)(cxbN9C|z8=xCp8TEYzGjn}D2e@!aNI_<{~U-mSW0H zgQhGE9boerGxmAoi>VWnIIqcE$#LVHeLm1xjor0cd5-DR0ntp(v>u+~W@UlPsdCV4 z1SPD2x!yd9p3>gwvL7MW1uXVPJHCD+OPTHob~TXMl$Y=&Q5YQ#E(EtB{lwI<-@~is z;|wbcUZc#68+sjP)Z(kasYqU27TX#9A$RI_BJ*K5VEDchPht3!y#AT0`TQ6NXB7R~ zV~(Q$uleATBt6SU1^BEP zG>CisD^)$!fY^3>%6yGLo9aQ_JYHpi>~{uVt8<=tEQZY?UNMgd-7ShS9Ll*k;X(I` zb7N%NP5+hIhz5BCKCuptpM8KgAz0dO2k`SX*5i_~5z&ft0d@UU(wPh$C_I7-K8mE2 zHE1{0&s?EM_{}ae>j-JZ4F9x$%#bMl1}lQoK#`XkpX zk>km?q8(m4&ChOk?d>vB>!a#~7l()S+vFYoF-4Dwn4hkneTdC? 
zxq)qX*7dI`_vRWZIUlxKdjG5`!sw49ssQM-`jCm;;XO|eX*)hm&3_hN{DNRgU0OV0 z@rD6BZDG^^mTf}PHybQoPkGft!7<5FWbN^?j(fq|ToVYBDPy+jRL|=dv&AwR-aZb# zBXS7b`{N#wJ+9d&aV-9=4IO_16`+1zxM15~25e;<^h%9YRCDGUhgm+cz5QgNkP;nn;06)Pw3J3{v&&Do^)1GyV9o!j}ul3{nP>chI183>tf z-kT71k>zpTO)_zzO;kQpORA!PYX^i)i@K3J%bDuOo;V)@g29Iy&h%a};C^gbUuucl zv|EZ$?Gn4w_a=$zW>pcr*9A4-kAr`zz}-?u%C37U$Vw*V1Gb6^~Yg znC?^Ot7pqcdcj|a1yz(AVbY)@)hK9RzH`Qy ziim9GI(ntF1xMAD^4)Tfa9m!foIrX7tDrn<3!5W%Ewd&#myHX%$JM}LRmD^$nB_{< zumKPGku*839Y8SXkcP%`@#&`<(B4zZ&TSl~;{9Sy!?$l+4%7b_|K@AT94`Uy|G<62 z#k()LOz^|%DY<*nXW-U9#e>%?CTBgW^#xI&aj53nCQ;4O=$JWF(BS}zr+uF`jUH63 zxs^E)i9Ip8h4W~FtJhBis>Z~R+=7Hgn3-nIO@z+GT4%{HxvZKYADhnbAK=<44}V|A zzgcwGN*H`!%tW{{fme<+=labqY9DiY`WV0tKXPRFZBbc%T$$;gII5a)kD|6wzaQwA zzovWhJC_a)-ul2=YiKwr9KSj@D7mg7uEKjHRSId(UC`fTqjB>@jto`i-X%sswP!daUbeo;Oc|->95-qx#Ux|9cPZR zLAZc{iNW;8Px0;)*jQdr%BxHk>Cbd2my~&hTXZt+s+TY6!Hht~6xzq+f~rJhBQ8aJ zxS1lewJZ8(!39yMs0*E=5l=_u!8M~JXtrJ=Wcy+ z2fR_}+RN0LKh$5EK-)Z;u5A@PhCK|_omX@6?50T3rSY`%iJEdGUOmRFwQ1AEava}~ z8Y)Y6fHr+o0l)J;*f>~W2M;`KD=ci*XbCTWv#%~WaR@?AelaQ8I8)d)nZvTa2LSf* zvE1m)>Y2N;>|)NGvsMT2d9)3ye511>vZB7crlwD^48USauGjm5;37 zn1lzN6AzWvqdoHhGV+Ocdks;BUc<1H)mHf7Zj1@yy;9@ybbpf;AruAU6s`Ojjh0pc zSDE4)6U9hZr|RGC}K7s|Yi#T6{V6DtJH2%Z6SZULpEu`xYB9nQ6DKOt_bE7Y3%Rk~Pd zTB{K2N-f=%upcQ9*xCEsXV&27VFYEI>rg$d0&@x$NJrU&r1n7HNw5TT6+dKR-V)O1N_dcz%B$Oa$MJE_y`z0fRsN3>HF zVpFqHqEWBo4m{Qe(fn0&q`KA8%3}f1J13Za>kAbid`!13xqV`%pW1P-HTMO^Pb}i# z8&-dIYYb@VRcnnz+tTn(#R+&yk`%1OWPe-k(s(}Xk%jQ-mi{q29qrPT`&QcASgq12 z_!Q4qhUI-3UkAb$)ftr6STxi2b&1`GCnk)@mujhT@y0HE9F2#*@ONXU`<)CrQSAQ| zv|Cfx~q)HsxznMTNc7A0TvW#^j4 z?SF}!<_L(kZ&6GxzdqzxF}T~)!iwEjr@aX~>@&v~YMhjQX}}32#Mp45_#RuQ`2{wp zhVdKI!s{cPS5B#M%5YzDqkRuxWUK6M2i}_0&Zt6*_OwLU5VGP%=b6BqNDksNQ-lk* zafMkBZ(-G8)PCd#_(^y#FZiG7PCk6E-`QX!$eX)5ryc}xuVx84R3M%!L%e6txi`nB zHCX_ODU*ooUIjLcdT$NYjY&9TS%NSNWywU$!QFM(V6c1$XdX^mSVZT%r0 z(ZD8>unHh6XQx4b^iT&FY!8&(<_`B#5m;CHCs0fyeV<{!@ypVju2vS-U87JoUD7I5q^Y~|!Vll5|H@M4!jeWdSBTCnitsgz zr5RLhUadm)&d%Ck{?z&$bKsN*7o?*Yp!CM>LC?Ns4q{SO%Z3&0S94?$crF|!HqOJu z_BcxV?RJCVFq-q=9(yCIMVKn4K#Ikz7aOqbmqMWEKSpWQ61ZK)|D(j9OO$+$#SKcD+WR282x;xM>U01N^EuhQ```C9kXJ@7B5y*0nr&2O-BT1w^ESqB-jx-07i4 z#ZT66n=V$IQmn!cYzH?JSKVFMM^T6^7acjy*=8Baizl}EXWoq4b@`vhl}~5tMG}J7 zGk5S?mS2s1OOx7djsHEcjwjoU>%IXpJv~AHIaOo$Bl7Cmlgt! 
zI=>_Xt$wxBiPgG}5fS1h=!z?*7Iab_!v1o_%6qxeCl^r znk=a*p5i{Md2$L=V5-cpC_AHZglz6JYBv6rX+rHo9zSv|qL07hA=AhpWxL~XunC!6 zc#<-7YUM*iEInL~lg)fIn$K8hQJ+A&!=X#|22j>B&K#7-6dK~$7GAma5q2L6c~d*| zqD9|3^L56KX-7MQxZ_CG_H7N~`dEtz5JP*n{&a!Bo}^o(%&27lEpYLVcd`BMZR3~2 zp3{UFR#zPup2Xr}SJ?=1>hSx31{4(qYW)%Tf*IA1n77q9jKtrFvX%S=#>Q2?ZfIW> z^5i33AcPe1aq1%4jDW2MFpfP(TcPWnA$3#d5tW2foJp)%`qZ{{=kD|`M8vXD)cmmP z3D}4y@f(`E3wN`GY)DGDpE9By{3j3T8n6;8A3jx3Hh-hA;baouR3yM6`aOc%vH->} z&5Si;NI_Z4jo_=iulF}6Jen+EcYWfFU9uz(%9!DF$o#TkNMkiTj0qkbXIq(^8)5fy z-*xe!Q<>Ed%XRhH3936K&9OxWZk3C@q_$?1sGp%q(bdG`tKlZ)XWmx@ z9@a9!!!?Q_(ZBxjjQ%=`|LdoT!lONyh3;X@0@jaM)}vntQaP!K^Krjaoc@kd3N z^q{z1ZJD38n&0d@g9&E1T&n3>n@JlZ8ieSX|7jv3_~G%YNE;du-je-`Vozswt9at>Y5{{e>p%%H z0`V+0pzrwb7Xu?f5d#7=)jJBY(rq>t29^umK+ZeQOS;4iVZ<$!u2{!%u^GOP4h#>i zofYYGHp6FoOf+s^LAJmON1dJ~kKArY>^XQgr8qT;u_>l{@M)H3`z#`VyA*1b&WTIP zqDk8yx7~jac(6(D&cS&q>6S>9lR_2x0PMu*yXBPP>PKl}Gx-tKY zz3&WbD(e;=5fw#LL_w(v3Mfr#=w%cTX*PNZO7ES}Bch;GkzRw+q<2CONR<{O6ln<% z0Rx2IL&A3$U+2y{<*N7R_l%D}4oUVtYp=cfT6-_tz>v>ctMX#a7Mh#BNm_;}7GoBB zJC!hLtJT8%9#1@9vG3Z*u>0{I%O{6qkj-mv+~s`fNAzCF6(X+VmV6A)oeyjhfH@l9 zKOIo&F*)($!35gNhml7Kut;b713KB(Z6Xts_k3U#o&7+ssP%@g_+p*iFgH`Kb zLc*Ipl=FL=gbj0CqpM65VLKaic4l(D)kZyxaFf{c2Wgp7n*P^HY)?iXA^PGj5wy~kjn2cxdEsyZ(S2lEWmTCED$Hf)k1n>sI zYMtj_I-&YzsAop+#u{^mduk>qmSfG)A(o3D@q>3v5O`yIx>o(tUm@HMs5YOo8|tVE z{M!}8t>BoKl{$1OwzjQpfPp>MudYDX8sMz#W{9>7AwHzdKLWPqiNmj#JTX*z*BtDw zIxhiO7Gvh&7&;Dn86#tk@X-t8v>asEwr~h=^1&sPlJB7qaISn;2=GRh-{10YIo`x- z&-HXtJ}imshvRIr2O$$+B=Fk>&@YpyX*h#$s5cZAFvn~&J3$ASM#vAwtn(@EoHhW-npqgQTs?*H6vDRvribSa|$oSS;50_*Bt%i!NwcX?8 zU_h&fTdgby&jwe^cKl2n;x`fPSADhjEo#+O@0Thq`Bb3dLZgL4;J{t1Jw9PB zZU6T3&m`FX77pqPwMsm>kRFQKt*uv%hxPFk?9B2^#5$d z95vv4ZoCtGcjAwq{^1ATeOzS+azg(|3d{QosmF3!qx=Wzl*kk)=V#|;!4C%+sb`tA|nfs_}J5X{_kM)1 z)cX7H|7o%^IDnH)+5zUj#n?a1<)?N90FnlO(%|wxA^UHK_M!otjOsn~`kUSP+W}u^ z|99j5O4R>u++Qf~Uyl0=N-q7&aetBZfT(ntzvc7s2jsGB4W&woWYzd+|D|v8u_593 z`rLqZH*ML&L~@f0b~Lc($N$Qm|H~&Xc84woNNkZl0-Y@X7nyhPRca_%Qvj8 zx1F`%^8>V9h1P=+%jZu{Hsv?mkN(R7|4G}-o=!pWjR3_qkFaPgZbT0z}|Gak2 z*`LyZ??8tRHEmS1t;hZxkX+>CC*fqu8sEjxe*w9{4#0@sj3fOVkQUDY6{u;HedoU> z`yP5y(EyA?qs%Dq&jFc#=FE+;6WkgPG30F0ia#=FWt2jqD`-|G2gT>nPBKeP>g zo-}7}=-Yd#Kz|O%vR`Ez9Jq=eUj0U_KSV2c?Ex6)ZNqVVKL?~H1z?pOiOKoiM*TO! z=2QU~>+t-){j?zyqJFGpH29+2HJy84H%mO$M}Kdni8dZ$7(m`V$nho?i$ppE2dzna#j)$Pf^A`)3*Gp!3Mg*JejcxFVjv{qKRwYO#*;hzTvUL3`v}lB`qqe0XN-jH?z#(+${4iKqjWxU;8|o2E%VRb4WU^$%RjA0Q@u^d{^K1`KAk}Zyu^*L&Y{POu^{g%E&yw^{ zBy#3b?;ElvbszK6`NXZ}XF{`YYSwm2ml$AE1Ns-s+Uk?9r;3)eExtBA%iBTe48Hv! zOI2M<-_?~lU3_tHv@c8AOKleosZaLYn$vhVdI`gaSj}0VMeOlVeZj~O7usZOq8k+$ zY@^hqrqkC^`v&pXwgZZdQHf}W5v$P`UClHc((ywQpC^Vg{SJe=1Gh%am%MiZSv;=T z0~hbr=i259KGZILEH93&TT+h@-u77%6GX=7=Yv_K2sd#<2%^s%1_2hwM@8mp@Q40p zf!W*zwg&WW+}8N}#O8Xns73l1sAjJICYnXG2)cW zLN9x+)O_8N+P-TK9g28bGH$V&kRoI^mXC`|3%9v%e>35R$Eu*m@?vIU!O0O@d_^Fb z`iA%Ld98tSH@v$Mg?w7hQ}f|mfxwN73DRrX;j!2afiv+DKzr+z+L~ycXAE01(y=S| zztWOlG?{_y|Kj8P{#BlyA6n0@&Q79;c92=e1+iR(jN!p}VTjYq0piPhYtAEOI6b>! z`4K3>XHA6F!mSUb4*z6mGD#eo*81!!f_BH20WBiSwGqr__1o`*H+ofluKcEDlGS7b zCe`4|E-$`o(|$U0#ErPEl`y_wIRrK|yJS~!pCgIid%NhIUk)W5Z&&F_t@){1kF~z? 
zq48HK-mBT`K%19L)!lWaQ5z{yrU+9WW?`!~tKf8rR!qEEPeSxT&*W2GPmN;vz$te0 z5aom$lf{xRD!jJrbQtuq4wrb8o5lMqtZ?+Hg2I&nYE)jE?N-;bsgfUgM8Knft9Bw- zx2tq5$HL#ypqRbCOs>1csBY&8B5y~iZf81TS6f)(mn}&379ZaVyaK^q5tp7}!<7xH z^Q5;h$8};RMwc$Y_%@fi@I>>vEQ_}v2vS?<*qNT>M0J^+W}5rqi{lLaRgMd)mZYag zEOU9_cLG-#id?o^CUxmb#jSoW(w!THJgJs4$5T z6^T&#=x{|aTd8Z*&C}AN8%r2I7P~#fY)WFX52*kt141Hwr8ii5Wo#u(Zi~;gH=&B& z++@%~wqZ+mm!BQCOj|(EsEbO=8*S3iaAbNd*2%~l?a}IB+Cqk};_F8XEaw~jb1NRP z{w6KHSO5k&6OWzG_sxI5>tsBp7Z>48U#d@DC>ZN1FsRWCn`r`}U7W>dDZzFEQtW%c zy-O->b&GK>i*z!WE3Y=Elu7Y+J!H?=Qr(v|^)d^+@rl<27DafW+t}xp;o}fxnOzd( z{t~*50ol_Q%WIHOzdB%_K5ti9Jh!(*noz{e&n72^iS4$tuLb*t44clD5#8AOhU!=F z0=I>)!{Ux?@#<7x@mU+iSQgok-vy<(?ahbTmLyZrdb)m|@IS@vU&L24)!{Peg&tpU z(gLpmx2zS}==bbQRqdoVl};>J9cn5yN2;;xwsZ6eKFAa@6e4a+DX$uRff2U@cPc>H zz3-o$=h93^0O|urklBM$S&e&+LOWL0>i1f__vvUzq;P0@F)06JEpMiQ4(Xc2V+^g#E z0WtUMkjFGFc7k`TssW`z-~SS+5XIuXg+R$2W9~Idq5e%D74ed5LiPHpzeKzKfU>9P zuWLxpg7uVAwgm{Cx*e)g8{POmf?Bj>f!S%zh=Er>G-m%Xx4|QNA>i%;z^d2QxfYFC8vwKVvz*h zNTY#{icD2lguQaa#|kGynCfmiPsJxmsD>I}=J};i|NC6?ny0qr2Dq3Y3mxH)to(sET0 z8HKXCxV^Xq)jm3=K>V~t-OEVi?(hKbo9#kkcHo>hD=xlf&|Dn-gn16G1v+WK-_?(7 z1(!aV?l79P*lK4x)k(O|%_#R#hJ@RvP167r7kD+#R8b7a*Os)9DY?#NRL$m|of(S1hTgjepKYv6+PQtv>nIu-4si zcfj5k<_m3PS;LbW0%c-&xABF2Gl{2lZP$wJgXTPd*2)rd{0w;}mk*!~(glajPP>%FH= zXBY*?7|F4HbuImmm|R#ia>!--lZW*;rd^&onYyZ{`01$-*&d3NT0jqYT*5J+SHrv| zD})8$^&*{ndQlh>>FMJe#MM2R2nB7}#ly$!66)R1v7IXUTKYb!I*5^mebYlQY6pFC zO2uRfV5-loY_=v`yaLp1dla0!0?P`iOFJ^ed)J>nH})+%A0L2N8E`fh;yaKPL5=K7 z#VM40+XmvKN@LV3?glY|Lt2|zAINOuiaLAJ9*~PFR_%^Jvw-{Vge~zwxdpsgOs>t7 z(+)g`_v;q}gdC?LJqFr8c>}T-e3Y`Snc^9#_gK#zd(n;0wQA(b;8kSwp{|{0v?Pc| zOa0yi0cmxHn=zQl_seC|8bF_`pv=20-joj_gN)ED`wM$cjTE$KYorvwNO8sEFQ|8B zFOz*t+ndrtD6P#Rk%0{Re4B)D=ysiew&)B5KO(&50j>&Ptlz3{4Zip_@XEvsL>8Mq z9`CGLsM&su_5g;aZPt^8XvL-*3$@(oj*{mPPHoyfGfzu(XuScV_(W*Ur>0{~8mzJF~w2`2+4lR?3}Ha~UwN2xO7+ zd=r(-gXs`q#yG%Dt@_NHrWo5aa8zMQC}FXr%CtdpyYRbCfQVBvNf1TNdZ~FJ|Pj@`Sj^&9?%K{h6HmNp4OVd3Z^7~yreD4 zu6fkIb^E-Q0h4_$51+}Q%z--*MEFQ;oQTVpis`%#qv?1-ts}hQWA&af*vF0cBo^QEDcglFl?=72>3tq3AZ zv;fDRJJ5In%OX|N=l?jUQZcZc_p&G@N!Q44Sk^b3}9m4L1a@8FDLn zZRigui19OM8yMM|j&IUSPj0l$9%}lC4h5mV`kT$oFSFazy*rdR(ig{T;8w2V*U6qS z9D`_CzbjEgrby*zSl+<*)GVs85I&oCSSm6(y3+&Rh~xvutwpYz@yvjqu|AI>)#ss7 z4oi9F&0zsYM$cxq#bd2wr_QtR1nFR#w4J~k@c~0H-YiPQl8q~Z>ERl*)`;=nQ zmr=~_jFY$OQD(1&mk;BQS2!|+OObAgS$6WkIRoK27sAAWW-!-hg_+%#8v{F)67B1C z#A*wUof3E0Kd1AIj#od97jV%fjkZUiV6fZ#Ha3hW(3>W zaKgwZm$%+#xa1%J(&0e_cRT2-y%TW2eoE}f#*kXIP}G4enqhQ@kuow_7i_k+xDGph4|`=rUZkwXUD^1)+#I}*Y0Twd|kbGv23i`7|7rU6s^q-vfNHtef2X-zC$=u z2y`n@CS!+^HVR8!cF-JP1`EM>fpu$5l*9^G8bU$iNxXFns|O(y2ITd&HZxbvJneg7 zpiL{^qFsmhr4VV0fH9afzsi-M!pCk?_EDk5NVnR>QcQooAubuh zn@gGvTe;dtd@+7T`sh*!M2S%azb~`bB8bkVe zz~%?j@u|T&ch8KTnWoB0N%Az8tG3n+pkhkkqsO;Yz!kH*SiKjA>hD!}%_(sdIIb1S zkX)xrz{FN~U!=OU#sT?AO9=YRIDtZ3=m?atRIL; zdGCdwFcAk9s>{&$Ryso)I_ki4#6?A_-IiFA6jx0Yks6OT-6JBvc_+xE=ereKZXk70;QWw~fzTL$xpsGpWzci0^=)k z4}`u|uyEYwfHNWrz4fLPm8XNufC7B7zRFdYZX7Os9begW3U^@Oe@bl4P$qd;)~lHO zN`l!#r8LY$?0VzA`n`8K4icOuia7S~swLUf6!Hp;-L0V1fqDB2iN}%+*N1{p1hkh~ z{c-BCon(6`o6i->TcI{3pzxZjP>s=gd+&|0b2^U_xmb1^2qh<*5Gq;*KDMMB&81pC zC_j_S#35BPJbXzS=e56}*X8ew zAo#Or0NxgjAxwJXW%wZDU53V%{d!V>q&BExzpFIi=pzI%-`Ys5!W$`>v3_3SH+*fqcC_z>`0q*T`6 zgiCg(;k?lIQTaqR>&zD*{$ENIT?;{VdLVYYm-+w$t#hTFoY2Hgu@wavu2~wVZpDmJ zyp`I0$Hv70y6C%AgO2anS;#`QM~s5v>HI1&ySrK_$3ltB54DfqHx^29F17WfL9Vk4 zyUPF(U6i`-Hc>#rX}XJ<$HvRF?y+bImb4kWAG^g-LI&3oWA6;p(@}FLu0*HhGA({g z+m+s1K869Xv=TmZF_r;zR>xOnuW>5_30)M-R*ucQe7k%2BoLIdI}@M85Nol9079|r z#o^wSNdx&oD>dky&Gi=KzSamq*eio#&cGmhP=Wl-{vH&X#9CRc`|_7Hz}0#5xt+bV 
z29nF8XbfM>opa4~jA!Q(^!rU5mqj)xaFz6_*;)qtRXqRslcFASR-o}&XDlBQtQ^Cw zL`lni8|ZN{Ek2Jj4P4SLAT%?u@C8x{ozt##c$6UEs+$0^H>8g{5_*b3M5z3n(ax}Q zy|&qvdhB3gM=LG2`!#EGpuTfDwv<>ee_(Vc2x1i2kUdF2Of@O`n25c&k?e?BV*Udl ztk$^+>h}X0>tS5XtyPVMwhye z(8Dd|b9J^`=w>r4QCa2~IzCgKbdZ{!y_9GR>9?v#BRoY{7pV@9%2E$X44cY~beC`4 z1E5kj8nETIwrA4)Y9W0mqS%`!Yx0t*9DAy}r_fd5Kp1|M2+b+?jP%qqZJb`YPk-W; z(J~O(T+?b2vtNS5M1Ed@<4y=2rMPZ*GBIVy#a5oJoFQR|* z@CU5kKTzY{@B_u>Scnt+%c}otDSx6cKl&p;{Ega23+8%C-;-q`oPHG2*9?Ie`dDhkcJ>e z-1v@PSk$(+{=T9RzniKY%WFy6+hM9y$YuQ|Ml94Wg>_DCtKi%Z%`w^Ah5ngh|00(g zbkA}M477NWr?~*3dEmSxcXyJQGf)1#(%*-cQ#7w=Gsd>8=rCq!FvAB*YQ=4V3f3_Z z8RhTGxa3=@j=f4DkoiF#pumO=aE*DoQ>1Dl*B^qu$uWi)S;oMY|Gc6cTS29qj)iMB zIJ70|vHbdymCo(Dp|3NvJVNuyLSA#waP3D-QZ8<42j)79SDnd+*JVJ-&H9)# zG6U!~J00H0EMRhbFLAk;LaExWuOSCvhSf6vbakx>R=?zh^ht(Qr@af=kC(0 z*sA;4;fLcYzQO%F1^o3PSuR!J$<>k}d51fu%N+;B9Y9qR>L5O^()`q4qR9|->?`V1 z(MP7-xx(r^*S|y@l$-Pyf)V`)AbKsDd0yu^kWlu-pgrdhC!03P;OpP7wST9SO7V+b zl4WJP2;=bc;FC?jb`)l-M*j}7Cf^ykM)`orO3U2#>tmv&4rLELB8TSmG}Cwhoj3Z{ zT<04}{3B_lqE0-dqz|*C7Si7_?4K`Vcx(J01o(-c6)13arK*ZF;( zb-mmx%2~PMZea6F?VFIA1Nhj;0lOkT9I}y_NER@ZXkdBscdY-@gP-mZa>}9W8Q)fF z)P02fcwv=+7vFGx9Yg=OkN=ldrj}At7~&SZZtws8j*$nc%3dOwJm1~O|DIMnRu-P2 z9P3)ZjHCL;z5nka-(IOoUhO(RVxr4KV{)NDUWw`F-t2lE*i1D@lHK<)|3^l9Cl6SJ zJa^`P)I9sT@$Vi07NKi3)+O;9A^iQ4{`jT?+wOontnb|q{JBxd2PQI#DQomk(|uo+?6F?{xC0|*fxcdSWtDSg z-~72zY6s32E(~S+b4*hX96$M&)&2+7vj4K$pQ!x*YgY5KD}VS;!dR2fVE|!&8z{q3 z0SaA|mpn%kH4#tf|AF~a4+9bj@=J-v-|3Me5+ygdUQ*S4;bqz5mZ1I`$kGPX-}ds~ zORxS=KyXwKYl#2lD4)gkB9YQ~X%Jy&y^yQ0TdR62r6yw>GM-_1Oy#m{>N)p0ocStH zA9UmKr;2Hp!+!uZ4@_oF@b<7xnJ)x;y&wD?$T9^$*jtzNe?3PwU)+%X0h!LhE*)(@ z=}V|(-&h)gNTGETHLor2C5ho=uuE!*HnomGhTG>pl3D7yIRks|WisQo@fcb@!=&A< zCD`Vu7cyaRr-LiFuX5B5>F?xrp~`?ZO0X<*`XOuYFgdfk%YDziY)ezP8@le0_<6|r&#iKE;qJd8)G{I=ol=)EIA0j*y7 zE@rYkxqJPMBC{%A?e^pPmhtNHJ;A=(oi)_L>}+}?P>tOhuK`X1$#er{8Lsna9-unS zXl^`rh~5(EY3Js-6Rp)J5|5kPtyAFAM$%b@`&6aS2-kSf07<(f_?ntTOw&>N6JtJ@ z!?>LV!dvB`0^GZ%Q+A@LPo6x>pVL3!NXJ;zngUKK6%HH2H$3&U;EtqInjHONGwSgo zQmXDQ(-4AnNn(}=gJ{vhr{t}%*i4n=l+7_8mD1TXM->6r-H<&Ad-;mb{ECdayew^! zd}eLx^Q-!seu2zNKsF(I3-=|@zMOe$pU|3!3gXTsnGLUu1d!nRi+2yZauk{1`do13 zaL|{n>^9~&z`N)((p3!M)~k?Ig$Z*=&_PAIlZe0G8K!G}Ls&`>VflpU&RtR&9i9ob zoIOfwj_^}~t=Np1M24bf{6(x?=kvwDcDC4jv7vAzvh1SZLHRW*M$g7@4dP)6qviD$ z>+bPQ)UL46*|ZRxv6iAF=v$8n0whMdzDdRzz|b8 zcIrzE7^wV92FMf{WsBwanms$|zM|oU|!Mia}AZw`9#uCXy2lp228>2H+0fp5FOG$a$ ziH|S0$kL-#snnvqTd^+- zFuhhVCA$-aW1(N^Mz_H}zu6x749^SGton^wDgRdursnn}gza?0jG_if zs)c;C#Y`J}kRoY(;DBu>kdBW=#8P#W>aLRoz}#1}UDoos4pYH=&F<`eIfh1kt^w88 zV7Jpt&K+u6VHhhfB*mNuh1TIivmR>B7fp&PTq7k*t`^-ak^L}YcP>O0%SnBb9jK*r zi#=an+VJ{nXCHFk+tgoEUD671>hy(_jXg>nC5C8_AZcs5?ECS}M>e=E47jzWB)P>c ze}1d7c)i2AKwYq8EuF%S=MJOR*3u9k;`o(<+5Y?VFupcbR_Iq4acTaT;OrJRmx=lq)5Se$Q-JYJJ)`J8}eD{kh4 zZe=;K;LFOH<3+7V$AyURkJ51SYyu<#dpb}TSf zFUh$;*s7;+lB>Q!w z9JZ1e0!LISCC9KgFWp}uJdE0wOm=ORC_&D!^YA?WkIULC2e=0k6eCs#Mu`!T=0)D6 z6|7u&iOq1(IXXCY!<+Wj%I%n!SIEw*c@T>|W-6^Z<0@vz9EZG|39Oll0cGy0?t3p? 
zMz?GFYS6ox+zuJjPa=hvREa!QLX>x|btv=YpJe21t)sbAw0VRzZJtQOo?%`>d6Tb8 z?^O!)k#EdxGkJQUR4~10sy%&0H?8GXa%mz{35(NtGHd0DNsbgH7;&?3P;+g>_KE|x z8xEZJfEIo1k4|%JT#e60^%asF9(lGUs`L2Y&(Z8**X%T-*Lj^FA!1dKe05kLRGHkZ zh5um`#ZgPfA_lsfe0!r;Re8A;h*yfZx56R zN%Sy4x9V{9$5`$hSS5*d**v@s3Yj%lSHnJFTcaxPZU;ny!Oj&K8N>2OWs<9@{{(hA zl=rQs9+;npw;t_8NXEV4KEqLw#_2d&%1NCru{pRebUel|X{WsW)=Y<`PJ{EkDd&&+ z0MF~>;}3XV)tZ`|p^&25b7&OPKk?WF>s&D3hJKAE6u>p~GzKgmWOZ|vvMW5_DMVk< zDtD4l2|;R8yPS!y7%go!e>;M4Rne<;_t?~ZyB4rimvJerNQF*(ttWee) ztT{?o9J9R`>~p5jJ*bm^So)LeY(OImel&5{Dt`s?euR{m$H!Ec5Ap-*6f+i9JC>%d zQiVlN!5dl|-;b9jo(NjqZr1Q?mJh%@PT2~helbzm#-ZA2_y9FVN0%-(T;)C{DOLo< zEL^As)7S1~;)IRe+U;x>2l$yvVyNyLHNMyC8=fWEle~J59`9{rU;^cmu2FvB2^;3p zV!=$WxFm~$!#N(^8r+DvQ@%EBkyeFmjv3Lda2eQne6Mh%%9o!H%~vxwk^?%|x{H-0 z&CT1X@bT!!ZThHga8$`VEIc3Xj*gzqiN-5RI68MVXR&8Qmd@)^Kh(`{-oY>N?dk1| z%9$otdWfXFI|p*CzgDy3cAGXy*Scyw$ojtNvg4TcXo3I^XWrKhA5W1vlI>KLaej6k zf+5IOvk~ep`wG6}9?pbkmusrdo|hscw3#a%ath#cDft-xiHV)Y(^v76M4I?k8KO&zeXI4r08BqRA5*9 zxOF|p)85>#lFf7U@@Q0kt8bhj#4g2@ z&o|1Qzf*K;RK_q0O$-CUcX(HvuDVJ)G(Rd-1!0`Lm~Z(k`%yv91*rz7X0(%?pSHD^ z6oyK=OHV;vd6rSIefHCLq#O`FCWmzuy;f5KIWnR8&U)2Ew4&Cif2h}nN*hxJ#B)?B zFoWbnEUMPMabB0lVas2=%=02;UVH-+1pVMBrD8y|0pwz`xh5W+@8#>bx+$^Lb6Ah< z%fi!OUZ5s*LWk$*o<^rx^_F4HES;#+F)otvM6AS+wHk4~zS@>M&tV9I#B!4%`?Og++r2c`I8K5LzOCe+L# zF1K}r$#Q19eE8)xTeoWiHd5PVI)Z)q{*Bojf zzxwY$)<}ff%%~SW)KD*KZpOxo_gK9w#!_{5yyU7Hj*6ZcUSOlK^yX6(AsUoGLG-bD zBthw#57@?qh!Ls2V`yhj>~QN)$&_wS9h2^vnwu{^KE)O!X1Usk<5i-ta18rY zK$H9GP06||0!?U`&be9I{o1`bm${d18Tf$H*1c*Wz#Xvad?|((l$_m0O|uqi?MH>I z2Nxu7Li4x=*M^|_G1BX3$n;Cs1ff2QU-vZc?kriP4GV|1Rzcl`w5C4!EbzdEK{cu< zy*0gG^|nlTm_ie8W2;T>v7%NFatC?b7PXa@K;0@E!EMZCM89e=(-D`q0E*Y`=(j|z z8LjBf<@_h?LASCWEuR5H0D_FdVbhhu)-Tsl%SSX4oBFrIT64%4V?lDzhKa z@cubp?4i4%f}H*PgRUeVcdL$4+@6eK#G;pphC=qhhIk1hkvOM^(lB1S0~RIZTOy=% zebXoAMg(E9#(u|X7>;r?J1-mJn6elnj@phC&e-IIjtUtXOiTjx!RF0J;M7f;N2E`o zt%CPh4H!$C)84Sn>yRfa{9;qyr!RtvAsH0fa4_@fL>=A)N^vz+fCIuBhngfno+jqh z&lp?-Z3IQ@rpqFOTl#tzX7Z&V43VaE^*0a=7uuR4e6qFN)^6To)n2a-3TdEL@dS)t z5JgznFP|x_NKLT)m}60`#%hI?Gu7)eD7EVeA23{{!iow>9oSV-kul%q<#l9dVaL2$ zhrm?-eb#vl&F~RyMU;EL8BA5=1Ivn-6JZj!np@cE|MdE32xZP;5YsDgTqxzBaa%d^ zNN4cAhexEVMgiQZGM`XVk{=U*opLV6u2;J6As1%KD9FQn7%#@EORQ|0B?bjYEisV7 z%x38Xr^uWo@fmE&`bm5pYH5Rp4G!d)En*hsXJO;HCBHnatYm?DK?$>_xGqS2xvmCk zgtJ$;X4w#5BbZQ!E$}K)WC=$OIiT|4$1fBVNiR$sE>+ZH(dN&XoX8t4U0XwDXb9ki z^DOh~jwEqZs0opkT@Q!S^D)IU1Ut#nzS6wN&4|KYxWVkf4C-2+A6;+Ff2^~F=UATF zi|t$Dt>-oHZDPu|T5la?xxM0v(F<3$cORF#wpsUUY8^RildI{l-6&9;joxo9!%xU% zYayAVOHab@UFtaUYH<64*x87M%BD@+TW*|k)bVgt8-An8ksT-RA(paz>0G=e zc?sHasLrEKde{RykzZj`q~bigE1%KEQ6Q-mE{lODx!tMQ%gIVy5b%6hZMRSU_amv%IUlsBw&h!|I{W-hj_h5RQAGH=+RgRjOM^ZYG!JzpiecY~U) zmaFu{{(OgZ2TRuATKAGr;!vSIZ11Cve4rH-?X@}k_9bST8WNtkLz!dFtFvEGwr2aW z8_S}?vi&|hk(_b~F{_5dnx`byuiymx)O!dHyNk2Pj>G=yk^+Q?3f2Z*vOvT-5tC)1 zc)2%)jD`bg(K~iraUr$>{n#jG9O=_aWi%psw((U9ibG`B9ecF#eYC2UnXiXXr~Z(> zrLqXB{R{oDSK9iQk5ZjBaD&JJ6{wq;7{al2NGTHbVYcN+EhxqNail5sf_ou%u*p4k zRm1qncNfAbAhW$GYw{Q zxCPOD%r<7|C4;GEtgz_ZErnE0>N97Dw};Z3v6^fWGk`N34^E}ZZM|AT)%tv{>9iQ< zX>L17sA?;gx_X6TgtKO;TG^wqqN-w~nlI5BK8P#EsEaSg32d9ShD5_^W@2Emrf^@U zK2&!%Zx$Hl(<)xljCrU}5q?%Z6*q1@E?F=6^iYpDmT3p$x_)(c9kv2#OYQR`b0o=F zl=|(^V`sv}m>6r(eu*QCxj`Hv*X9)(JRTFOwT_Cm*xw(xBS|G!mj`D|>>)Hupu&5IM!O$lWnR^QVw!Jw1WfJNfvkuP5(kab4%Q z$lhz_XPzlDlp4x*dPER^!`r&?J=Mbz^D8MT527j~(sYN}DVYyS+`ToE9+bN*e(KwC zDzD(m8aH(GuE$t!<|Rd+hPxE7$W=RfU%Z;4{qQ1pl6n^j(*NjK?CekkPi4uiUmP9L zXJ>27>(_XIlgq)?h)=-DGmn+4kY=eT+T5Hp`+b8~FreK)l61EISQes|>MQ?LCQX z6II{XrJ@{B>@B%ryd(wTOH8OtaK-};%l$!HyGu{kf|1(Jt}6ezjczdi2^VyJ5HoU+ 
z6f!z|_C-Uu`)6lK1<+IZsbe_?rHW@Yxf}(2hNQNp!^wfdU#(Reo0XdylGUXv<1+&> z;VciCJD68?ZqbsjKHju=Q+eF0;fb%M!>0ReWuz$c*4W5d8B&+Ku*H?d8)5ZAypCHg zc_7}YHWA=>SdMM!#)Nlr^u_6*Y1RoWPOG$*DQUcz`KqtV3zlXx85s_!B*YmaO=nv+ zBjvlywRi{0OL&y8`%+>GyP}LVgJ`xnXv?SG*|QMx{!}qQs{|jDmdVfi14m7JzNo8l`64$5<$gxr`Ui7rW9j= zXE}?|(nFF$lOM+l1x5|?K=o`vXLVS)N)2dHdW0Jp5e{>YJ)uJl0bh;Ga3N*!eXnG!C7gFWkIWLB0cVN0ng z&?Y*pTcCY&*u-@6Rb}t;mj@3}&wxloxJkSjVH20^#w09>mvpii@$8J2bBmtOW(-^4 z4wn+;sZ?!7wW2}?du2;LiUXvPhPbr(D;N3;D=`)bR|mK znu$+68AovqLyYe6&9Iapvd+)@c#sIey5PhjIe91ed@5VUO9^zSQ>pi-|{xWT=#RI@7K zHmz$-*zYXl?qP|oVa?w)#wX%yh8F9TBabrsI9UOX-f^G>rZZ}<3sSm(*|#m}x;0tT zVs<#8AF<}QfElY=OOPbnXx$%!rSxpdMIA1AvU!L|e^g8|R!2I<$1KI$OrahF>YfSj z;wvSUOBT{>@kR|{G@~XR0&3v7|pR_ofZ-)Zu^!^y?4j%@}mcVFi!WNTCj>f zk+9LpcfJ0#DJ)F!jmW9WP^3B@lymkFWj2GJzZJsC zlOM_^V+{&2lD|tWzrfF5H#T+!^b$7>OLU985ZY>Z_uj(eL)7Oe-@V|}T<9aOd3p5% zwBE~oGZ}(h96cTAj|fCQ+DZog5$zIJ6U}LN=4U%=mUjJ~zu<6Z_QD&wv(qaFd%?Vy(-j*f@2UGp{?pzk?`Nnz8*ZSlzo(MMM32Azp^+|n22;PK z89pmjJV8A|rYXNsf(=hTQ{Fj&i`(t!mD^~Me1v|#E8iK@P1!R=v#4d`lYyxBUdmp& z7i%?KTa^~_t3l8qkdDIerV@DHvs)&1jP1f|R-v+q&F#46GArJad)RM(9-Nhy4)f#`k zq5DUhy$nartvq6;-K}7$*O4GV3}J?@w%(hhw?0kW9(;P0JYrxoi`%7l(2z&s`q0&? zL3q}yAx$3LTX?-BaPelwjpWDkwJ`UgI$6RS^Owulfjq{dSf9<83Z}5)PeX$BJ8Nq? zp`q355M;vV`PLmnDIPM{{A`+24r?qv9YoR+sCgrUVafAS&Qb`IaPM1%P7Q0cTR8WJ z?w*gxA4yn85r z#3ZH^KM~#E`5J#T&6~+h4;<^W?+?9|67@lSUQ>clZ40Y)ek^N=Ff>@`{rGy#YdTcJ z^#aFTRk`hR$izOpR-oBpE)!BRDRY(R8{kj<@_~+r+w{EYt_QXC`e5_Gydq=#I zi+;RwTTgnWOL?*U{0_eTuqVW|reaKtZ}%7rNsxYw_@0Yv><*YE>twZcXELjqKVh@o z_BzmrlYSH7f)xqqM|<-kT$V`LPlG(@iBNKSzZ~QweqmK0hf}a$FU%Wg2r|a+vl`A?>Jz7Ywv7 z2VO+;pOvAT3+IIMycx0930`9hRjc<&DLJ$Gc0XooVIIOFsgg&(`?Jb2r8#)V)|iv4`*qO)vm&k3ex zIs;!S}LSkA)bSpbu}%c5ax+2rwiN8el0`^?Ie^)g|Ruv4GYr53UtVM!SgB?hB|cDq#Y z-JYP+Qx-)T#Zh6m7Qp+Y*in1fLbgp6qAQX9X@(PdQ?#_=G#b^ZmFI*isJXo4J2^IS z+kduon*}bJLoL7MDhSKUDs@JwCPI;~}1ZZfnqbXgfI|Bt7)aBKQ)|A$rl07{2+h$zTMx*MdG zGU-OTdt-!(bV)Y|C?Q>=HjwU)0fS9C#u%dpjCgk6&-eZv$Mqk)ulIGHuR71q7&fxL zZUDti4DPv(rnLySW^h(L~A&w>n> zrQrg>Hy=Z`7JtE3&>?44DNn31LN}sg3XMBi5Onj(`)!GhpV1_3eCwLqZVjBIx}j&= z6-AV9$ZRkE+UgG|m9j}J>7F98lGhR{+_UK(=n1S1c0r4Gt!p}-WrTj~^yOFV@Xb>9 zA$^Y>bd8|C3}7_#P%7S>yiwrx7_r+y6?udIi>|*GQIbf?JGV}&(4E(xkiUBTMuXeL`f7+Xenaj$1U0<2*|3f8!h_l9iEZ%jw~;9J-MLH zQ1q13e1#_Nyu}zV?kjC4@gi&t0!rZAn69z_rpiC&&#$ZI&e;0!0{_2R0I0x|b5#@+ zG;?G!)}*6dQ`o%(c4tp(+TAH#XI)jsRp-5@Vhm&~p}RuhIkn;qN0je-*@Nz{0mw{| zBjb6pYBmCNR@`Q~iY{}I7w8~>@hClm>ZInq{#TwFvI$PuEB`{9__XU zPlY$6Hh+9FuA2@t}PKv!jXYx<8ii@&Qh|=vu*!wd|H^b+!YpgBJ&B*qiGw!u(^- zSW4UP6!{mb>&!NPo)IE<7Zu+I{Ma?+0pORg`EuthTNs&PVnV2S1CiQeAx{u&f z3iMALF*JT+p3v9)4|lK}@#c%;xKZ#srfI#=o78JZR=XeZGS93(|BGfJzq2VchQK1( z8{EK)O9m(br-Bj>BHPKK1tLv6#>rf!(qiM3N!dKo8o|H zfy>&iuqPy9Y%XU|oWsKiW_~NTmu8y7VY6g*|5w8w-(zL?qAOLo|6J;SRo#p3$J^mi z`P@a1^oDIj1}{SiSA<^Vo8E{WZ$c&e*4~Hmb$;5{9ru(h&*|0s^`7mE}3KSX%Dg?8_p6eaPEEntqEV4Kn5ShJ|d zQaWLJFXZQ+#`}<#{?miMTlBg1<@YP-7N+nW`Kn-1$>G9J^~~#4h0!>@(x1{~RMCas zqkJIq$WBtbhPZ?#rx}k?lyGB9vBthzg*%qc&w_aUbMG+0qQVxrFDKt@$(C97u421v zK_Ncs2|Za))^?r?v1Odbo^}m{XYUVX!-&sSbFAP99ofts#)A=P$Vzm=|B z-OnK}bYT6mY^GxS(soWiMjLvEqbrYjfQQrH(u;h2DxWph@PJn25k-Uj;ooaU7Th-| z{&39|!6ze`kE@T1(4!+03pa1I&_NvL2`gP)+j$)Ua<1V*oh-58$iVI3fZJ}~mDUjF znd54R0Bw+WJQfi+vL9KbbUzhvbaC|{vVWI9aPJ?Sv(4TxV7)C3b9Ck1tZ z+oTMtD8e3ATLQ-NPJ6ovy;6}A<7gaJQ_V_V(h=^=7@R43OOU!E|JBIoYcctVN5j2^J)m6`He`6fp4-HXa4> zitswm&nf#BU_bvkqqgUlnMX+~MrUxkFZQ9XF#VS8V%uHWTu`=QfvgageeVHvkGM*W z)~XfPHx>Iam@dw_vmw23G6`Zz@&XaeFO>SSVbp%u3beEOJWjtf{XyvSF!r~-$6hTS zs?~6*h6R!5wo88O2kL$T5XRwmqvzpqDR?{ckx!7yGqzgIv;T*Fg{@c3 zyYYs=#{(~wJ;zR^E@btbkR9y@1lc8=FGo+t-6o%L&SVz;a87E_rz$Q>epH3ZY%aY{ 
zU~I9&J+ii+zv?R1aOTaVjhFEnGe0Eco7c700C`PdlnjkDZ+tM}V`2Wj`OpK(cJaXc z+PGQu*o!ydeb6H?nx505yEZnrg`~CdFN7cDJ_>E?u}@zK7Hc#MN^S zh?;V`$+3}9tvcY_q)){gkT(;|lp;bnX0cZJvkkJh?7KL|anEB0er&$9QVembiFxj( zLCEL76W3Po$u0SFz7Cl)c z$<}3l=;^3%*w} zE0mIqP)**`QR=mG&Z~oWN{(CW!YwHmSPjir+9o#B&SEd7hmr zsM$UN3Irxm!9uG23p$MY?DwwNn&{mygN90@U>O&sZqB2y-=h&1UGao5Ww26+onH%o zl3>_Bd_Fd#TMqql@Fgqjq%DQTwTKb>F>j*q-2%~KgsR2E2N^x6Pi}SKPJy8rUrTr4 zo(CF7rq1wr+_a&W4H;}*x$~Wqsfq+lR6e8$m0B^D`HW{QFfegqeoMtT+5eZgt>oi| zU>vi3g-HwnDE{}jTHT+H40N-~`rxmoeE&bk%9iW%%NrAL~YVTp^idfMhp)r zwUd0|`-F(INjg&Yx(vV+dvTtKY;po~Xba=*oWy>8UjK8t*%cWSVo=U_fyNeC_sqh@ zy+Z-^V`4D)IMc>!C#vYn9o3*OB!$Ad#}O(6ciG=L8PBGZ z%I@9HwWvqclT*MMw}4(y0G7hoK6g8>VM|SES4VE`y9ki`C-XmvT5?A=e`|>H7D&*Y zHE0WuSDI|`COrTC4BHUxV`rdf!8V&4`1^zZ|n`yZ+aDF98k(X`Q0 zyWzz7x&DQ1rbt_7C{poEtj)5L=-3*$_$bDIO)EOBVO(0~VJlh>lQ#>h?Bn57Q2co3 zE>gSt2#0Ko&^n6ebwxR@cc}HMI6T4$t1PA@C`r~FE6IMoO+*8{ir#u2euiE(?a)V; z&>VOMYr$C*uy0sJYsIOb=D_|n%JA;kHyzn7n^cJZoKEaJ{;0=AV@H5FK`eNi!y>@S zt4{*fK2xHXuy%x8*UpbK?%Dh_A-W#~_3s}`g7F;W7~>F(b@rXnnzPZmPLmA|peS5W z$3$(RYqi6iB9e6VkTTp$f%kkp2S<4n@nf4zXrZoA>1jcZMLJla26Iqykk_Ywmg-gnLt=)4>J)ds zSp$+5yY*98VqAunYbK!c=;i=qi*2|t0Zsgsih$+j`7ew^uL%9DTf>3n1|_RP@9FLo zUrfeXL7s~@{6|N~_u)saZ@x_zDm0cIEJavXDs3v8ps>%lNYFV5v+>76$yVl?$H!tNcWd7s91?j?@J~$W-}>4qOmtUeV|JD< z#z&tD9SoqsD@*Kthldbf?F;PQkrX@dH4AQ=*Ww3z{)_GCzf!yZ$hFp7Zy|a{T{6OF zyr2a*L?6DBzG4$cilvHhxOv-=(xb*M35dn%ZKzLWj2vzl|Hg6@#`Uy^>-{1VY<5YtKR{%uWgy&+aT?Lqi*oKI2VS8>C*?i9oj)k#x|N9d*M%SSlhXn>}-4G zudpj%kjN-2@@zW;O8?K>?ZNE$C(EU?rz)`eS@DN`vGUxY!ld94c;ljkSYV>N`StOU z@`u~M96y3`POb<}$m6wZpuaW<)Z6}{@vEWpi-|@YN2(+}{MW|{zz+@%O#&jS{zUym zMX>>Tc^YkBUzZS!@R+STyf@JF?4X=?Yq$Yy+4AX5DUfzsQRyc00k9iqG6x^jHwfk) z)t5k#ACjZ|3*QtkiGnvr5Qcrjol(Ttdz$iMy&$O-|I+KYy}x*j+!H|*Hs~$M%fX>* z?>Y|`>Ghvcylyo1O@PR$>If+xv5sfO z!E!MVDy)45_GOY7%|M-|%QhqBDErCQ*@=!%Hp`8mtfL1-m*N#?!6XfywRa|@f^?s2 z<~$T*@Vf13XE3M~i1UyP!<JbPK*Lo6rD^DrxmKy zr;WU194HL_Q#pSsdhF=B3s1(|VeYmMplQD;Z8`|COxI}A+YN~csgG=gl5vF2>na9y zN=qB_vNt-c+%zdB9-6%CT((CEu&b<0mLUBLKnVxWY}Q|sRz!AsBa z(y(giPPtuDt)x6Liu~OE5U>F5z6`xsjLPhCh(x38^Ishp?gUi91-pI13Rz#rjYcAm3Bq0l#+IkFc!EpadNC`+G8QXddpya|`I-PaGRQV@I{@~@$Z z{jl&dp{3ai)u5%vjfw&O_{!fXjN*l|!Fygc6!$C)4k!fwg3jy#rz8Moomr>7qlm;2y!>*cJWb za&8a5mMJ+L{(!%DCa7xFOqYyYJ{Z%lh*^@@#`4j z{oR)9G}&p$>>V9TiF)Grp6XhC4agiCV0#oB9v$}Q-pe9gxVCD9U%v^{u-oCPvm8}R4|{s)=&lsvO&3oQRqa)G`pU!=~SQM zUfo#KIe!#yW1hMn#}G*K_$$mu=!8Rk{(-erfotFhHOe>2^pp)K)ybv~ zs1&v{PWQU;rOYzQ1o&o)#c#eW*6eE+34XLjDhA<%pTK$@mdxqM=psRp{q@23-cCSq z3Rdl!+Ok)p$xloY;!B-m$(2rvs!iak9bw{-TfIzikpt=tBk{omC08@~MSD5bp}#ENaxYUK`6AWh@#g>opXT)g3Dju z8=a6Nt;j%@Le)*ZfE`_I65tJW@>=a{%`wW9CeLPCXI^Yi6sDsU`6c|-J5FQw%?b5C zm$&nmyIcG6fJA@P^pIp)<4+$^`tIqedJwEFL9sXWRbB0~*QEw6oZatTQB`1HJoz}Z zbLaEF_Bw@u>&S6`#;ZM-gFU5C7#EvfLqRC&Q$Rd2WRvq29=zUIyr&KfRzENTJW{#O zwzO!e&}DuJ_h`M9!;`&b8CvJJ$UZipImj^EHlxCJ?+j_~ZwLB1ZOb;U^l^nq6x%Y^ z=riS2Ta(oGSr0Xq+Uq}+6^%TpV0E2(vn^u1PZ}_S3}w@W$*w~<+r9+185z&Hyxo%tW7uEP9Wa~F%;HrKRrG81t^*l3O&(kM$_3XSJs)u3X zCD+J+<)+XJ3*Xy3@zp<8E-TJ57Maf13VVpA*kW?}^jC+uVy;>TaFs~Q)_$oKi0(U{ z;u*>>iNV^hFwbvS8ecLv?HcZl(SM0bLuU5<>5y0zQTg(!p6Gd;p(8t2_JcQP3sB8` zz#fs1vX~26?v-{^UNH0c{cEG6t>trOc}{Y1ifgw6(%X^|hGo(L+@Iw1wqr zl*BngAG=x6ho|uh2zWQJN;H zmdTq%8zPj4VN7Gm`CM7p9@%;t!Dl1AV9Tlua!PU3To0A+qH<+C?%eLo;xSWoUwiUP zC!Vsu%$!h+BGBRwpi?+=cCQn%UU|H5L@&4vnkr87YE7geiSYY(ZN461P}V`SRWW(l zd3%g$Ie2xQ(iP`}37s?#>YKwV>z|eKI~%w$!>8-)hBlEP?{29xJC@bMORjlwE-pBq zzP`duX4%c4$2Ts0#cTeGB(BuDt|C$uUH(yxd|QO^tilBuN%Sn*{Q7ooODT)!D6uv@ z5agq@)4lgjc=bjy2ZOYJ6}Db%@z|YVeo>%uc{7ZRBRatPjo)(ydm=PH8dA%=er{m7 z+8kYGgxOJ-v1X0{QxNAbG#MYyo2t187rTXhnUwsVNi+p+G8Asf5+;@-{8Y$|izrpf 
zgyW9>40Dx=J~>x#Xk^w~gpG1Xboc|!PfDwYsw}#-AI9(r_4*6_g8^xGJy*r_Kr_(; z?0MUDOMnmc8(Dn61me2r&kh_31nc;hz(Ms6gsp~x&sA4XQ32rFb#ehS58wC3mO?^9 z@tjDoZmI`wnhN>S>}6#W9xQ}k32luHIi?6$vFyZ9G9|&4;sU38iB`~z*WxiaSV1t~ zGJTa0$PZ{?q&hnyeWz%ONOOOa>1P|}yF#(8$;zT=EkW^gol~gaXay*k6%yEF#$_89 zBv45IzOGzXwE$}Pgu;s>D$rW%I{7TRAjH-=v&r$gulEPy)3?wI$q~zZF##cD@5LBS zFQCeFGxrPi*|zqv`Yma@$D!MuThzJ*HqxUV8GP$QW4LD1c>4I$fL@NT=Z9@d1wp!m z+PpyN(Di}Yl?IAAf9%){Z(A1QE&rNd^!6h#^m_)Q#ds%u45$>>g4pXyjg)B)}(wAb=<30W5M(`bN?p9Z*cC`gfNm5+KReTtmOQ_=(NxUz5 znUDFIbD~5^A`0D-e~*Bv1y}~PjJe#D160A!=n215t|F$XR&e~WH?f{biq>;~ zjlbH=&qo{wL-<+CPbL$OkDhe0Xe6p+IJ8mL3IH!FChyRg9Er@x!~m7iuIZFt7OmM+GIRow#a=~<@vlj$9DV9~-U}xmB5VBQ zOQPhQ*c(YIb+w_}+reS>6OE>jR_CkiEADv$p-c9ML%T zXxD8h)<|H z#1D;<>ojEB&jG|Jpd}7a75Yk1-$dp4K!= zEY-u?o{1^)t;scU2(bO?-dwegS-vJS9y8yQ1EmX6My)*QzC`8D>@A1-^KtEr9@d$tjBk5+p(|1qGGLq7|m zX*D1ti?T)V%C0HY$Yf41zy&s*;Q_A6(D4=?@I>aPiuYQzA54orBnVkdbZ^jdabS8n zAuPG@j`&D|!roz}E(ODXNYs}H{$tFLFMs66sI?qcES<1q;b}7rVBeL$Vbyu8)oziH zcbw$^bXns342G9TjYEOsbHGwn^Gw;g=ubWID{`CpO>g*LFRcbMAK0Rl&X2Hle|&wE zQYOyB_NdHyTtpg(>)28Abyci1vsDjF&tBhDN>IENQq7F{la}Ra3tY;K8!W^7D` zoQ1eeFgfVI^WnfRnja9^`lBa(W&V2+Bl z2>liBzF+pm%hVWdo(=Mtzd)-`bediMT_6-6R-$yb>q_oimH=@Kzj9hAU18x{bl7Ni6;n2V>;uYf{0rmf% zDJ9@EwtKDlPzqlC;pqSIsk8S-M+A7C;w2>2xvbq_z-ViQF>0>tJKU9%@YIz38L(eo zX;$O)?!w3(f9<(AOXk!|M~<%q`hUKw+k*vOnF0fs3LuS==yJp1G!*m0mFuF5Ns)(H z8lv!V?0jXgoFe~Bg1Mwy;FVnK*XFwob_kGheM+pG6GbqXN_%`p{z0S)?7Lm$igzB( zY6lomuO8dtm6Q-j7vkftBNdx;b`cWe!@?CS~9E{g-=B*MuO#xUWlD0BOQ(b(A8vIUDfT9u0S|QSwC#7@H9jMa}R7 zCpy8Tf{GI4_pGYPnWr4bOq+>KPUf%&aat5Y_GTUjg_AB46xH@R3ak%@1t6265Z)KS z4j3`ww`_^{=g5IA4)q1gc&?uVIi6f_U}>I>7a7L9_nLGV`=%e-Vzc?XOzh4SNB{Mo zhJ-1uI>S!Islf8vsm~mGXkZbe_Y;o3@dAI4h3G9tuCx^z7!cph!zSMyjExK(LIEB^ zPu)9LB(WWwj94Z_v*C82%4CQlzf+W%>Ut2#DSCBF@k)*DY8X|qewr7sj9?EqFDnbP zo6BxqaT!7S5}p=h)d}kMA|NkHXtA#GXskO=8elqn)FS>fR6ih39~oC}N^)|r#UMDo zBBg_r_1afFFxcs9%X!8J8^?E!bLV4d_+5R?`4V&4&32!aG zvpC**y@BH0iuQ3+lI2rv59)W#*iW?sy)&~&fLUlGWdm-p6N%Rnmgv4eH9-BD#6FIK zXh#3xWBgb%!Y;|#jO_kc;S}FeXkv9zfDKtF!G`Bv)=BcZ&Lkx9xzOW!EwTFyW)As2 zDC$?S84UhoaXB{n-BE_vJx31d=-cjCA$5WSk68YuH>JaMO7Tp?b@$`m*a^2#Zqr>k zQm^S@(bR%U*Vi4>C0b2EKLwCyVWJ1w3Q`F1rPy${+LV9XQF4i+=`}`adlE2kgN}%Js*K(GqJr?O_fBgEDo0 z?bh^74&ET8`tjgFT;|<3M3<)mmjcb}Q?GyoO;RB31CC?gBE+9D*=`vTA5f8xR>j;U&u1X!wR>m_ zXlZ^$6wig>U;y;+U z2}-N#ANjO`pG&=78)SY#G374;SQ*t`<#NhsJGnIWqFz2ve~OusR;i^;_xjfjPMLh1 z@ZNjEf@wU^hu{z{s&J_>-Gt+&zx53TpX(OH0If<#70-TLAMX_fIXJAy3H^(FqyLE- z!_1raJ>!GKsh$0-Fl(wvX7O}e$1iQ<=ToIA_b%na$!^&$XG=-`IfKg470fU4grXHC zdO4)~i5tbW;tqQG{Z`TJO9Li@qezupWV z_D$UfiW8&j*@EqsT^e_}DJdO1m=v79sl(p=7n}S?ZzFp4xa*KO&^lG%zX0Y-e9E0P zN0vq5W9mO`e!>kFl_E;6Ms?SYxwXE#gUW0lUlcb8joe@pL;G{OHM*pQ?KV@Cdzswb z=jz9ObeIf=F3V_omVey6UNgrsSv`3uukcLnD*~j{IU`1o?T`Lh`BNAA4~`$xoqivd zNU6ks^6qou4b-Cu{3m=#_6aPT~B`Q`wN+O+)s3&mqb)PR@DrW~f37BeUOWAdbH2 zv9{8l_3s+xIJW%uQE7b^3f=p5t4(GG_@GcG2V!M7=d#W_`M^AJ_9d{*`%jpTf_6p~DowD0nZL6ouOL;A>^YxuO2lIcCv(}xaXNC| zz9AO~^+`ZzXGi{a-Y3~#H+pn)OC8YmfW|q;=z+ig@kX+<3h7+qc9Iz}*k2=%jLfQ| z?lVM%Ap%2CoLGG1?{_gJ)h?D6Y{@H_7|kc^?Y(*Kj%`s&QLzFOpg%A+xits}r4ns^ zwB*^$NP4@wmUAS#NY*FRSY`UR>O~x^K^rgMb;zs!4GZ=-4-xn!|6w2Wp^`2R^Bc;| zPt}(myHXs$+A4Na13GiDe68W=IByF_vW+%Z!CM2w2m^_{BWPy3i2qD_Nw8jC+OF>hRk# zpW3JC?OCs<=&iq&+<4|kcKl605=U?YRvG(iLkY~5u(6(y1M-uZtQZaCElFVtZJ~|y zj#p#BN4sF=e73Q+fU#RjV}PxIVfDD*KKyf;3FHMbSW1fN=c!<;=xc#-A44T>o$R@K zgvMpx9;*{jD+0S5yh&C%&;^+&3$+R21}~Pl|1e_fs`+5rPdW2%`&GN%A*+QFomIqc z1d7yiAB4jW$b4b!iNZdGRd$-VR! 
[GIT binary patch data omitted: base85-encoded PNG contents, not human-readable]

literal 0
HcmV?d00001

diff --git a/docker/media/project-docker-registry-view.png b/docker/media/project-docker-registry-view.png
new file mode 100644
index 0000000000000000000000000000000000000000..a54a9181aef70a526a929e56115a3be4bba0db44
GIT binary patch
literal 247905

[GIT binary patch data omitted: docker/media/project-docker-registry-view.png, 247905 bytes of base85-encoded PNG contents]
zFSTdRDp^Wru!v79^VQ7LZSUW^=U-A)Ic1(+Cy)F20v)GiaFqx?me}gMCv5*gb+Z9- zos6-7qD-8e)bKfmcRO0wV>}DNpTVCQb1z?d(#_b18#7WAIJMzJLS7Gi<4{~CS$ ze*c0HkVJ>L{M}nGA2Fck`x4!U`K<*0%**2hSWQTX$4uplT>o6Y!(BfFt%(Rn&{X#> zcHj%+D%QzhV!Yb}VOKtbmG7UhcYZ?MITGDsBqbS7xeMbB_2hn|Gz+Nl9!^obcO#IR zWgGO*?U#O4dnq#|=rD`lhscRSId^^Ty*^9I|8UKfn1mLNN?fC=_xE>9G_QvJ$`iH9 z6Xu9+p7qNKJt(`^SD>Aaq13*-5%njh;5f_81C9G#-+XpI+Wgop#Y_x#OEHr0;!rk_ z8u}Cb+I_{ANA~s}H-@b4+Xj&U*?Nlv4eML&lj=z!h5u8Rls_}lf-~gsL}Xj-B(+)* zeYU89cp0rFGV77kxFmu$*?pnrCPk%3?Sx9B&hChS_kqf19jkT9ewN8$olDRS)Ti?! z8=!4fhYgUPN-~cQt8^rf(}iSUXfEZL^WAVRCIUT9-nFUi za*TKf%jX;e?r7UmR7TS^TXG7TaE3*6RTznY?e}?7VR4hH!h8p8Nd42!><^n|^9j`_ zk>*xjugCrU&53ies2#rF5L;Fz=c-i0@*6eeX919Zpq;7lCEY~3JsfRVg(*i4Dx~{y zgI_JSnsfKPhk@x$REE{~+!oY+aDt}&4;?6D+!aU2ziiHc_g##x1_ys|nKl!Z+lBW= z{dx0y=-OJivPj(PuYm(+kg&g&Skp2jD*nNuc-T&fG7<=PLGFd+$z7 zc|lEiOIiVtKM$*Xot!-%`fAralh1NH5aRg=5RC5<3nTB=&4hgxJuveQbbCuP>ZtL>}>WwEA_gSHOA%k|zVh){lo zN;Dr%J^c0QAK#VVR_i}wl)4lBD!0RSdv~4gkV9zlz<(9m3OTs8e1>|cqxJR#@w4gSz1b2)|Ksem~ZT?XH_ydM}<^teX5;H#^mqG*at zAhpRI4L<~)zO&A{Yw7x~j2{23=YEL}(M0gHgzxd!^koS!`{J^p!;Zyw3(NFI#c%@~ z(m%lf%k2spD~??QwVe^7CiE=ZifLco(Iq>SKUI$=1ExAFX70jqDg=R~$x3^kiIXP| zpr z51%prH&XLwNibys*yn~~s<~}(H1E~FB>mok?-KD>77baK-L_PGW`)H;VtT)RVZ%ay zh;LZBp}|$#C7aEhU*^d!2ge*|D7|?W#Jo99u!a5N9xh%6-QDKz!X@z2-KtrvL(f3C z1~=`0c}KI1GY7ltZ%(TyvN7a7gJd|+*}PKtAJvaIlaL%v^mSbVAoOdr2G&pLrp|W| zLlBlfKFLs2va^*E_-AT==@2!r0>K63G0NocgrD4qt%U!ti zEJ+4vxuE5Hj-`YmwC7r^cqagIR9qOr@yvcVp-z&76B(7qjSVwM1@7txC6U%xs=f|_ zBR-`y-7{=fcb0YlAVwj0w2D75ixRuDsV>U_bR7DE zEWoxn$#-Perr>Pb5Fq*>br(%UD42s~^oeQS9*ndBZUQxk0?vXmVEQw8H@u05tbGQr z=GRoVCpyqsGw@}h6j3>p**4@biZ|H_El3MSqvR@+MJeG_W~K-wrleix0kGE&%`$(` z1+J06i41)e9(e86Q>_k;W~)&+Iq42YKGHl{loY#jQ+F%22lMD21#>gdI`jtb-785A8&cz{Rnu#`tM>9pj88=6(LANL#dpPUyalsU9zhP z_7scqf-+P{tHD9PSW{b)U##9G-D&Rs z(N5xGWY~>huzj2i-I*bC;ssVHDs4WfdU69<1t|nS%2W?B@XiV)hV5A=KqpIeC{K7; zm`mbBE0vS1hlP7nRRA(gP(!t93>;{jnH`UK_Glql0`RC@O#jV@beIpg2>`3JGgr7N zDO4TQYXD>TAI!&l$2!g$uz3syE}#I=AJe@7A3ctXB$Nl?qLTGJexRcyS8IjPX=fh# zT9H5EC4*nTC`fe*DK4TPsVmyv?#81=AZ5Fhgt`^?N01YbGG3A`l`h!_0(y)J=w z!x`V}9^^ridQ1>mO~i|cEY&~$-!8NPcw7&?p{H{C`_)d~EMm@E^^U*?ZNWnELBO>F zuD3DvVQ~_fr_RK!QlGbzTgYGLPs5_QDeE`Ql%s6J+An#>DYj9iDZpiNyz5yx63ZJD z3~L+RD_WEl`Iy*Va)PJnne4&3ta^JUSu1E(bxknS3ji?^#C=0s)aHQw`DQ9*G-}|Y zi_{fl=(RX_nd2gSLvPoXqW%)bt)MT*hf$)CgiElPlaeoZ(i1ZUZ9B;(i_4HKAeAha za)8GvS}6#c^<3RI^vVaDt8)8&gaJ{|8X~~o4;ZzA)NIoA3-O3H>Sb@Nj2BZIxrc1| zJ}aw#PU+e!V||t3n$#=Z{{NKT1(|02 zpBQ-8|9=V&)Lv69m;s41J2p>lJa!}6V^pfoFYOcjAYdkk9vneO#h)g|5~GP=mLt9_ z(hiW^FR*EeADtRc(;SwC@sh}U#^$< zNkxvX_Uo(muN8;;WEPS(w72R1&HwK8R)${F#$2Il>$`}Y3XzEd$QVK??xo1g@gld6 zQzEIxN2okq?Fqy%Q$%qqu@H+XsTBmz#)9saCWqOq`?8S*u_0Vn;lUni3K9ntw1E9> zqUU3W974*Pa!OQ3xp840%|wsCCItm^9Rq)UZ*!pD>1e4gI>)lF_$TO}&hBwpz~#9B z4H5IB%~nX0yW<>L)ko#`Vm;>s0FddT%0n!i>ptzpo00v+AvnkwdX}IEeiz!ZwbMoj z!htZVu^=rdSYIXil;@+4E`d|NWgD#9%>VS{xe1_Yx$8=)RnLjV2@~+Ti_>Ysbr&T9 zoTFOBu)OarTakZ$%>o8&9ko-HfuYPVd)kqb*yBlusLE_fItg&juHjnWq@aoXcqa&NQc%;i$go zg_ggbTU=X_BfJRv=7E-Gj4YXm0Jji_5z*KFmA#b(hE)VKwBxg__Oo@j#t_BUR&FK& zngJF}j3;ywbAYQU`-5`UEKUnV9!{c@cA|Nfz^^EdOeDM{oZxA~MvcrGY%5Xf_()3R z{kbo*vblpDJ&uSKdkC%7oqdN-OaCO|{Jr$Ha|Q{(zotlx6vl6pQ!0C~mm+Ep{EE(m zm-tzcOU{gf`1$y{bUz35QnwH-^S5h49>>jtu~{XkM(VS9@CF$AuOr(P;8(M`(lg)n z_xFR|+Idi_vV^f2uXEzgRQ<&@77 zE~Kz3c?9W%8m5*|?vM?+c2WfKo;yHU)_NpoUynP8{!C+#7Y7)KXF%cEcD9H2tVY=h zWu(Zi@BlS`0hQXnMn1!5(otfm8mPzL1A1f_n0Iv%hZT7jNj>5`xqf`lIbB%~czo;y z4_^byfZdkX78F8pXb7ZEA7vT?1A^>DCrQ-|(0jHIZD`3mSCPW#=NFM~GWKHIo;nw} zF^_nbJrm)$dcwBbx|ri2yHTd0{fSFqAmxy;7gnbV&GF8E)x2km+afUy1J 
zDTqD?@jU^1PrbriScM$|t(_yK{&ns*nZ(!$#8^!N>PD9nFdh!#PV{&*%{y@{?KVW(e!KYgY&v5Y$(SUgV=b!G%?A$1b(AuoX{f31# z2;_;@cTzQ`p%p$0Sp!psHq`e92;z63u?-RmZfRcy`jPfFP&fvXnvJLUjvbQAMI_lw+b=+V3B7jE%ON%~;_f*zauVLyQ` z4$Seo8|vG_l+v3N1N6cct0dko=cmof!dgyCrbe|_YJPfy@wJr%f_e8aJ$C2&E|J!ucNcd94&JMOQRMl~&fF$(fFiSPUhwy_+k-Y3_biEBs!R~sm9|AU z%XJKzk+-JXk_e`8*mw_3-&acSF}vdUw?U+=IT@g_Eq$0PVsi2^O#Y{gZCOX0Rnn~b zs30ze@+z*L(W^L=80YLU{ihc|c$xaBwPwlg4@%yQ-jU*&_vC|xtSAVet>2?>FnB=^ zL7h&rtXCyaWC-Z?`?T<(D*&g({((IN$2eGaQzhh^UN3hb7&19$`y-`-Kyy% zYV4hXf&@a_tDC)~wBRqYdi5TyaKh|XFBX+#ez?1KN-XhX2K6e+y1K(7Nl`ySF($zC z%EwEgf7#x@O!a!&hyvK9=-JzYGHOoQLf3OW7p~-nav2=DqE;W4KdcPoj!TLY9&P&I zhN5!H!s~CS8L3@y^7rQ|?d4KYR#TJGSkHQHP0+LzBV*(IF3e@j2LKKF+Gel4@5ZJ$ ze^aJo;1Hi=8fV{Ou((zTqG#Ft!k>-_N(X#|l9?hxCcj6BR|ZohOK%g3Te$bWr(<}M zUf+$o-Svn#c7SBA@3%YZNqD{PnUjoTw<|M`etbg56vji}66E4LqjJ-^BZEH9EnKv7 z)iNp&Q)|;rQ{u$XGe9Q5b>NJPKlfo734%nwde~h^X>=8a5hqE7tq=u?zO6g>CXX{6 zW*)O1lk3P;e$6IvO)hp^#_e2}h6 z`)J=)I>^mr?h&%h)qNy#7Y090dB=X=zRLT@RIEjvM zh#acqg5HM}CeZo%E49k|mJF~2c|IIVbsloNdcdo0#bdt}zLYr3NOa7mgecVuSbQ|Jww(%-r4p^5KFR#oeRPTOj+c~|R+VfIdzR$^*x%A0Dyf_}(DY8^= zTBof~G}5!*M|**Hk1h5R#$kFaod=yl$r8^0!%o6N{k-oiaExzagfW7@4&bame?7Yu zXl;Z&L0nK&JMod?z9d=lN{<~if>Ty^jp!whqHU2wD5u5=p4DOf5sVC()>FtRMR+Cz z70pV-%w&v33MXeM*jx73&Eij^&y+HfoIsD1SkhQIe-qLlTH_LDKoxjK!*z<2JZOji zeiFLIxh8u-;3*cb8FkbsT-J2s!~6$X8Of8}v90Nr_^C+Y!ex&M-j>n=$)$0}!o z?(nJ&vq^Q&bNK76u$`L#Pk=@1i4DXFz5zFo_oWme&&;yXiM?a}MHOvY*lixj9H}XFeEwyZt$S;_% z_%BL7D3iMf*##F+``w+;5ZXuWbcb0Dbtp}^Z+19DyHWTUme)%vRsy0Amta_l$;6i~ zwGf3)uO7=U@v=Q&vL3D0v9`qdol;4Kw4lO-(htNM0RnDZ9&6VWaN{XzT5)^)ugt~7pu|$Gi5zvLx#z zNhHInrMe|OB>J!m2ISiTOs#=@i0Ze4Q`verFu01mv}`w1?vFh+@5VB0Hg3IDufD$$ zA5=T{dmT$~fxZ!dc-me4-T5|mz;-?k031nmcsM$zU$YD+)>YANMwhK0s8qzB!mG2> zr-l2nLAJ!{iGC#&HIUr{0Vy|ki7wqGAJuPI)n|vJ@+J?9YHK|^EPBwMSnXDUKo#a+L&ogDwGw}%cRadjpQ!paIp9~wmJrSL= zhD?yEs!p)?q<%+v&ovNcehkZGJq0OlSv1lVSNwh*R-ipep=|3hDKkQt=Q(-j68?;2 z$UGe!dzx_Mw|w|5OtR@l=x0QSkH++=q%bu(BPtNUvors!K8KO$37QWumrlnt5FUIX z`J29om^ZnQ1?h>L|AN8f(1<2^&e*f%+cF=@&h}LQggav|d1T-bL&$lQFGrm^`U=A@ zb@WFw1(<7Dg?O-}bo>7OM2>QH@F6Dr>l`a#juU@0S&Scp!Zl1zSU1a_0S-|X54 z69H&T#<~!+N_`L-{DWV-7XwhhRSq*Pk1;>b^_eeN-i__+=s*f=72ySo&^rxl*-9@B z)+F#Q?`W4hlbamGSBMq<6-isUjc<~4( z4i?@oc~>(8|I=IkF{FM^kK;03fD6l!od0&b{*nDq4(SNJ$V;mK$pioG?t2zpFdf4b zhpgIvJ3d?Xurk!_NY*j;r@w8)|F08KW}#tg`Hbu`w!Rs@2??% z1E3_;dX9tt?f6CwU>WZq?hhXQ7j^vi`}{AL7X&CN#9#2(e>?t3EU=7?TZ8#${#7#m zFSq;~)n8_Ta-ov1t*9}r)ghWhU7_+C@(oHKrtsO7v-@JIl2rcDf{!D zGiVP|UKNhH_JaR>;a08Q_-UrKAb*pey@glK6qr@rMVg)8IOaG+g|7dQiO5jrIHcOZ z4m@~(2Fty5dG}A;pTFg4MXJX}ERgRoBPA;S>M7KvAX!z@BXp$WpV@es84IS{-XddNeniFRlJLYGjuP|F(e8Gk{TaW`;LO zL5I(*+W26Bk3P2;1qA8Zm|Ges3UO0xcpU5%^o3SJRWBW=b~uv$Sd*(5*PWp-H)M%= zjzQ7P%biAqd~qL>upfG0*I(-#9eGgkJ&c!a`TPS0gM&v#83QK{+<94Zc9?j0xX!QI zX`%+0)h9G@!dH8tKQ{w4@ffRGuMIpt2a9i82{<6|+GG!$_IUIinnFl3G#Hw2Jf27w zpvH6{NBQPbIGuE#;f&cJlfAt9wKA`S`3Le#{K6d40zDbk!2oz+U#+eZD|cNqn_-F~ z`8@uxs7E{oMY*PxG|m{{Qpllcna|SuMaC#Py#1ejqHK*_A7#p#jT2N9GWqo2;w2NG z`C2d{Gq85PZrdkCkL^v)^gC2fLU~WsS7~;Jn#kj*#Ea`ZN9ac^`ajv6QP=n}f%a6e zn?R6o6&Py+kibpd)U)@GDCpYRbtJ;)&U`>@*$$j`*C_gN{oOeI0$03iCYrjfohW50 zq@5)F{JEq_n8&x#JJCvnnACHQTD3l#uaK1O*Q#%WGlH#IKC`Huvr5*0zTuU8b6#S^ z*)pll7`*au_@&WFhp`_Vb#UHLyK46(W78V1QYp(iGvi{5Q*vZS)#e;=6w2z&4 z62DPo{87)%taHB?j)!6DE>+Hj%$!;O`dS)a8v1VY0aG;6&&sbONk+zfaWM8v^%YZSdRO^_^BvtwM|sNE%hFiKI3>L{yJX0w3AmYd zym3e1lki;QuEqYJliSzd-)jpbW)~Rn(c|oH?#vCXMVjUoI{*y<*Tc2d)PRh(Wmk>R zRy&4w%3Jkeb2eYY@Ek1jXK43i@*WIo7S}FoPrSe4R&cu_N+v4EilJ;ZsEUL2Uo72O zFkp=SKU95pJk|Rj|4n3NUrA;uWESPvTL=ePm6alUb8N~;2o*99O7>pIIL;BW_vReV 
zA$uIhJ~+nj<9@&2-|u(->%Tlc=ly!Up6m5~Gq}?-k{AD~Gm3fZ@#~7zRSX-*g%Pb) zA=hQ^Q&!Ak1quoN7~X7pfcSJv9HX8KbY%~ZEa0j;e9NTPw_ep=;?5QvFOqC2{h;Qm zxmvTX9Pvlp%>^<7w28 zW}=K?eqV6yx^p@+=|ct8)_A#{j2&>wLh6G8S}XV5PuYZ?UXfiWcnQt5hm8^o7_dd2 zf@a-v6NO`jx+8vE^`gEZ^!Y{g+itFq)A=VW4MyK9P!E|uGjL~ z8_*&0($Mg;L&1fQ`~Gf&8@{(;*a4ZT_~vQKaO z(=^dAXB9q@YfC(3t4!OdAcQHg&NG;y0u4{42+sHSsW$Q_brf8mx%cDbV!AMf@CKe6XZXwT%rEwiY{B@EsWiZ;?DvbZ*^-7>~S zGu%&+aZqtZ>;+q5^{Vq?t$)49XX&G2dG~S{BKjBIdjUyOLvFJouPrKZ8wj@gmoI(r zpKe?04sxz=`9j3ujFv!Jr2t2H(euh5UXo3Su>yU$M?qHdmG+~e9$i+rpgK3>i(s+M z<=aR}+EY@y1)`rD>A#N8<}IN2@yv1Z(n1gyjmZ^cNDF+6%Pn+U#bk`z^INf8va4q5 z{D1N^ec)DYx*n4XT)Q?Vm3V{?W-(@uv*4XcL=&Wb@yFgzFfI*J<5?04JfpaO4KaBE zKMHg;%vd$i<@DFkl3EzUAiezQYZZORrYOa1w*s-=hvFFFNE!r+CAswK!7bUK6R%VS ztYxH_O^-$d6>|n{ew9sc56}jM6i&Cp7N9tw*B%(tl@Zcxxq zjg!{MN!yRy<>~2ijxXX+*#32uBfT7A8J0I1B7N-K8OKrS;K){IJD3SZJ-;i{p7w5X zOYq|lZh9oHEF_Ay#Fo*}Fa$f>CU*bMBUcGDY+6WW{{z)9C8|GS)rvW^XgT?;hIn-o zI{}gN3Po3TvZv}_*uc>J4msn-==nbJlQg6LH1se#3pa^p|GFoSa6ZdRM7nEJk9N&W z`CsETuM8t#0WH3?V|OBd!NujHRotyR+ZO&n)P4i^Hzl%)%WJQVMAo?6%g^+$+LvGZ zedlol>3g@Q&}_T%PQLT5-EIlLK4nn zu`oiZ@?lok6y~&JgV;g!_RR%LS!M*hqV}9U%7H@|nYT$JvRhg>J2`k@1^!dGi+#V~ zp{iRF1%g)x6=ceu!#Pv^=_kN8IJ3hKO}|GlHL07fSmZ3j^AfWLF=Or6TaSM_PL@$7 zznZu=kMUdKsN5@@Qr-Z+d#CjaW~y}9rIT*wh^|$&ufER+!}ICOZF<6R+DmA0po#e*7m@r?Otc86|oAnql*A zEfNe~R-3!&El&Cc{4i2a9+*jq0!f1H&@94qYrQ1sj!?XXV^8|%j5TX*+X#bnJ#V1P}kcbyZLc9LL>&cmZZIYV~vV}~olm+C&M#YoA8QHz9)z^lQI-B(m zwk{`Bo_6XT)Q9nG&J!8$^}DfbAX*N3BT?xX7SV46Y=|Yy8NIwXdbsZ1$D`6GkFp#V+@7Ar5%V||F#Gd$+3;XUR69qZa>rTZV8xngl5_W z6iFGbD?9&u%}nvP!&;T#`my}u0sbeZ1N{4$<;d4=bFROB4Dbw}4MTKQBR%yQQ{Kdu zZdMa8y8&N|V>m{NUTODesB>?MZK7&pPWLeIpPT1uZ*rwbTelK)T(sB}q`*Hz{{^RZ zFOST1DRMtL+TH6Jp9rt&1Yk#Xy5*d}R5}NTbKiD1`~y#Zf)LtUCPs+mLp{DO0 z07iH;VwM!adsf`)>wy_S@?;lGE81kqQsAEF12` zUZc4s`8vjWF9B%l73Aw9%w|(pL;b$Q^nj-)EAgH3ToCrVC%Q{$b#8WTW5i$a95Tid zE`61I#9R1{*+%aKhw&mZ+ltpl{1R_EuG>cW6UM%%bKdVyXZh4}r4H=0m;8<(L&ro> zJT&Rj=Ih9sIC$g4e{>_b_(uOvqu#fWnyF(khEFzkyxauy67szmje;X}-}ZlTU$8td zmteYq42@oWFUN{c$&uC~xR_JpW{U`w?h765C@274yq(U2iT73tb@;wE9ZZ-$blp3t zJ5DqJw(S;B$81Vv~1e+U^VeKJBQVFWZX9jlKwq z!VBh%AS@FfVMLK|%j=?NQwD9hug7}^psv_PotJ6Mt@5ejCOwJ$XQj`JJ(!On;||A; zgxS~OJxLgoxOETJO<$8O>pS!l{w5L$V|)lP%qKiYg`g|XS4SRtFei`m>EeqojQRe4ShtX_Y3Y-&gm226&EvQCWczY=jD}e`=yVq57`&=k9F;*n+kcFhVPkIv%Uqc z66Xp^tSKUI=~x1IA&)e;*}dU@#cOpL&2E)K=YIr;kr5J+FtK94 zrReDy;w#f_iUOXP>8#+l#v4Z#v@XvrU&8DJPGq7!tGt8BxGD!T>Am0n4n;jJlzZB3 zve*2PGJHeAp4;Y7kf*oVjQ6pi5rnjrXd=KzZY(5PY#hj!tF@({P1gG6N%Lyt0xyw%<=oVzdC(-wV~O_j?Md^kn9(p6i(7)!+WCIZ>wV|`x&*HH8vl7c zS9Z)$i9HXi+r%2CAdyggn;zW2$of|AA)Z>aiF^G(Sp|jF_}&mqf!@*mMTD}Dzu#U;rv17;P8%IiCLk2xa}v3G!sglu9qrb( zh59`;3zXn_62~g)S!$z_1KXJ{2?C?ihU1Zf0N!_duud9>3?iO5 z)exHcUm{wIeSAR$8osATI}deR8~vXoip?+Qa&rHQI|!-R(BLrzBIj}e&SL+ zs3{jp)`~P7=Cu9?spsT=4#^#M8BmyL$32CeX%h&+7BidYf1%}32X4L9qYXRVPGNx3 zpF5k;clPBP$@JYh=k!s?Ag_H7gt9=f-Vbg(FkfUN7(_D92v7R#T|99@+8U5bImkA7 zkyxYXd&(!CQALDptXhcemg1D^Yf=k9Ar@eT+@Q)!(O|1-!-AEFO~x7QcB#;%FmuMo z#)xV&P=qH0ysX(tSWp%Cm%YFp0vKQe?+6RJw8=KT&E47ItJP2LMN_{#*>_o(_o3_E z2=ym1j{jvf-!;Io7`^~M1~>0a$cTBPyewzz`3PSnB5M!Z*IGW%C`ehzH1W~O{fyYO zmEAzyW_`b1-h9Se{dTar`Y9Ck_>#OLk4oQ05#!}EaeLc8U$5L6uUC)-0Z46UC@&NAxIhssS5av9oQ(OFSD^s&;Mz!^i!UBQU@v#}pQ(3$Bek6JX2!rN z=HcZTo4ukEp!nd$&(!M_*Ua0HS=vrk0@z?)f{qoG(zONuI+72FTk6oL-U4VF-KOCij4J1NKPF<1$;T}l~ zslI?d^Y}17?M;}&SOG72;w*OHhKz;1Q!I*|hAZ%qf!F6Qwy~IW5IYK0Gpl_Z)w+(K zl=Rk@$y+M!ACX8tFLpUMDl;7BlhrXamT>xkS?Cm3c#P?qkd!b*+c;DU8WnNlXk}E3 z3=0zro{9x^sCOLVyq@0|cO4p6z6El)86#j)R#NSuCe(@=ZWJ=ziLE@w$_#F^@JLUr z7=1r|KT@N~WAEiwHd|Zu#J`}}sG#3O#=OYzzOC{{lou%Q{CH*e5Z#~*F=tf?gEvNO 
zaRu!UkNnhRn)PlKZ9cstirD|^B#v`CU=KN*wq=VCn#cc0k9QuvJ6B62Tcgc!>0C#s z-KL*7T#R-#k%k%KR=GM%-47fZAT=+?1(4V%CG9p8jJRd`#5l0}nO7Blav6(nK65dC z$Imxj#Cf+d`fKK(V0eO1%jo3`7u=BYyp96uQ6_x>lIoZ>R@ zkp{`9It!3k)ioKS`%wc%GV9lz6TH^E>H}`p_dg*z^}nnxnl6#b!@43)_YJ^VN@@${ z)vqIVJwDo7Zb+1GN7*-@pLc$HUIk$KG}!B~sY)O9iyNip$<=?t*vbh|yL+|#c$w&` z{TCbIvLJRlc8eOjO!S1;0+x$?a}&G z_ZY!y+?x}YYOQTqT=SgL=r|6Uu5H__-S!BMz=MNHfIX*I&75vsVD^*jOyP|$l&aKX zmOYvnSo5n#;>w;L56IwCD<6)QTlqS2IX@mHH-sAti@zPyh?8IMk|o9+uyS}j53sBq zT*vBunBBBw^@@7IVKXq}2e5ayeSPzO*N;~h{>)M!@F0w@7V>eky)m2aHp zLx&9JxfD_Kun=~^9o}Dpl zegJ;W?s1dr5IQLCjjpV%GzfXnPQ&K!oqcnwv<}O3Ft$l^;j?p`w{T~&xGX?K^nFFN z6{qiNRQ-U9xx-dtv1qKRh+c$vyzKcW)28bFyhsqUvh49{PjIuWVSR&0nm{Qjt@r}q*s4Ou2|gUu#MRaz@wHZ zD%Bv6TXGiFZ=t6vg)3_08LgqHhG5(Mq`X)QvDUM)sR0bap3>Kvc~R7VJ{siS*orHswxQ+)tCiElNHQ(xjkK8k47Y-ao*-MfF?Z*>|ESrw+wUfu3{% zb)9T0^#;Hv$NLHkP!eQMC0Y>KtA^o90oqo8n?wG!wwVmplxx-%Q&O2RwpU+& zt_^}7hFJtGIB$KgHfmWvzB;hq-atNho$J13;BjrhsuPor{HI-HjzDkl3S({YEZf=o zhK<>pS)&LK5eGXoa?Z}W2QQR% z3K^`Z-kGd@`?~7@bUleq{c4J)K!cd<-!>k&r-$kagZorOIHv*RwM5y*p zzC~+1FrL2-`6g`2TaGwPGYF-+ZAd>)W00r)rMzFr;XvT~9m{ebN}RPZ!v`0(M|Q8i z6-y7K8v(NMJwS@CWb$dH_UQ6LhVY`!ZS-4j4;{U)3vJJdr;?S{V4*0CYH+gyk7erx=#^hRF2Oy9pY1 zzq!bbFjEv$Sn_2ySj_GcO3968z)P9`IfI zyjgAdH1WJ5>lV7Aa9%nxS`Iph!6%B?ungN9YQLOoAhO)8C$!Br;6FzrTS? z#F5WCqdmy>O4A;@%Tff8b$C0DC*ovay>**D`xUy4axP-aex)* zcFa84<(ObeFj^T_=SdzhmKQ1-NL5#4Uti4X)JQTgYWfxURQg z_zmI((-t-u{XlAA-{9)#;*!#HVxX=ZPCHA{gxUd9Qurxw?O1WJllq3p*XjXz(KRCS zFip^AE@FjzLYK$+y&G1Zulq^$$rpd!!w*iB;cIM!XK%UwrQYys03omZ<3~>KD@2M5 zGy6kBA<7#b87`}Uoytn8+hn-a5u0w^Dwr@U6xV5Xz{~o#;q`g*B1tBz3{#}PCwwPQ zW>&@4F`2k1F4{Gzx`c>Yb-r><<}Pa)eA{qo6Jl6k;s)mp@i~O9WFIk9kn2=Eezp3w z-*Ik8CwXfE$;JEyI+SB-=YG#ekhPIC0AMo$%8{0_$sgcbenUIoSYKAYV^a6V-Rv*c_p2iaO1!TPJ~ir8b_f=-G=hF-};ha-xL3^F-U*dz9z^vmtMU zdyggGMxosVTNAzrxl0l(SF*PFhz8J_62%9$ z14KvOw>);7kX!-SinXesO-2x*nT+~IO0FE%n|55V$v=|tVK_%UeW}LtTm3|oyEfI% z$bEU@p%2r9MJ+njYR$=`d%1cPnAO!AbDa5t^b^?n6H`!9b|@AdY3_Yn)EQt?LS}nK9`(nBY)2&`b}Tbg!_QF=|$FBSd*T>Orbt2 zuQhp?1p3k77^^Ym67p7yn|&!r{lzz=B~Zs>We}_i)GX)qrlHCYq&3`CI;pA_t4<85 zVz7hTh6&i90PijEcO{g)W&KY>%un-;Z*hu8Lty7vjrzpE#~0N0(*<96=*!SQ-zQF& ze-gUzNF6INoMxZ^7NRrQpI0`Oyxf5%iZn#>&|}^P8*>@v6qQ)QErPKX_rC+hJDTsT ze*F62sl8$C2Ll8W*GhHWu{O#JYw3#lb;p4(!%{dW{fvm&g2G5kW4UvH+j~B zfKP*Q7rpg2_G01N&MCEM4~GwCfs+>zbW+&4)FraBpdLlqdIDPcswJ9Shdq>`D{?Zv zd4>&l&*M7!RlnirC+DA;xEq9ZJA3SL?sxT1&nSf{H+xE#D)4o*vA#Fm|NL92GS>l8 z9cjA1ZgAgaJbP=(?GzK`eA_jxj-Md6ol;_+EdHR}rl9USptE-02dgo`_~tD5ypq8j zXqi7`<@waHqn@|qQ;!Qjr9bGN_PH3%#d$@+v+3_DQZl7ZK`Y7>eYcv(#72b7og+uV zTY%l>=&aruXTU(&A!bt{?ze0GqS33+pp}fpjJwgyu&PDyuPwkoX7^wHjx1>mOr-kw zc&yk&*MEO?US?2fhvyYt+Fik)8p(zhS0mTHZ7f&gJ7ek4MGyWU;gMvSfS0VRwwkRa z9s+yy1)t_%0{4-5wmw?gFXka0WWPVhm2AfUICh2Ab*j-hedg8gU`#gW zG$;UwaGJkz6lGiWO#y~CT~-{)0M$J z0%A5*q8C6Hx#!ySWuX=cBppV6s8M76Xrtw?;C(oG>5Zuz8Lt-@KD^4LbX%WOhbvKc z&sXgQTzNG+?0((sfhbluLWeYkm}*ZKdsr7aHkuOMP=l+tAAOs8GO#bHRD!y1{_;64 zkIk4(^trfgzdNmhgtH9C6yXCc?yQ*GYi;JXizd-Bcv`3qkw&~W{aw^ud>r0 z!1kS=5!0JZcfz8qY?v&)D1329jM4LeaTnVUxz*iw(+9!HQHZ+#^bJ_ZRYyO--MlTF z4oV6lJpK{Gj3xDd;uQjiPx{m;Ihzv98a&7)o6!Ub*ivu#w;rEjV|acb`u3zHgYDj9 zrT?}S`Je9OHR%hV>8%(vCW2p1Ka=n4z0?CmKmT8t@Zu6CRCA;A`)W!tKiyT17vhC? 
zr%%#%9{r;YdFe~gQT%&#C1YxBk>4ms+~xK?qXReaU*FxFh!PgjoW;?;?RMaY|%W(_9+ ziilHNo*~HKHKU#y%EdaxqZlJ>566N!YW$;&ae3>yx#a9;AWhO=hxw*;ju*q)h~D!0 zZWc7Ygp`!i6K&>EkBy2^_};nG5v6R}>C$wv?zrr80dlv;Ko9fWc^B~F6ROyd<|oqL zQqm%bJe>FI0jh**+Yo{B6c6vsF1EBs_Cn>_>?^SzhfI{=__LMf#WmZ^y`|xu(kP~m zv~${K9(K@hY2EC7QRJf@IWHK>HfV`CS#!f!*ajrNb8;|={T#^|JeK@~ju#=_B6(qV zlcl+staU%OQ;zY@zoJ?1NiKH=(&+9mzpt3J%ItfWD||BU@L{QX^vON?%tyz8I6Aqbl+8OENxZk1yyCP{*5lJZj{UvwD@vehpjeT2K|{yaL5`HbbvJ2V!e- z$%TrSv~!VR;jEul0HwF{i^GRn%9g(K5#4VS0P8I(I>sn5 zzgN7t!evly0ih&!<~6{WyZ&8Au9hJhbR}TuZXZ%rts540scVjRBeSv}Vd_tBP>}oC zOiG&tuPI(e%3R0U_&Hp0)WOhUeE!zO+qjv=7^I^4W)Qfgd#*TGR|HyXu@xxjZW{9ay< zH6cGl++J;Tc;kv#d+Ay=bzUcE(%sfk{T0OSEE};V1Lbd$IlHQ_^a-@T>0Sb;biP6k z-6Ko#JC*+AxS5^pDQx)jRtku5nc00NKCsu*byRKfBgbWypW)`p5dwfY&kWuFep?9N z8bRI9m@SnV>1A`>{C1e$DaF*Ga0$YpPP}bonlz!`HeaiwV|ub@Lz3gY+mNe=`S{A8 z_g4dxiQ5Y=#O6a9ika3lnd!5<-=4a*ax;|Y;l09B_tB$RdFrC-Jes{b?1qrfiYbul zXV>X!(%Yupc#{=B10f7rPXqNj+e_hbD{0fqB~T0vfQpwe^Q;PpXBEjjKiU?^x!woj z56qF=1<;#@&D`$xFw$0ij8CEsncTvI_uDL%B42DjtY|~_lqZuEYbGz~8t) zvP1hx@E(r9{w3d3BbgS*LD%ZAH{)j4yBtSugua{R$i;hKyEpE|YmujiBp?0RpWC^Ry-#;aw6bjS-wU$&(<|S}a7hp3gLLzqJcsftu-$Awz5$&~uk5ImfP!r^hfRE#K4gREti#(W zsde1M0IdhIjXYT|jx+g;AYjl8MqU6QyPA8X)no;31wUa{}Tiv6gOjYj9Tyt3h;vW^yXO4iq_?+&7 z=p-m_*5Q$2POIay!z1U^FZN2zGscQun7^!b;*GsEL7>@fzxPS<4#$`G=g=X@ zMjdwf`W(z<7A3U7+O#N0GXIh5`EB6=N5a94)?jrsuCKyN;)C_x^xBW3Qs>Eg8cyhy zpPcTP--zS=^k-ixK!#$iQNU;KWm`HuhavhSDf2>Ao5Nhs7vZ-J;9A1>Rum|4SSc0OD{+0Gp&4EMwd@AF4mC z%YpmmWSrUK2Oe;SU=Q2Lb^c6{Dv``rB<*GTo{$Yw24mdN6&tE#?fT@XEq$ikCTdfCw@LsHmPiTldwzr!Y;fO_ya3D zH6q*40XZ31=1BJCc!T)n_3T#Kc)63$>}X+3SY`UwEFrHq;wm{ja^SFott@G8K(3Qz zg|o&PJM6>3e2UyTDc)L}l2`>^xpBJvM!osw6qddBW(#f50)ygDihpbOj+dlAYGTVx z`w@jb9Y62WjZ(0{@n($@$0VjIVVZ7TL;k-Rjina#%w6I8>&&w*k4O?_wvOxYj40NZ zK=zu|zjqdS)I?gBN);Z&B2F&wV`vFw$ev=-<@&zD|BA&W87QR|ssAO!VlHjP401-@ zfo*hAgMD>-HS1k0)@-`IF}HR2|W4w0}@jy2mEbV=77v zHcT9c={UMFVuK5x<+9`7BDCH=CHz`RH!v}!r`CN{KXZ3?8IIl!n&KW+R!YnKy@#}f4y+P1FFP2 zj$UMQZJN(mAIXiWu^Ya_&+4w0@?SkJJGuPGcuc$k6i!Jw^GKl2yz+H!VDsD3Jy#rx zE0Bp%g3Z0eCp2^!zS`>fNw{#~#ymPtkRC!ke7HPfgO$Yi*Mc1nkm|Jf=a*iSCRD<(npNB z?uuwR8ar#vv|mTMZ|_1S62dJ@ELeLvmmfb@Yq@m-lT)5Kd!F~m2l$){pi{Dj4VR1G zZ!UGYDYhp~lFu$xa@WM`93F`4k}E=%8Jw#4vO%p4m#AMRNSM|;ximn$*3GTL4g zi{7xl`;}&wo8543eUg94ZCzAA1{}ET`Luf;c`tCBt@8>wsi@|g|CC@HRMaVZ)PnaxfdR(UMSuv2 zEcCzHxK(t)bl|c^b4G*;t;@C0fY`2tQR&TOYoUr)ZU-F0Ld-nAkCo{6>)fcLS?8wP z5TaQRuch7`LdF*GM@drjv6i#IZ1RG|!^?R=Untkxz%}~OEs)~v5yH6Aejf?Y@N_SeOcxt;&#F8Q~0!&Mn*TXCjCrXGCJ;^ zvqFxbRQ=tvvr>!+Fx{;Vx_-w2?E{=U>qLlHmx~yeDmt8=O1Av+qS`YU-*|nHZAdpQ zr5m|+De2bA>(Ba(*-YcjQa(>w%!4=F2^=am@$clXLuKs=VUeY%*MQjV;NMteq2~*% ziCFI@79tJ8$f0Zmj$_83en9=`xLNlmC|Xzq?g??XlJbbcAGVHo#5^_O4s44BNoD$Z zYKettZ8e+tLw!R*Pq*L~SNwMGUpOf7Soj*#x?%^C2jnDku+w5hgAlI0jA^cP`SG)v zyH(TstTocVvo1bQlv;15B)_&FnbtJsmh~T_klGwlVYK*j9wNuDD|_tK`p6Gz)?}AC zHSi#mv{h+sS3qWVZ$I`wD^34jXjKvR50G>Nf(5wk(R-sapFb&1-8d^E%$AwQxy5Wd z<7!eV76QrA!?^KsE15wb3L+(3f%^hK-I7sS?t^k^J;Mk+9xJhb486 zW*8krYX^Xxi|;#!)pvF0u~KKc;=SFE2MR>2QV~oXV5z0Qr2s9d^{ezw+4GVovA0Ix zKKO8ZKJ654Y!Ct3rs$kWl}Xti^Pk*m`3tzn?aRzFS*C8HsD9>sS8z-c3-7Iu_cSUJ z%7*5TSvtE~)hv&mZjD#Ny`)>47jt;^*dE&Nnj&kuIjvQ=*%zY(r!Q&1>}e`W^`OtC z&U>~hVfMqrURCarNn=q*fmfO5EuB>!OGUE9o`55Fe2r?a@3S(A&s?!}_tzf@%8aE$ z-|tB3l+OdTk=#ZrBeeh2KOQLZ`htSoz~f!Ok8iQqQl_3${c;;7g^oA;`Vtxck>?BPtg~h); z;cTC_%@hz$%g66L(BPh_P%>xr4c>N8xClPdd{KK6V95~*+&)D4qaI(-TZL@^7@-_N zPG}=!X3hdL{XysB@ltZX#|D%@9C1(}&`eq^$BbSzi3Na8gUTJoLsy^jSSqf65#PDXo((t>!RMG9MGL2s{v`LZ`4E z6RS3^_l}e`LSRb#ZC-Xy445R$a9wYi$%; z{!RHN7mPz8{r%}U16LXR_AB|C^zY+C&X!v1g8`M;Y)C=Gi>Pz##f+shjtlb3hWTs;2PQEEpC|r 
zIVBH3wQD{3&opO6siO0(Hjj)y`;%PNsASH;3D=D{(iK$yida9e90*{!+L&t>(F7Eb8RvchI+=wkUdZ zX~kZa6y^mig8A_5??-@Q@M~}_|M1&{u%}%4O1lJWK(9r9liw2Ke?F`A`?jno+ak$^#O37`8u3sIvjJ!(jP$}>50D?;!Y3pUO0a&KI9>J8T z+umaFy&U?&M8l4GyA*Z2Yr0C$TY*^0bF{{a*=(Xe%WMU64;Z-8R7bbX<7k;U<{-{} z)r0Y$_DETIA>%Q!yGP>S^zMKb>*u6?#-av!HFh$vXYEuEMIWGU)Fev}XaO;xd*+`4@c_VA0_DC5Po@gG;ir6-n%seoN<{T25=r}E2w4HzwY(1LcPock#k-`2jilhb)aYyud~T50gw< zoTB77#IuORN;J$1Z98fkc=yt6t>c%f+GFr&-RV5~GZUt8e$+M_>kL?X_$lO;o$1l~ zhU-rI+B)N;c~Bt$FCXD*L6{q$d7dx8N6=^_;Dp;|2Y(6yI_$=U@A@=D9`?6r1sf2d z?PyTrsk>F_Jha$y?O>xTn|dHB*WHJzN-+fnCniok(ah$i2xl`YzOO=j>-x-Inq*!- zr?aQO&1LVq%gE2lCb3}!i#$pTP6hq4W~%~GTssR*VsY80tnh4D^NSdjAbX=vG9eZN zcn^FO3@6ibtvG&eFT&GiP>!(A>v`yH_EfDMlMS|a!Z!i5vHFCNFsp?VX^V{Cew(~L zUfdm?+$p(WhGYui`ot;&{($j{!H+G(%iT4P-~1D!g&= zODRq|s@1-E{M*OT{%6k3RV`sZMu$_s{#ezfP!cv304U5HB60!j8?j%p!58K($O8+@ zjG!Un?}eq0w9Ys?ta~1b)xpREm)5(-*30=w5AV7CG(l0#`c02u>hRi7K&ne>k71GV zcAjml_1e5Czjt;Eo@zfNkJc>OMV|!ekiLenBIOk`} zzTFfI{ayOF5#IG*Q?VfI65?k8j@_e;`A08lK%4PGizjktb%REreqvl9w6$p5aS=;L zZ0`h?QH2_n)qiAjX4ko58MK~tP+XvD59qFg1AIO-op+!sA2p6v0a>YyzD@0-A;{`S z1|Ig?QM6)*c&ud)>BrGpPGDy$X}a2WqijL0-eaj}0C|S}Uzl?hOW|Ce6?8ZaHk|}_ zy6Rx{3Eii!RnRyKRA#40U|EIvEJ9z==Wkp$A5|CaTdiaS{ZRMK^ zOontimpsmhJHr9~Vk5Z(a7Yec=qXfFWw`)S@&faS=<-OwGvWpf>Iq`w@HU3u_F^kU z{%=s!=0tjVlGqcyWf~=z$sjppcuo_Zzjx#W1)x;L=X@WLX9L8vdp8`$OSVqWMkET@ zYAjDH`7&dOruoIE2a+PlkXml)aC?pa zZ>-9Pb|c%>&l@(sq-@*UMW5Geob1c>vy@x70-G!HU^AIN?O#05xL^!a#yinAYlraA z&T7Td?TdCu$cOTo+aNQ(mCOK`sE81*c?+)Yf3Uu$#l}m|>9OGLF)_W;y{Wr$G{QB~ znDohaep*p~7YNMfT3%d1?&oKWm~0{UBmlx#@|cOZBDyVe)o!9D%KeW1JrP{l4T@0^ zCqd)mzi*SFy1Y`zUjAct9QsjAw5%yF2Tm!8a<^zs2jn}{@Z+_t45_zr!025b&M0N> z5`~^|>I_6+J(XM;;^g9wbo;FI7S(0?`&ozNWpgmbpfU*m$>!j?<0*o{5NWGJlw*9yMp1r%EtB3G=bfOJ$?d3u$sxc>jWVCKFJfl(*`5PFw8;atepJ0uG3ZRAFV^YjonhD9Loah9K7eQ8oRz)XYQk2=g>lXVzOa#-@Z@&M)K9_#w$FO zb$J0ifeUPo+?6#LG&P5NB|c3OkX*v+7bOx#z%uG`iPJ6cl5*OPX0bJz)SY=gh}fGA z>6@+&vw=JMXjSG{A;;s4*vCaiavCBWfyjCbhcQ^OWZU6HS?jF9$r1U;OlatDlz{T8 z&r2>_?-;qm8I`B34?miuWczUiE*93WxF{sw(@O4T(ruoz=$S5BSr8vw-H3_V{UyUt z3Z%$9Ml@S2z9oxViX`yrc!+RF_^D+Li zeU(|aFzDG}OL>#Ty#eBnJfrj+SSM{aRk%H9)CGp+h%?}Og&s7&O*`=tec*<_%OrCP z=xbH8CEnV`+<;oDjaF)NA==K6HVQnsE3tiAZQXq4!8^Pbt)we2e#fPq&ihw(+|5n; zG2=<**_!y4gX@KYX0j()whK+hN(K7;x53p8MRh*04;Zc*L#77SS z$%-|!$V|)AzrCvf+h!S{k1}c(_d>dP-{?xX%<#v^TM8{$WO8SvmvYd$Oeyky;$o|Y zA0BUYX7K|S{hnHJ{o$8LuFsbXO$lvS-#y`m(`d&)AQ&(lc_BOtClb{koyUUmdoIlV z8@5hDO^%A0&NG9Tr5Y|Q=}tygTWmdTJ7ReN#VK-DRyFuR9EX4A6yJd3PLy0g*Vz*% zCxZKGtqhmnC!X^Ii_`UDk>r?b<~L(b*7CD{;_usb`Cv<<+t&v62IYj+*%_L9x`E<% zoLyxzp(6bk-p0Db$zir;m2$LKp^fQtU~7mM z_8`es;l=kb4}*v0R8yPTnZXW|Wmd>>tZH&?ol43(9u4a}Kkvfc4suW;hm4P;#B*|F z(|3zml*i7mE0}^V!UduEqWjV>pdBkUo%AyFiLb8irU*9yq$G+SuL{EpOo3d<%UXxU zgWO-OO8?m|ax2o!1%kUBF}}oKPXDpyjuCx!%C({b%Rmw{y)R7;1pYjC<1MxRtnchK zVKTg2IZ-L0HI(=3nsw*^5LO|1een8*HB)U9H{k9|xVM7@IlhNm46=>g7%#9eddfOn z4Zdl3I~}xoZ0ge3u_L(|>qB^a=B?rq-@B4I>Ad$gb}8dLm`>SqxgStzczpq&0AyKk zW9s;e*|l{WUpUa~Bn;?mr>Po$7s52Xzovhn<&f_B9(f|i<``%}VpY2*ef#%;SV`tb zZ{?2&L)bSpetF;`153R0|Lo4iuYldzt2*^|vPBN8&MiA1lCabkzmUu{(M)!^y(d1e zlB`BYWrgd_hLwVvQIa$;oag8&KJWmH2mf?i@MbfF`MmxJ62`r~@aP7puPbm_Dg8ET zklVe25Gb~!2Ic*yxWTo-QKcYq?sK?$6@4T=HHdu4G-}FAm=I7mNjwtuO^lO zqdGUxMNs3N$};ZPq+hk;$ zhx9_igHZ1}>2sn6v*|^}U;3XUMg>RcrH$ zg=-Bf$jGZPns-}?yT!S0k2hGZ?iH$k5Z4_M#Id?SW04k@mqX1xxuO;9)t9pGslZJZ9WG??JP| zflTjz2d~&agEyf|V#%AXFpyYC!C`p1Ka``}>G!U$SpL%KS3hM=-0HTq_*|YWL?_LB zD_LWeCGCDYlfdhnGLVu5C6(%R>qwW`M$IIF9j#_E=2ETRK|nOUA<}0qztzPPpSaw= zfEXrQ;w!Ub+y=76;b(*OKIH&AXhWayl=3+D+NX;^KX6MF9m z3MgGVgdXXH7J7i(9l!IHbG~zqy6(Db-5=)T2Al#2a7Z 
zYfSy}x3We%PAHnoDG9+7c&Ux!3`n*?XaWT0VLTw~exB}Cw~&FqgCRYh<0LG4GRXyG z6lOf@vO75=Yh!jyAA7c6VJ@+AiF1Co<5QR!CzVRtXgu&%89>}YlY9L_PGUe`fWy6= zX?oPv`u&snTKr^p8EEK%Z3IdD+=`3#bx3ehu<~80o~;igw2TW@2|Vj`FkJ|o!{PLJ z)MH+%{=407l-@2k&17V1ND%as`zJ!P`Xb)>pn@`nGUV7ms&A##+Rnx9V#_g<*nolb z%_h|yqq}7#n9%p40-2tRjfE}sn)wisSf+JLS|y59vEy1NVS-<;$- z=bc%N>41WW+DXGWf&0aYH{KB$=lEIi7BC2jtp3*+7RW(3;Pw*}C0Zv04aj zqo~v$A|P2>VcsjPJh4H-GVL`+!PTRZ9`vaD%Q7O+_3ER+QAfow>(@6gJj;=vaQ)b| zbM`}Q-aj&+D&wx+I(fU=dqBoO6AO_f4Q0S(wVl*n@9s#EEr6wO-)Y$z_UsXb@XdgP z6B=5W<(2#I1Bp{|FTv*O+kK z%VfXP7qCt@zUcHRTl*4S|kGqG}=9SDre2Mb@`o zM-zpm-A~_KN*OwSnsm{V)iN2u#b>E&OE;s%gsDWcEB>wS{YJxRE*t&@XAA#fXYN%18y@A#gG%y9V9_t7eDjjDC0Sg1_&%a4+Jz z)#^h_O<~~TeI|c2sP<*7r7d2t`BXfSCuYPtuOasJ&pY+mnIpV$Z?3%wV zzpe15)VzoJX@9S*O)=)Il990455uoO9VtY`&>w;LhbCh3at@w@V^(|MvmCWoFYp4c$ z`TP+$LvI*3*h!gT1Py!U>nIq6L=;V{EK5QWZ-zJQj}j2x1zcmBBLDFCEYFU;{0Anp zvRQ+*GCIWcV#cnBq9fjhP65Y}5cy;eS}{&(l&+ zS@4Y4$c@*pvII$f9mfRi<9c|sTgB3GnQ4-B_nE!F-4PxhdU8B)^AV_`(bUc5UC=B5$4_sT?wGA4|Bjg{M=LFql+ z(3D=T*C~j7!_bLco|FC1JMyghHC}?EpNr#Mr;9rh#9HnfRJ|?8^XZmdTZZ;bSIio| z443}m%KD8?qteAv6r0UIJwjl9r^0D^WKNhPObI5oB z(%~_kWmuE7bU#Z&zAqT;EQ>ubiil7%hn(K!)V6OScTZNm;EcIpMys87ZKbL<1dFc9 z(?CFOwY()LGdSzAYCY2yQ|K_+5NNni?b=(X)gCMO22YP9Iyl?jj8Aq2AZFR`AST;) z5c5XKc3h|kPR~Ta7rQBJ-c_@3Ba+`rYq+|1CYxW#;b4!&G;T<`tN0q1W;Sc8Vmdo4 zzqNA_L?uMd54TU+h)^p{tg%0xoj)rfVlGfX)E6f2=zhPfv$rbuuxY${Na@3M$@J*mqh0<@75?iQkb8@5nZkjIVe$*0{!I$1?e&9&!L= z0k|Iaqg%%Bcxs3z_Y{xvHz>>;Asjk=TsRA&j?XV~(bIqYd`By1&#*s7j~)AV%Pq1c zG8K{&Yl-xoPTSf<+H1Aws?Wgn4qagU8vH(ddu6FfgQDBD!Uu(SdV}SYHYu6gqQ!?29%6s`-2+tZQVYJDamaqtb>eG&%tRh`R)7A;bZ<2$&3gm5R z+Y{PBLqn#=ir;tvz7VtiO|_n2Ob%e55zOTcE6+Dm&eY0F1n0B54=xLzG$Wo7-eA?K zzoik$!`<*aV>#Ms>Uf6v1B1J;#^v;#v84 zxRrLSzh?r|-UUv6?iF&RjXdMZIl|&NcZ%&P7fp1t2H(fZQCb4=O0EoNWzskFAIMm1_C&C$4Y8iT~gM1*kyJP+h`k_4lA$+}J1^$P-Zz@4x(6NSd_E zu7k2g|DqM4u}{jfYko5C@lvWQ9)K2;vjCDlj?Q#*X6TB}vW5m)`{e)n9OP>Mz z>R+mDQ}8sdUL%AcEx`|3a|Ai&TQh63Qg2B4aZ9v}@L7PtX`Ay${541~ah2arZM$iC z3UcZFM^Qk|dKNgCK{6s|AEw|*R_~R6PiDFEc6H!;ikKxnndNDIJKL9QU?uu{;NLIy zwD`HskTdwF-+J|d9#I?Fh;V}Ro>=G1mVF>BkfkekP7C2*_JE#L@2_w8{bgkWL9khY zR6XQwFG23Z36ONt?0E{pSAWiCr6JbvBcR&&{M_D2OfCLd9ZrsMDYae-un&{@KO&;f zjEK3y)A0PJiwGdpy(IUYca>f4g#g}hBNN6m$6Rq&D^uY)D=|0nSdD1`Pm7l%LruDP z@fNFo5(`7*f21CMegob zcF@&$dLfflhrpn=02wHWX)bsH(9`@zDWku` zeXoWQ^1trnzgm)eU3f#%O-;6NBz!?_q2M+N+j0D$x-U=A4yWqLSME|lQt-UFSH|!Z znV?2Rt-`6%fb0=SID9m=A5hkQ}0}_y2CiCc6b*XfrEdP~f<5OopT;u{1YIA(h zC#5(p1daF*`X8GpRQZ8IZEm?T@p}FWM#BL9cM682G9MIK?;^kP2*&_Mt1tI1P#j}@ zK+!Wty^hPCZ8CZwvW^JVwJk{3KE$LH(B6?3-=cHaK zY3?UW&C=~1S3%$4IQ=l><;@GHhxW%YY5bn!+qK&>wXddYx1}(mXY&}y$dspczFa%? 
zmtp_$QvdNo#trZ@Vs7Q*f0fpMy!!WB7rPJ;Oh!q`H7We?(!bs}i5UFsT)x2Nes|nO zCkpM@KkxYG;C{cC1)7ksA;;&5q>#ZuZS2Jogg;9F?m18FF$bgMSkUz|FW|F{?&)2 zQ*SFBR(m4@io}KJPTL7D6n$N7S!g`N&Hgq!7NVFH=sW`u^R2o29>t<_hg+*?JuVxA>K2Zz+h3 zNKXO#e_3XK|EjBjsF2k`sI42NVYj_I$agG|p9wTy`*}$a5W2G7J4dGSkw?-_cq_z_ zu)+0e@RQp=BY{7zNzzCXpow27Lb(~eVBllb=S)@ldn}qkmuE8LB|B?Zpcxv;9o~f4}(8|J>6A zBIOLZ`)f;ilLOc)IL55^m!|Jn7LbRQ>HRxDvsVAOCP@lt4>{e{ile2LY^6%Kpv5|F8|s*@6iBfuhWy?>eqy?3FLt&%((Ou z@Ac;(?!m#L+7z{ySN=n=Kl33e3&=y0F7nsrc|jd4DyiA_U(*nz8ni$jE$?6dms$P$ zSO42&|FRDLx5@r05dZ&evVYz)|J!8$r1$=xS+aP$NATad0RFll|6wBjXVw0+P2a`y zKdbgvf&9;^{mY*DpH=&-K>lad{z-`+`=9Ih*VREl_M zH04jn4#2*tv=m#)+cM?0optHI0N&Kr`uVL62={(eqRBWx>OMDOnzkWjJVNg&4Wa7b zp!tQjfLsA@LB?K^$dW0*8sAVq3f_xWj@P=BqJTb=ov5l{7FoP z2ORlIEG)%aiL!_S6<`7>XHKfrPta@_IDAcg?mtm*@oDpg8pv}ODO<~uN#g*c(A-ga z-Y3tBO6TMK9MtlE0bsMmhktn5oR}n&{T!H-qu%`jcEO_tetSzr5y=$c=YULrs;d0U z-u^G}R2w5W!;f(#U$?i$x zPt*KbwhvPP8vHO;^3u=$_9{7;cJuSOxz+%0&mC=}{rMR^;;#nYS&d>^6vPT7n$tfV z`GwUhhJPI9xQ*>cdMZ0x{sqrp`0Wr6cwSZGXx<(0d^XScU%?)fv}9yFDsP@^Qvtsn z%0wIf{CH)o0YYLITBO_Dd5KlO`HlJ9FZ?zHgwpIz^*o@$hf3Ite+6`YdkwtmS!d@y zZoCe=m0$S_k;dT{;Bj#=3ws`2Rs@rs3KZdC36(S~6Tk1~U{td_q@$y3rB|@f$tpfU z)mFiKN9CjgP>T;YHMQEpX@7oR2nciTfXyS=Zbsedg!c1a>rNVowfK5nrDk>AcirD* zZsc<1_FN20r^O8!5BX{RUkJ1ICWB!}b_}0*QQ=}bcDB4))1W(6i>l-C#O$_ofo6Nm z(_g?~WBGvM^hQ+Laj;6*?&(^~hy(@X)pMo#8Gv z5SK-M5>U_}4xF_bhowpEMg&u1U}7%IucPZ!j2p`N)?3XRMf6N$b{eCNLN2OG@VmE? z<*exxnHRxpKL9#b+GKejIu=Tt2pe5(-!16IGe9qU736^nAA~F>HGcc@1_es7f}5Fr zFF^EMBX`EOg`R^X>c0BV3@i`5(%i?H&QY_eJTsJp%0E5Hbk2UMZQ%47z-KR zs&qr2u0mHT!bV<`@na3@u*-E#>~L7D^c0=+pah-XlHP8CtepRGYPL_xN9{Po3p&T3 zjiC3M0c7@vs*8%XOAU-(uSKbC$}xgCP<^fxQ$1R{#gwiTdj{3?f^+N5Kdh6F3Zt}; zJXl}_aGh%%y^hu=+M_z`r!Ot3Yv1vxlf-I3#O+cEIcWA*e5D!6#t&!9XY;lpJ}o8v z`WLs5_Tz&tJ<4R0r4?DFnNM4!57&l8R?6tV$&DI6MNeh(cYY3EOnun^2eMR_Sf4Xl z1D%f}DWH*>kg$?|UH+3&b06LUnu*p9(H$>BQ267uF=Sa*{pJ>(&Lw8A)x-UAU3?+O zbF}+l=29g2*{c`WbJPklH_*?|yu zTRP)gvDn#o57!18a7X%do)MUf+&A>C-fPjeuW`ko^7KTu2C1js`AX`S#Cgsp2-9Kv z-xsV3*PR_d+$t>SgXn$guJO{eMZV^g#(CRh7cCQ;%6TmhrIXi1bQNTG5d3&0CZ&>s z2x3Ic+eXwCikD{fNSdYuXyVCh0}^PHk=f+;Ch572_q}y8RHwhZVf&OKODYl+td1-$ z;p>eq%{f8pD`n*|Ih@ILJbfK+<@uU@?^jC^JySOrR`cl>+7w++KHpgBl6##sK9oNJ zVQQm;bTJU>(6F;5n6xH2@nX?Fkxpc>q^SwNR*6zPR=XB7iDBuVO9eFD&HxAS$V5Q%A1bX3wkhdI)rcRkyg8HU3;ncrwrt zfAbDHGIN(JN4sP+KFcN;75B=|VSTaYYuYOaOGl{@qwCG3I$U_+M{j9B&#Y~K5W0@a zb{HRD4!az!e_gz7o8!_U62sRP-eWZ$zCRw&NLv?&%@y88#`Ftmw;JeZbaP;toL;VeK1N+zq(phPCyL0vh&E8YOL#L{wt-`=YT~^j z?a_w$*jrvX8Yc+T7uB>ytm7IQJ`&gI+QfBe5(gGJ3c1@vcD~wcYl>;f;b7gLYRzkW z#hw_$%r(BZxkxYfiV{8zyeYMa+q5q0@hR z)mU~vzmu0jwj-26w7m~*S9du|H_N!ZcqI`Uouh$W;NlbNZ)eQ32UGObZ}kg1{9GAs zzO%x_WW`1I#`a4&SYlZ#nR&dU)yD2**pP9ps;#2d=&0wcR8ZFvMpmGiY{w4%HkQE>1i)^gwW~yUSueC=#$e7K5ZL#_<=#fgm5VR%Guj%IT11 z1O|B3&mHUO6vK{`AMP<=+;oheGL?oDbx2$k=Uz@%iaEWQJ5aTRl~@@mGKsS-5kS=R^reNvQfP}}^(Cot% zA6Ufp^%_5i95g0uG5=l#r%k7<-}-VLa0g}+t9ArH@ze{J6{HsSJyd#r}*t|>OWN< z@bptDGBS^I-Jbi^s7a1!5r%n^z94FkN78QD?? 
z4GlHEHC$q#9|A-=o$^@V?8b@mKT_TP8T02$~S??#M0+WmIm^S z`pSxQZ|~Gs<>d`SOb?E~V8NN%Y}6>XLYS2aJ|XQHs6u!vE&52ZPROUpTc%C|I>%fq ztj6woYXIKHFydRwyg0Y1>l>MN3giZsY-v;L!=M>B#cUek2AK4fJqB zRi!zsy6GH3fsB7(Sju&^i2=tG#$~WUyf_T6WP9&zP1G0cueUafuT(Q| z<>q^PqZr1oR9(v17pw6@ol@Apu#9V##w-erK0=jmbcbM28n=0cDauo`m!G%t@jQ7( zLg97U(Dxwp3}$!Mu|sq=vP2Z!tG2q9U*T8EY+$dEbi^kmJFc!1`I<^ z!NZgGF=mCo=uPMbP8cp+_7r)ZV0P1$Ja?&ZuC zWnnJLPc!V~L7-Dq5qGswMXbx73h~MKtxO8FRwF%LNUZROU%ex+HjnNO+ z8tJ)U4AykDUv(e1%!^Wr?Q}JaNWPYK0tUow-JgW{#YY(Ik(>4iZmZh);bKl1H=^}q z(~Dh+!^3TC$jTBZY|!5Cpfj>ro2&WyWNoy8zu0c(;59-DmwQvnTC zeed$U_SAyhJ%>!AgIV?;V6qUI!tAW^vcqyus@2`bH_)l}`7W)A=Y8r}yjO1&^X*Ud zm(`Y7WUG7d2`|(bA1V^24)m_0c?v%ZEAnanIP^c90hT{Q%ue^!Oxi%zXMI`*1fziImkRiZju`^9)(ct$UaKg-3Y&41%_y zrG1FE;#>F5!L*i2**rwg_S7tHYoySPLj)UQ#7Nyq+t3keYpFsh`YoyOgK6rbJi}%f zUYo1zZE~zl1oS|JY7a`|LYn!-Y60b<_!j6|A^X|;rpd<;nh2F`dz-4sCvj74MID)5 zVnK7^qI(%4lb?*HXRVKurps)V3@FD{)od*=)Jd%EROiv$mD(z|ls4%(xC_5rlRod$ zUaGDtp1gWE8pYH^fW?V~i7l+|muI^B$k!&c_^qz+R$L_>@wS#Y+@ao;R3snRU!!ci z=W1!7KCMgGkO`Ng6o0>TFyxJCpPCBcwQvaWJ;c=S>`qP_Pj$<4zDMcEeiU#=4+LM*tkBrWcrZZNdMq$qE~X&$V%K{B4Mr7C}CvKRC6(on(A<|#y7+y zEQXG=U2FmAzWvZbtld-{3G9%%h&SBFcO>PO+c=<87TSY5>ejxuOtmIc=hf3DsJt$7 z=t|fi_w*#sv8HVf@?JAk<_K<*GP?N*L$;9))6tb`F4%5srtumvJ40s1Wmf&O*K_ul zyK8#R6!PRG>764}=2C*YFAojPXXwA$0eI=NO-0yz^6yjXAoWt7Y#v#$*&o*ek+e&( zEU4`FTlI^x5AIQgZ*3R5)R`y3s+Z_2i7*?jWIf1keSIiev)}_Exq!tUD8Fy=4}x2^ z4|Od0dM%Zswmf@U+@%vf6E*-B1!gR`V9;8diFftmNJj1nh56D#J^{9XkLywlTq*8r zX${YpEj1@?5Y@iwM;lTxG0fS30B9E@sOhm3 z%x9%N z1vpM(cTY9;2`R?XXf-O=m!&6qRTfujOHJ+oFSKTxGs>ERr+-*#G$pg^k?Gr9kqq3y ztA?kNUficag}p<;bU2t>+>b@s*}-<0g4{-Fw$6o%RjJt_pold~@j94BYq7lKoJt zvONFs>NG$2p2uvoL`?)f(GZaj59&H*XAL^!VA(^Sm-W=h#rhPw-m99^+vIW6vN zo7fN1D_o_WgbSIY>Z(%boZAJpUNB3F_3Qa27fwoSFR2Gxo z-g!MT=k)r7SsvYa2UgD;RViBOFhc<&7_qwcb|@kKeq zlHC=(Y;n=qwxxB>6Yz%>)3pW?*;~u$eyg8?sh^Lpy*$72D4G__=+TTeMI4dd+~8kJ zW?_0+_JwTGds}?1qe_n9ZMyySpu(QcH^rJ&zWoaGQa(_^hSUo!Khgq8Xprtz_Ns!9 zB0~2@xhqlNcO#=+*!@U7Z{w}(?TuXV#(aJB6qORj$OeNYx&pho3%|uTyF@g0>=W z%kJE$QaD_BC!1ejlG67S@0$DKum`4y6}##+`86b_<4fLr*E_j3{!R(OGF2u4+E^QI zC42({6JW|YpSY7yGM|R7V3G@0R}7(`&4h%wt>!q*+fn1@g!Q#8a5i#mI-5=xu1@6w zmTbsuqVIWWc+gGE^LZL6;OjC%m)0mC6FJpjvq++zeWVQGHK_cS&ZZKtO$wZ5K&x}5 z6j|#nxaup7xjAOHTrnF4y0_AWBZBVpRnG12)X$9bC9?3(Uk* zX*z{xVpZ(!X@4ox>P-CVI+pv@J$%%po~UX-q82o0E2|feB>d3ITcxyiXxRZwX-`Xa zxA-Qu6{aNtpLvAHDGdRRzLrGC;m&k$b90p5A#0`k3Nt`umut% zXl@_aonP7o@K1DmGc$CFu1;z``u^Frj)rtq8knOA?&!(3C%P`xS^3wU=KFiA3>V9y z_HyW`#(cI7P;ZmNIcrVk>B+hm`cvz??}e!55KPjX7BdD~GH1J)|^QE1SDa>(-oY}xExYt!(sV&;lZQR~q!FeQ! 
zDCl5|$KByI;2amm3(q1sy1v?%Z_BX2 zpm7lce$aLyqyWtQOVE&UfDaEkj!YRsj!9ihpS>~)12)_dD79~z);SgO_4AecJBiT4 zbIr`|4`K0-GdNzx+H$xgGz<(uDq?LR$T}fK!Y%5zhx#$UlqNll@ zn^8yuGU}bf8TA09&L|?U`Ymni796%`IdW|^w>lSU?AmSM7DMN!!oFON>e%VK!#-=< zZ{VI#J`?VHg;~p4`9>=^7zyFQrv2D%S?A_x#_qe7sfY?g0PhyS8u?^WR#&3*qUunp zlDpd1EMoT6c1m0nCr4w?^%8g|xxe$3LXi^44r@JIS$Ap^^$9|*5GOrRU35H(oKlC_PR2)jt>i`^9iM!u4JOB#nKAw~PG zM$5q{)g99@gL!_9D%?hGP4z{rKny2>be6_d z>o!3x&9~N4ntMt&8e4Mu0qXStlG_jq$;=*M_VpgVTQWbw!49XXLYFFzWk+>Atjvho z0=^X@Xg({*olDDJ+2jhv0E8HOSBpm3)_1QbVCri#vufiMgl8iD6_9p@;O+JgSe9{Zjd@X$?@I}w_PD@4O7IU}{uYK*O zsStX|<3sDU^4LYo^u*KREBdvM9bt~|x<=MOIdFFV^Ch~}Xn9>z!5~(yvX<>-KKIf4 z`ljXAyApDtyQ#A+kMnel@URtVg{rOQE8E<^)*l$D!%R4b3y|#mEBn(QL zhpyDH=-E^#_e+!lrRq2tRA;+-aukMaFIaEknntU=%XF8t)^06RKT9RG{`n({on&8R zF{_+K`mj~o8tvM$U{|r*q-|TG*}9O$)O(I9w^+$1O*6me`A}qM)W`(Q=XLuKB;C^UX;W{lRPAJ4S%`9_gNlU0L_Z0GbnG@a#IfiEBqBo2U# zT40kGC9QQ+MQr<)rEBc-Jl;wFVAerdlMojOq{ye?v5RAY_=dqE>!0{B!tTupZqIkg z&S&YCUJJr`?;E&}yfS)7I~o6enar}}OYy>^(j3g_$|nFcB`^t9!U0s6T`=R`+F{N` zn!Y;Mg2=w_mRmvc4^v}_2!lMq7E+m-9Q1Y90#B^u>S&LuBr~Q%k6GvdG-#cIGWgcn zi0EWo)l^B8NX^f(T`APG6$X%kwq+_UYCMR_)>5R(rKGd)r1``HKw!02R(Bv7CJq~3 zXw(r7ms-!$cBo$-GA=q=K15_$(*FkLb5~;hbFz-+8|*w$;L2g^2W?>JhwE~Zoxg;VEc-msiOc=z>sA&5sR$$H2H0E50o-(wtzDia*N)e zxKNB!>zsX8Qt)%`2}AT@4|XeduAk$_8=3+iO7Op<5`Kl1n^k`|OYc^>T8dP-efeaX z2qxRPg&3Ywtpm|-q69&rl66M6MY!%aR@A2X70T1egD7gbmkQ-Aei zhst~>qIvw;Tg^fmc@iARMZQf~Zi}Ed4jqBpZI+ID!LJf(bhtjSyC#IRp`A^Dy20XU zV}X(v`c2788~T{y!6_ZB-C96NNk+=D8DcV3i(T$qsak7dXk6jL8mx(UUM{oIgH=pF zocMq&`sj@VT{Ax!LL+^-_#|*jE^}S)>Y?{-FE3Xo%airN(o(jITv9h_B3K-HD1z>- zSDV&obQp;Z#k)h7J!d+`$E~*EV`W-Gq$EF5)_yYJTuNnkQJt4{#EYwAMRU5Sx`L2pa>9E~C?FkQ1 z>ShA0``wCs&CEVnO$Iwd_pi5Rn<<6(jDkg^byDx~yp4bSj)PUV!pA*qE>F(rHmZoiCzPjA&-{LoS!+oWw0I;P2+!Xi_ZbKWig?g`7M)bUy-q6F*v9Txn z>jq&%pMH^bPi+MXVFtPs;poY^ue$1|`UpW_6aFNqd^n4a+uo$FBo#Sd^*tQ}vGSeQ zim3J0ytYMagBv0S+7*?y3nl>7^MnhJIoSRNDW&?FV)<2Y${X{TkJUa9iv@$uVE}k( z5c|$PlGzDnPkeDeDRLG1V(M_IXs;Rgta{wi8(no%FXbrt#Bn1Co^<~DL^g#sG1Pg$( zVC{HhsFk;-d}-Exx>}gdsY5(dEyE2r?*rAWynpM5XnpZ{kW3(SeF)S&3bT&n8THgR zrWvjQKk1rujq12m!Fb{{&U0%&MK**^WNtzZ1;oz5U_#b7Y|wp_F~)e5JN? zsNj0#8mhTj*E%}yFczY%eEw4lIzn9~1Zjs;H>{awV`xqdx@uCIIy-EJWHR~m?39kC zV`@(w?jWxbHB&i5Pfym)G>(tT>)DnV3_Z`5KA0zP{51JA(v2REzH~!h+lKI!YBRbG zo^Sj(`B6GJRG5R(D!uPvUgg5`>jPHrde0swr36U(plz9C(K~^`x}Ao)WWe9%c>w6K zlegvLzBNY%VR0~f?%JJ)k$qo#s}cosr;(P&2__v;O^pWbqfF_EZt`2CjXq>shv=~? 
zjHX9V)gUxkxpjC0T?LKbKu}{Ujf3uObyO|g-x@n~Xl8ovi|cuAo8>c0vV=p4EM!>L zqd3&lK6S#}ZcFS9qCD#m*@aFcZi{|XjwUOIZ&W2|Nm%p`!c6Z-zH;X5w}`ec$6^W+OIBFdaTq#pa(|`TYABTtjezN%l5-}_TI_mgjTxeMQqJC_(m{kmfK1P2EL;#CPB1O z;q`J)KH;OPol;nRopB>~qRCzz=Vtfzl7Z%m*+amT4n_BYSk@FtV-gw_1lNhCfgJLT z-qngz-n2{|EPiM+U)pwEu^EhwHiUOLdB@v*#L;|y{-LKlVFNy}w%I?Pu_XRkY|N%e znVieh=q=L>*1&D_qM^@b$-W3zgqXSOP%Yl6RsSvQI3K6uHe~w#B|-SBWO@0$QfAL@ zUv1`0u`LreVxCI{>;gNfPjX6!F9?l12sk5Xr7_}$gtt7No{w!|Jpc|YJ=oz!Vv+Wi zkyGHR!qralH{?DehIA)YLc2SQHk^5El@FA5M^|`5T4BDo3JkWIuQl&qfEABR4H;`- zZ-t@4#o79~kAZDC6|#eYlL&b>l}!8=sUSt@3A(#xZ+%}l!i$A3E})tAxuVa4<;WkfceKT-%c#+WDcEb+2CtFIdQg>w|S9H!?x?QW4$2JaPTr(Yoyo*?Og0n1ZYYXo6 zPOVAfb|38TuU^DBw{t`gYsD?<4%s_53j^j?k*#uSyc?6Z^nSrn_wuf)Z)HjTg-!k4 z-Qv6Ubv4_e=jGB43-7=liUk4Pm(Ck5vG8^WJI)X$oWmLid7U%D3*x_cU`z|5KqiX= zHkqL}{dwBhW;b~(7Tvb&=P-XJj+4cG?)XL7d-F6Fe{wkASQpj1s7z zTcgQK8aX~G_E>zd8uag?$679os@_-&I4M?q<3ss|xs~py*Z2W!WNG`l=%sKQ>xDxd zh93heelnQ=WzLi*=ZMbTLdrgQbf)Lj+XTdEE#g4;5zF+qdHG^XFYJWJJWOEoQb9== z7*xf8=USdd27^C4+?x|X$*(lE_;b9U;bTXDN0Tv?D>LfJP~lel@#B2u6OSF&Zq3qb zWwqyWjY@CtlRE+*M2kra#PFPv!T|HLdz+i-=iv23fyIX#rh5kJT>gLy%Ta(S9D9;Eo4M;452OdDy@}Ywzo%|&ACSrTEN{+ypsJmz9BafnFMLIJMC}LPvodN-4zO^#`B7^d1`sgb_rSp+20j%Vu}!i~naT?&Klr{)vMS2nW)$}DJ?A^1RRwH&NQt?5Lu3` zGH~2YE%+*5gF$7#gs>mV88`N{tXdA_8lbS_4k{*Le-3+s`5E3_u!04qhAv9a)xMr*i05)m;+` ziU$C3xyPIlB|Sp%XhXYe`ySV*u)FtsEX`>Z#L~$#4vI%-1rnKj%jFogNfm`@o0ug$ zZtFWt-g*U5&K>ENKv${O4LTvv7sy?rq|8mRX<_5c5+bF@<)@~~-PpCci+dMVfwnjx8}2rZfW0j+uKBzaWnd$018C01H5N@W(T=!U@Rlc zY#hA1>Yp}!q?GPXl0w|>2ZrimZOup-KIP@PAjC)#Kq8Zr9aaKirrVKUa;LHy_3Wpg!1TL%I&-0pZzEmW>v!0zG5R)oz{| za^I@$<+t{=o*{i4m-eg14e0RLPip(~>0&F#z1@Q8C_Mp@fq1NN`E8;fChUFZ4$+&~ z0QzM%i?q0X?qxcSti@3OJIsqbk+{uQThvWHuUPs&ikSDG>5+@}>UWmeDQK{Y3K;j| zhd0OQ+^(PWAxE4hC+}41o7ogJZHd?c&NVq#D~!>k=>cuUQu^x|^#ZGyU62tjK>SA2fn7=!I=X;odKjbZED=e#(*0`Z(VPq0WE?K|tP z+81@W*HI4`LS13{*2e4KzPx&zgFgY_!xd}o_Pi3U@jc?}9pWo=a?>0l2@NQ6?tGaX zxx`fTo93#OlcC{HDZ@`P4by9V%W-{S$&AlT<#-zh2{fptQbG4-qPUsLI?Quq$XaVL z%T?F1@~!OmRJu94@eWd~UF_p^d?t&x*u$G`nxyP}z)>z(Z5Zbc?3m5UgrBvIA8URm zHy-jTsLOe_;ck5^Z9{h|7jdUWUGBeQ%len2(IeoY#@r;w`+e$3C75dEytbi{ci~K; z)q?^-0ae>`LnE!{Y=!?(PSJ?rw~kk5Q-0xSB2SBrr$s6Gl&!QB5yKA=L;7PfD3yA5eOIVGv)!|5w}7;@Vu^{Qi*qFxrC zt=?HMHM5E7as}v(rl=BS6E2^$7C#4}j$TV1lh)z1#X?6jg2`^IvVDBtaGF$+W7>NB z3DRv{v!OwUgD1F(J%QXAn;vq~&hZ>5cbBczZJ$G;O5r)D4fBg3u5en&4Ms^TtBM=R zI89PAVYcMj%VZ6Z7{3OT&;2-OMv&Dn4oX4pwm;={)3PW&o*l1i730LgpY7 zR^W*XT%e=av>s_gG-(sx&ZKw!iftS|>yjwYS}=Qqm&u&76Yt)Lzg`{(#FWKx?{a{5 zNLa7P-G>;|;_UpqH70IP&&MG4$f2*5Ceru^p!7}pt5B1D%RU&3p_|?J=KcrCn2v4m?x}*Y}>|?9#c~)r>Fjn_5o10y}&k!ud3 zHyp`!v?(1*a|98~YJ$m3#AUr3tV90c=Tkrc74kEnpf}V{o$dS1t%C-7J_6a&TZU{cj@N9V^SJ5 zg`nRQ@F5STk^*3K&YhB4eL_FWym3_=Uk{V+#=mkIV4oxLTBcxAe~H`tZFz65qh{#j z!wx0ny@k>$+Re)ira2oRl!?Z7={wH*=hg0UKB2^20_|aHJZ4ATP4>&z3cz`zv8=Af zW@GNF$;r@`pXE-vhiAU4Y-P-$1yL2d4sws*rzuT*xmdkzAXO12Q3I+9>RwPvF<+}O zjx zp427s@u1@6Pv4j7ExvkgBh(B^PGC$hscTr-trx7Wp3nSJsrY0XZ2N1)J@h3=DddgIn%FN+eD77DR%d|22 z2ftx!%q7|8TeQKAmeHheQ)i-+Vl5Yp!_mULhk`X@wIDrqr*Yo*S=m#VM!(CVaxFE< za)q*vfu4xSQp9+37v+;^$psZXJ1Xo_wWp53bUp3S)8?wYX8LihH43BrXUyVI% z-hC-#;JYbum^8YMSHf8!^sMC`lU#zI-;!Ft)D@-5Qr+7gBhf}pBrM?>C5y30(Bubs z&E@k%;bP#=EJ%MHwyVZdDT6TFo7O#?<#>+CYz{QKPt>ATMzF`ll`Lb-WwPLFiDEjN zICL*)0F{DaAQrAG>)>2#mBR{o+FFb6Hcf1!^55`TuWFCZteiW_D<(CQ2+bv)Eqvb3 zNX0DZWg~GgwK$aA)IiQlWu05C1RIHOAkV91YEE!wEsieTSmpzF5AMv8*d=Ebs&KR4estva3_R7aCZ$J9D)URw-DTe z6Wpb7cbCTbR`%HYeq-Ew#yIyq_gDXeUj3}6YR*|zvj7G?2^jQyEIL`kCaw0BnqbSfFJKfc(8P5!%r(0fayk|1rQ!1fGxj>)pVN7n^b zmxW``d86q^oI(SN%2uA5j>bA7a0SL~V$P+IOS_h5f^CvULIQoIwE{gz=wgEOtn{K{ 
zFw^x_XW^h$n!6az=!apN;%C-yj|;D7oBT|(=jjd>nWL$>!bz*C@sR%eh>!C zy?823(2(2NcWd3&ww~F|42ZE5=}`*t1HVjbSWmAyF|Vy^zo{_%?;b)~KTGF_IuKLU z9W6_fO=k;Y_8F7NDARde`b!3`nOixzqj)Y#el;Ctl_LexDx{D#lX*!-kQxefoj+pE zMi^r+zS%JUX*&$J*8X^q+;AH0x;@Iz2h3j-g!?#lG(1Ffk$6X6_zh?K15eTmvs2v)P%dC z>Z4s#qn(`~(tm4)_OT9C4ZRu-LW`Z~19H@HROe&t(a!*KB`1y>jITb1mI|d|G6qhj^X6L+Lq}W2P|l zzETHvdonFQ?@j{S9tbxoeEDNn=;;qn1FqI%TI)x#bfxe8;E~* z_E2iu^Y?yJVp0-s^m-Gc`x^W0Hr`Ohc9t&@{;+LQVtX1yJp-bK@#$FfdV7G-x6yVY z!a;^wvrC;8sY{(4DTbL1QuTW`moZ=Co5IYkof%!no?TAg*&Bz>ZnvA_r0~c5X;0Yv z-hFKE?N}UhPj_fs*wI3LCT&}L2MMr%dehCZABAQ2>J`gwq-&V5p?6Te@zz$N)y!OV zswUg;NWM48?9~n>9ywnPb9vP4iVw4o&%g;#+r&;m14WYxy`pe8Reo^ozdt*p&CL;8 zOD51R@+_;Gn5dee8|>9{99D;_8&vc#w(t4|eZJpqH+(lVdEaxZI33O~RStfyPsRB# zB8!<(d49J?p6NbcepcDZ&rzF?A2=!7IbGKa-${vaU&Gy3`2brY;<&rmt+In@SqB-! zE+;qcR2-{8pZ}Hs8a0nED-;V*idEX$Ok z&Q)nwsp--yW6)ABJ{rZ8m)QK*qE&ZK&z?*MwUyB#a>+aryFGT)i&mi38N-Q=L-FIOcPFEgc=7;pil;QrCxZq=X{ z9$Pa&XJxIU_SLr!#$iHY!9Lvl!O=Hm-0cErD;jqd8irMx)3&AOUxxIKpgjaDy3KR8 zGb?;2zGQzBg;mROi3!Y6Ss#Lq^5sjrqBJ+~>3yUFA{{ML53LqFaAy8{qbqcT-PUia zSYZ6*pZ?M<4Mnj`fP>WlqqPO{DL;~`FMydjL2F|vEaqcAr(;}TUO(bL1G$x>1H~Ol z8|~lRn;G?5lz@rc!ZsBiKB4ulVqfEwQt$g+e%tz#K8-v1dB+G+RSNLP@zNrY6iTd8 zBiRHzo0LrBp&WhhBddQ|14DVf!1S)E4y3NpZ*~1En(w8~_Vau*+E4m0CNFRvrnosG zzFNGRFTO5VT&1a*bdNy1sX0$hnB;@7_y-1I)Ucojy08>^icU?H_~&twHW$)NuIu>c#W<#k)ITYlD15x_jLPolZddxBgZL z1O$Tj|N0L;?96fOsH-K#z{nqd?fjf~+6~#rAU0F4XHb9pVQP{*Z}rKTaBMtN(c`Z8 z!?Asd;cYQQB_j@e<8=>>SBymoQSc6mqF14Ot#oX~eik%yd68VLR(=IpU`v!gln9_< zn%Fc=?P}*@cBxQ$rgv*jRgFm}El^ZCB2mj??K!U3ZqGVzbM7PL$={(DGh_RalymF5<0ZTTXCFmU$!!SApz@AMtvH`;8Da{L z=@`Bp8u(vck+KPWCNI4>j8nT!mU^2^wd!C$b3hwTkaNsfYR(MNaV6!Sl>8Ztz=`Vg zA<&UxRx}}+=hZpf@UU1jM-FL4y{(VkiLnP==j9g?skj*n6UyA)=?u<#2h4!4adV`Y zGTapQfikAwT?qW`$ilI^N8I|!^53}3wv!lRo^eu~@uGHC^`d?+f3@68pAO+AT`zyd z+Q*h23?Z{j<4^yF-aSVp>sVpPLgE+stmKNaAN9`F_{16M9nrcZM)2diTGIVIDDehO z2$q)=3hbnsN8i&MSt;^2XEX(uD8}^*$ucB35n(VG5%K^2!%r^aD`_%^ivRnX02dPO zr8xFK)l)ex#3eVEVvDk=E2`aEF#(U#aN0xq$yM;s1kNQP_t( zyT8(nDM{LnZB1~gI=TtN=g(qrOVs`(1Ks!2U{}F{*vX{%bsuvx6ll{UdP=QRz_8EL zkBW*s16q+eg8mKU&-)*|Qzo5f5np0C*p3Z7EQU`qbn*Z-0}&l$_hdO0T2S-!_^%$y zth%eg3#$qL1lx(ltyC))8!l{u#yF4qav9A+5Dlu7N}n)TGP9>gyOKx5?XWDN#^!?a zSbui>)e`AEXi&~zNtBJCvi{bwStK23DhawwGGVQOT0ofA|-3H`Sj|h7k5T; zz2b$EC^*XamBVnmeS}61E+PYBOAV4PMf|ImJVZ~+zI+epseL;p$nS_IvNTrrx0DY5 z`B}kF9Umx9N-J3&8$_Lmi)QcPK69OyZgSt3nrwUroz^ z*q?4VUW)vgER><*d|D@wbN4m+F1p5aEb#*&O6X z7!duhvvd23?d!*TbqrHy-b$#;3RzTlDX=3N1vWpN&8Ox^Ay|JnNinvno{8HDQ zh^w+Rjx2o&wR7jWL3k_mvf6}D{K1EOU&}>qhEC<^`)QsI-iwq#5!|-@lF|tLqo=zU z&MM-E(s`EQB{~Q{Q8&tFgCQ+0lOgOrBNiJ9hT}}Vf-9o2os-(vr7JgIX-4#omqOUS zzK-CYD3r@>ZNdp$S_k|8B&AzB%6WAsiAUolTftBH_so(B=rx(6s$|ssUwq(J=unL4 zj15?-LE+mtZDKBCe@!@)sbne3qz1`M;;JJ2CwJ@Lx6p6!+utjO>wh}*hScl~T5rws z_DR{OM14)BiN>3x{LC4eGrJ3O++h@gv@@koO-_*A^(>$4HNvSfx)Ke%Y|bp9o7myl zo1EaIJSt#e)0|s88A+8h_pmmd=Rx&LL|_Y~26eTLRQJ4Xrn*16TVH_qLnc~3Sy0L* zG7yJz%mM#?yD{=-K=YC`!Lz;ZuOe; zx#5fo6tMf)9e{+o)wogJifG-V=w4wjL3efIoyf6&`fGN%_SopfyBd%AzPAb~YP?a= zF5V`NDC2Hk?TLx{Wr#-Ly_NZE9{rmOC7oPQ8N`y*5BbovRyI{vp}&o1L1Csf)o7M* z#qHIEbJy=&a5$y}lInlyz}e>yzym%e7xJ6wkjKHF@B5Z@e=%67Svl3&2$2pb+c!*vnsH;6`2@|{1ZUp+{! za~Y|mI(@2x7Jg4)cI$P7snDx5N8R-dYq=#@_~K z1RwG~zC!TKroR41FymjiM0j27cjVB15(K03Q)FEL2g}%I*~d0rGwE5$c>Y_7$Sib? 
zHW|mH1*P{CQ?+nQsX~=l*SE_EoWJzLUj8Yz=~<6I-+xJ?v(MqhX|4HLks2b-lrc8B z_#Ma!A*8kqigB7E`vJaS&of8rlMe3`s$>Vo9O^=5bgJkP(&-@9CAZcqJ+1)4f_c_2 zc|OM53;s^F+WO}5OG;x_7KnL0NXAVnni9*Gz4S8NrYKGLA`%9}NFZQ!tr$+E8ca5hpyiwYqrL8qKH0Hf{Z7G%OimChk zRlTEHW6z5N3S>o)>w)nq3Mnax8ly-(UF6&tn8KzSuJfB4^=in?L0>cs@5z(tUm?!X z-)>~FeynyD$>5VsbpQ8T8rjUJ91=$^p--+`)ZllhbLcoin;#3WKbj#@aO|%ChSJ;o z&w^cEz*oZL+H(sg>DSM$QEvHtb2w$$W&{M)KJo zgYl-|Vq)CA;Y6I|R|qu5GNUpJn#eRMp)5VYI?W~Q!h>9vjhbTQig^4&eN zjYP3ZK~`x6nw7j==`82yet~hE?B%5~jJMo;qNTRRV&k6EbE>2CyZ++k3n^OSv{~YM zLF81hDb_(zXh&WD77Y!eD9%^0#-7VDdhVa#zzwYiMSr^}z3U~=!v`)sS4@)zPJw#d zY6ut~-lxCg+nfxTLZB$b3?&N99`*G<=L#9OX!*T-1fAm;FZxln zq;Lv3yE%)u?+vXn{xwM$giJQKgw(y#xz$1KsTjq2l;7!+o8y=hYTAdI$%*2Ehc;Qx z*Oo=t%k;7BNHPXvC;C`)Z!#ZM25eAKv+Mt2&}86uom2RP{6No=A>S5r_4W2^mEcay zJee5U+}JgNR4_YFNL*z*pSbnC0@iu3ll>avr#XarlQ_=>b_71u&at?=_WzxyPvr z5hUxaAxhez^Y_^g$I&(*S70%&ocOl%F-mJSYkp}J ze)pZ$by~$BTl|BCdcD;gcemIkr~$_hXRxD7s;u9^bW~EEF0uN!XEo|8IB>~6bnZHC z^m@!u07Z8#OGFFQNkH~V8iM%nJ>cvHw$kQS6Jmy~m<1ne_RnFa-(ZCBTUN`4Ja5?J zhjJXQG$Afn?{sE?g$c!^qGubAG`Yc#^=-yH{kH(1uk#-|ps_(TGQ$ogFk+JO;TOSd zj#$^=Rm{bi+5T-KYJ`T_IP4L1@mh4YTq|u~jiJC8JzwNzdYE#&LA!KLHB|E2M)KCS z-qJRs*$a$YHcrEHD-o$hu7bVMc(BWPitOE4?cadvL~+>@!-YT z5l2%$?@wD@KdfBWTAyL1-l17TnZ8f&Jsx=Z#UHERd1#eSVCl_v3yka`PuAsK|5Bf?syfiwCt+x|?soWiot&P4%uDq4fgRw{M%B9;ODl7S5vF=tyrJ>gb^#jdW|~{9 z*S7>tJJvjn>Qmc7faOL>4T*GnCLg6bSq~+hyuPklZ6^TbLtx4&5f}3u#F;$zeCp=6 zIrvz}71lSiD;hM&ial=D+e)g8mA9ZSq%u|+oYacxvp$f zRpUGqau)X^UxkwjtyTEk|FApz=J^@a5E~doMn)VA=|b)X~}?h8L^q4d0! z(R)h%(Cbyiw}_!C|BTfCB_7g&pl(rcFy|u8QtwzcWt1*Y0OuHsx2`gv!I(7*J>P{3 zH}KtU$P6m|D*gUpRDXXO1wUcZRLAcv9`62IReEQ~YqupMwON~hnR^ea$%a@1z5N#ZB|6w_?qM zNqs`vd}Jawi?>1`otp6{VIP+;yxb#M1V`rl(B#44dA4z_1Sas~xFJURuXxV(XH=Nz z6&-#dbQHP9wAq>$6tB&uZmR9n&?S^Mw&_kAOXiLIAiB8kyknYBe2_)VdDL^No4dF% z_0vLzsP@IPAKoG2)4mC1q2x5v-TdPfoQM0B?sG6mPgt4hcCPhK64c3Q#xx%Z4m1wY z0d-ax7a4>7?b5!+n{`rw-|=YGpo9de@&7}a@h|?@#DM=BsxMoB7@FaZh&pG68oX_2 z_F@|zCiRq*Y16=DSq|s%`xq;h zAR6VZL|(Rh{-o4&oZ>{|`HBP7HCdFDIC;m&$MD?y0{4^|m}_34th3Y#@?_3u#21&O zwh@k-SO$3>~O5NB*C0Mj-MN2Rp|ZixbCBg=oj!`e_BT7v|f_wDroz3 z5kMNFF-fPgzvGFG*ywkI4*xTDv<<41j-tp_KZ8H_82~j$4-X4jE-#d95Z5$kTE!*} zeU`$yh=gMlk~lf0HN`IgX~eipzY_Sd+{DQg&&%21%@{Ixi^%geM_J+h9a|Nt{Rb$I zVfg)WkPn$pTV-c9`a6;PqRBQb%T1Il*2LCcFyn(|qO1=O`=zRg(>M)$e+CS$?tYY{ zLU#Br`u;5M;+57?yxc52)vs5{_NvESx3ln`I_2=VrIsBCIGsEGO z4s1sgne!Wu($Ab|4MOL&>RLe);pUfIr*=W4dV{hNidrnd4ZX(Y!^I8w2fFkB5~>>j zO$qFB(RU;aVJGnsK_lbwCbsslC4{Y;OV067^UZ(z8MjlJpFnyDRTiNBh$WK@!B}kj zq+^+F#NitCb=xqmN0}sWZ3pJ)t9%mcXjV^^aTg0D-7I8v*|`=EoOjk4bC{qEedP8{ zSwYif2I~H8YEQ0wV6(Wki%qa$0?o<*qhSrtlHm;&$yim!aMVhJc`8z7@=9T5iN_;h15^GYXwn^%ahLPiJRx56E^;k`UC zGdEobMU4&dOKr(ZNa3J95R0ps1_b4R5Hqf6@U2iU}^AOkA zi&Z|<`9>Rn$Q>t~mfDWI2ND@kOa_k)hadBY_F}tG@T{SWb~ld??(jU^D)tuG6`Ivl z97XCZ6J~mk0c4A^wMp13Sj(-OB*F~;9OA)=Kss=~B|st&{SSFfx6z=dia%$RGW^QI z?kAe96$au46&W6$`dRi_%{-)4j5@sP#|k~K&vxIw!wa+~K5Iag0!zMc|2*(-1suym zB@fX^{KY35Kx#1qc&`8Y1_8v!#YSgd5_#u%u|>ojfnsk8c`=6$-?!*JjNtPzn*|fF z!&J>NS*U=12@$g%eRlkOw%(!e=XOaxWj+m%QfQtZE}~}=$)kp%n$XrQ_toA7x5%HF7@DKo?fd>E)>ZGVEWDs;z>xPJH1qMnEVv>z6 zJ^&G0tzN}mHkQ8;KNB3t_!f6@RT8U0r7F$G=50Nqk`}Hv0L96XQ2bB(!fxjBMAE-W zcLF3t(c-g4On_?<<>vA;e*XOwGs~a-1i8K4*nm)0Ya4C}IV?%QTju@oiXIzb(hrln zvfImv!&_d8SD-$OLh=7~E&uQ1dpD+^8#plbNBi?rYT`Rqn*O*xr6(&nN_H>7YhgGG zwc6Kyfjcxp z{a7l_lYgW6U-Lm!93;^;Kki;xrJQaZPBUN z{O!;tER#RMbe|eK{=#=sY}jCB`*;g}s@Vn~UqFVt#IC+a(< z)QOOZLIvxV27?u^6Fd@LlhEOE>&J67jcGz!kp`9Gkm8%JpxV|rt0-Qb@*!hHq3jy+ zfY&YCk=W+9cjZn~*)7=4wTr6Q%H`)uB_0&=Se&7O$O{ss%fAvB7ljk8^()h635O4oDs3JGL+i+Gp9nsyj zpAKd-X;~g@9)4OKH)eOerGcKWec|+6{3?tY5w_^98$(+ELdI=vzn*>B=Cg6k@`w6L 
zh0H{h$!)tUl2no=y6?@Z_byQkToaeCj`m7QD<;mqA`1BelJ{}lXh23Rvy!3$MjMy! zGK8z$TV7eX>Kh172=Swk50%F*{;@WwGYZ~k^(>rEGE2toe9j1q^0+=0HxOsgny0!+ z>bjEgTgO7Zch;Y>Ejl#?ToPLzp1IFVRWX5nJ1OdN3coHnEbG$Lp5{w9o4cL5{&esy zd&ViI-^ALq3J5KENcsKz<79yiuzD1Hy45y0TC2}P2(&0&9^WmfVI_4`RR5ev04Re3 z9-{2e?Y@3V>`xrn42+HUx=M;vELBRofAJMk0+@P1YPCs@+PlhY@~8I0vj7O%7L?5XPs=mYNPyDgOyIsVK3wPq)cT~d(#mq4yxCQDPFsLM51~t z-9JW>O4TEQ`ExA;NtC}6fSKXfX#VyD61|G<&S7>UjDV{CtgQ3HSFPbqp{|2z_U0di zJhj0t{Es(RhbJV3Z2M@D1Ie~6f;mqwzz?@OhP+XDg}&okNJnEg`m!xff>D_{Z0%2q zvw2Eljn!YGirVzxB$9W3fl<%6aN9$fa+A14M8T$C`9h<6QK5ma6iAUPdY9c9H7YVp zoL#d$ccBMtS}+@A0QP(!Zqi^*) z(_>S79w~51&XaMr4~e9tqNkE!5I1Vy+aeC>UACHc)WglOW8Y)DF|hgdzQ1F|2WAas z`C)86WJ40PkuTuZSE00*uYIv+?2P$F6c;NfssZ)??K^Yhd<)tomDBpdTQPP2%r7#> z3^$<-hoOCwL4~xp+=*1nMhg=2d|w0dM6KyGRqJE)spVXn-$PEw&~?yrkE zS`ZwYHJd0!_|+aLEi{38G$@5dnd4S)rh*d;J9>_~K3P!e^Wn9aBpUj7!EV|QitOYX zzrEPm?LOO(J@Sz~;=(=ZFvAAN_`h2F)&a%47i$I;ny0!G=KfF6*#95)fBS-gxbC%q z)0cqp=$i;3V`P~tWfczCuU{e0GW5NVtWfD5QY%(bEnv{Dg{jg;&HJbGp5-11QgBJK zRfeJmYDm`|@Nx7V6A)g0Caz39lhn=Qmz-1$D zcY+r6&i2(QK$9m^YAYvJe4oW6!d~X^*|a~lBmFiRI?50*oY2*+Q+{ebDUhOczhNy9 zr$~^4Kb?~T(UV$Msi7BHJr39NKslZRCEz|o}&)L?-dlS#|rxkb0Y#1 zfR&1gw~5zB3rc{eccQ~)zhX51gL1>iq@~BsAoLpFXE@nXMT$R(9Q1GG5)XeB%HaO% zx(lmF0>&po6OVM~%H|vzY~5v(Y3&++KmuE7Gk#N?(9%o<3blcf9*|EhI>ePFlZ=A%qwMc)D6Q5f^* zKOgnLH&?TUd#fE{@o(e^24dFk8oJ-bg5a~k+}?NW<(}59B<=e7+c~o19J6|O1I_lT+o#{6X2w&=s;HZSvNt*ul7od#$?^HVLi=QUS}T5Z#pbDKZusc3{u(ylAcg!oKq z?>{I68?&6x4@~dYufU&-B~KQrEVsDxUr=Gm5c=Js6OK=rGw)&WpPzQIyM5d|5kU0u zenckWb}Ned(m6M-COvJABLZ0lkqJ5ko<{LGY!=~jeA9l+JTYFPrRfSn_nlb0z1S0z z__T1brKvG7QJ^3vfkv3)U@|z?g^gc(*z@lUu}bMDA{K0ZPA}S0YmV?nDPz(1W2Rth zhTq9fqBV3+Ypk$RyD>tgKz@&HMs5f5WXO8Kbr;3OY_^xGhGxsQOfDqjKF;HEU+%DZ5V7WpZqiMFb&88&y9xdRm22O&iiMA1n<>cXfV z#xIHvc5Cpi6Gb8d8wBm*@!U9Wt_Pk|xtdOaagmjklTi2X@FGMcb{^Nqq`$xpJ0lXg z!PwK?6EPG$EFU0a4ch2;cL%L)6aJykBjkKzViJ^>ThA(GY4a58hF$-?u;e!n9$Z8XUkcc!qXIc40xY{nsMxcy)wp=^8i1R)&sB;KN@h~7UTS%2?ZjG2t47dk83T2 z^qQmR>md+~(PZ1kK{V=MdP+aHxPT=v>AwJL%;>PpVFd(HF^yZqurY?gKK^agUZ8NU zc_Kq{fWzY|0_=eL7cQU34@*H&8WJuGVQgT@LOhKENxt7Tx=x!zf&@Df>X$z`xA9aG zVObAh66>PB!GZP+AN#P5%DY8K4@2W<6SjM(EpV@`<@gx*r-}>`bZX5qc56^+w~z$d z8t#Cd64QNmDV;Y8=Jn5jZHU<*--%9&jb3l9 z&;t-lFU8Nk^MZO`tI|XPhKe>}ufyc_3AVj3g3{BBe%$rLwyxO7r_D@|urp zgBl)9&U^AEgZ#!z;UeIM3GmHMrf-qVMPi^!MBP_ahVLm6WCDqT3$3ZZCI}IWnHqZ) zxhMQ&1LkuZezt=vjH#_NJeqMr&uisdhpqLQ5aSG^vkA@Jrh|&fR21W3bVX1K#}^8H zz>s9BPJzB@ub0$0KJzGt4+oc8g_ZeHpwB~+;Ybl) zq2lSzH2h^X2E-vKARW(=&8k;>2w zjeyNT@h7AMK^CI#=l&FidgXe>RBKv>g{wEW?^FxNziTF%7oKkos3a+l^0ex+z)pNm z;hc|_<#xV|KQzWuORWojzYc?Y!u>1`>P&jE`Ful-RXWVbVKF`ZR;^S;8!sJ~{`DAz zUWq(UdW)CqvIwxFM1@u^ukH;jQo&Z`84jVAoHI6pgk@-(iOc_YLg4Sp{msHJ1IWEU z1e%<)Cr0CC9p>3K4hrQGvz7&#X&WyMLs74(PI}oYlh%p(TOk7<5l}g?DhzOL1XqvG zSZV8Q%ep7p9`ZIE@B%l~QCil|E-=G?LeQDLZcP?~3yCKSb`?Gt*c)QqcWcZtqadHl zi~J7#EwxamTF3-+Ds6?0s(5l}CNOBT4w7;rMyQbHYE&5H9;I+uiaJ|N*DM0b>46Y@ z%0_d;yb5uD`M)19+%pIe-h?#TyhaQf{I{8O)&?uj;#n1R24ddSHzpk5e-jop$8`A~ zfKJ#DCyBZ54zNk>FhoE4P>2;xbIfx+r*#&veg;M4Z26v2xt;<})Nmshvq-sIgc;ufj0n8_o$+6@#&h=7u`}J}E~+uP(=<=6{*w_n;%>d6z0z zKoDwj{LJ8FtelU~{BM7KmH^HP>H2v}tX;@0#$whw>jL8_-Pv+hw&r$6YE;aa8Z>O< z4`W{LiixajClRM|L8o@{7giBp9b4;CQ;l?n$Rt4Y+CG`&^IWi_ig2Gz{gxA1Zhbkr z`VEVORV`hkW=%*6tjKUQsKgrx8M0*9o2(szk#AFOkEFyFDW=N~T%fw-Z7G7kQNo+D zp?&f)hkkC_xLydL>ZAF}x|KP79fU@Zg}Tv;c#DHBvMMY2d-TVu>`42j!v9fludG_$DcySIsvW1#QzS~gW8AB)PIXoZC8o9<|}R9H3ySQ z<~t)wyHLx@4}Fa*<~hAT^V6yA69Hs1WeKBhOh4BEfOT467T9ZHtgWN`D8)<>`aA*z1|d&5mf*|%d{oD4)rgSK}o=g?(O3vOz5(=K;g2Fk`QPg zW8Z)Khc?1>mYycDzU89AWiXbS6aZP@04%L!YqMOo;jx>IvKNd&4BGSqOx5EBmmB5o zkvLtq1_?FLG1pfbEoqOI?isyvqq+v;-{CEJYYH}9MDe{X*DpE 
[... remainder of base85-encoded GIT binary patch data omitted ...]

literal 0
HcmV?d00001

diff --git a/docker/media/show-page-outline.png b/docker/media/show-page-outline.png
new file mode 100644
index 0000000000000000000000000000000000000000..94a24c16eaae4c44bae585f37c0013a9d5c9de27
GIT binary patch
literal 302785

[... 302785 bytes of base85-encoded PNG data omitted ...]
z78D7JV%BUo-nbZp$qwF^IWX7Jl4;s@+?!@(;=2eBJ!8bOMs=!cp3-%xS8I>s6nxJ+ z!l6m!-&m1ZyT@^XMHT{zE3Z6Rxb9GQN~D~=L#Sdv#43hT&gX$x=nrcBj#t#J-n!2G zgoGO!83icuk^CwLju(D#@!5$z?g&6znG7unQD5l!Vk}~i@%eD7OD}q_)ITLPue)du z@M8Asrr*WEFdc!ea5PZ^RYhA}Ry%RW9+CFxDZ3Q%L_Uzrl!6A694-=^W(?~IO^+iQ zMH($3e1aT`mt)ylI3?fRO7pjlRZcJ8QEmYIufr%DdQV(2{I5+)f7X~qkyJFJ-0Dc@ko{0prja!5a39Vf+%|(L z;h%&v1Qc!3#UNh}5frD?PSgTxk*6b-(Z6<40SpemuAA5o3M>{q_sC9i*9%P;u)hFA_6O`9C3JGWAzr1Na_wfO((089pTQ| zOfeG>NK4x%){P7pPpl#v4{sl#(v%MgZiRQISk;w(dc)D4V%vD;5K8|GwF#w>PO(j`TBY1ZLZd`!rqJp?Tami_Qs(Z(bA`S4bFR-QU1wV%;ir#$y~ce>Bn7U#BAwsL$` zXyBK2T>m-^y@6>Oy^7O)xjD}#^o`lhz%xXa8mpPCj?M^A;ch(_=UOKD=cocWMwvDI zagT(_-3i#uEPiaRcwxVDf_M9t?&k zX1y{yumM#^_)aWvvP3OSX#MlZ+2|_{x|x)$m}$MC|L$+L+U=1mTr@1LR`ibDTq1&i zJ$wtB#HA)bZ2RS1E}0E@d(po_PQDyfz)Iw4j(ns8Y3Kh|F$5T^%rwomO~$#z)z*HeZg%><{Rg(18R*y-o<)K=WIPpBus<{${KAscbo^-61)~J$ z?%X~vr37*=OegBZ1hJ*gEr(DhtvnpG6c~2e3lp7LZn=1s`0^p_n%C`F;`Mli)xHL+ zO{5Tv`4(5`>+7rCQlo%SPpgJ?V?mte6r@?axW%a-`vfyB9*u5^m67eK_Cq|HB+f?< zxwFlot>^+29cRwB)l+K`Q?M9MSBda+-<0#6_H^1EwLiKhjzVvBTJ#i8o2cRTcp>yM zT#q<<$}B&N_s%5`mGpUsH5E^vw>#*a%Z=b2k~!#!%cqLWAC&l>r8fEz9N+dim-_04 z1)1z#mXscGrtadi1u>iM>zc10PTe&JJH=m~Y~Zhqg}GuN86LO=ToWBsSsjuboc=Hk zCGB}lXgh}76j|eo|NZ;OGr0#a0rB936uJC*k@@8aCgPoPBblIU1~{3Y$gT`~64rEP zLhjLokHchzr`Sw2wyOy3)I^o9>2 z*jUdzaMKG+&Rt;pO^{k)p1(lx@C>-!Zy@Z$X@q`PD4`C6Z0LMJ^B0*r^j!~B zQmDduxJ+hPPQ@uPxxUudUQ!jI6q79`Sp$>`)9!`XQX=Z(WZ?uu_54mx61x4UglEwS z_{R5`c1fKH7z)MZQNPu`?1es*ThSBjgXho_E#!hZQneIenX3eTr^eXu{gn$hWQl|d z%6C0)&+)O+eTdb{+lkECy~Yth1w46dr{lRE@95Mc6>R+3YlZ1E#^th|Wbls($NgFE z`lWnqv%|&k|`VJ2pn_Jlg3@wSXId(aic@0lWlbosb+2WA>3iIk@STIF>Df9hZ#FQ@dTl;P% z1-q&2Xwa~HQIXO0Cb1Ku0OgENNl;hJ=;waVI?zwMRz2zUA?QY4|T?nHc#P2 zf~&8(rPW2S9|DPn1;^8YMkx`{g8c>mhEJhhB39s+v zqLwWRrXf70E3|=k5KC2XGS{nMq?W_ao|e&4#q`-NX;xi4b!Kb5B)yX_ExKLn?IukW zyt`4tWV{(4la1{!q1nQA#IqdDmiX+}=td}vNt_*vTfCy?zwNhq3M>~rma{;DOk6)n zAZ4i0pZZI9IO^*Y8O|m8q@?SxNc*@aX+7Ne@1Z)xys8I|NGd~(%P>G;U`o^X4wTQG z)E&Zt$I}LOYYO7rltQiG05Cdw$hwSBQ3qU~OUGI5R`7-ow^o zJ%>3$e{Q)`6}*nO9X|cynM~Z}f7y2+rG#$lflsZB>M%A#@CHeL~ zZ}LE#%nu2fvsCCG`d3|Vbz-b%$I3a%hDY-qy@`;sMflcJ4b|1A=ru$mezP(2ns*Kx z8++Xmr*K(`z%S3Cb~C|vFeOF)MGx#sc&=RGjX*DusTW5lX6unlvfE#}W1PwtcXzG` zfO1*~f1n;$;`%!$`!&@YC(XNaO(H4oQYPf31%VrVf}>{0qeKH3fPVNfVvI)1i_3N)(nX9z{WJz#O>FPZizr~6dC9(A^!wI`~c7Qc!Jl}}ft`??jEKrU#w zDW&?fBSM9Mpofs8XAp@*v2Ta6D^*v0at!Nuy{*b!>9vzUe;m^E2whNHaqaxGMh82B zrT~94uVQub4-+W79TTpWKv8aQb_ll7r=GWq+CxsxGC-cG_Jn_=&%aC6XPKdGR<@4(iv;#T??zsrx;C;mW|elaZLHnd&ptry4&|c`x#^L zbUoD(P>E=Idw+Ct>&hg7?vxL~mC8-87yvTP5}y8g0=uo4Fvgf&>pCHThBP|en{sN? zestZO@Yt2ToOfZnY|w+`u;m7H_N{&>fUjw(d|&H~`J{}68=#Yskf6Qd)v2{gowuE{ zR9K-zSkL(@2p~q--VeG$i88BvbEGF-l}; zA!)g;S5w_tQ-y=4PMGZhCbVfXZ?2}_wPpWfkP?TnImDO)7^e7C1z$OoXy--tf|VnW z;_OCg^ThUUdh+=?`r~SJ9v91V8yoLdt9EMp8>pN)8PxmFMt15$@w=y82k{whXESzL81^N#IpW@n(p_4$5|ryoLvoJb3k zW{vd5Kr%TZgEDZ4;CDNVsWzViST;cZ?p&%LUZ9ct`Z!ZXJ5#*pBlkeV)zDvDXefwbd4;*B`RNsZ^9fXD`^RxceS%;R& zrMz*u9Ms!p(HHolqnZ%)H(v~h z1@1cq;h#yQ`%=9fRxtp$aGna0kGkMMeqHQ0Y4nGLHNN``LW((1fSL_@8Dm%z2)~oI zV$T-^X&YrAS3x*fpf-1;?rpj8WfoR}*KieXK`q)C-sJ^HJM3I_;X93anQOM+Wa$rP`FLBtu2~QhNmfGuB3c4B#O&di}av{-e4wAlxwp zrrD;7OM;KfeLmo5h|%Mo+1lavJlvEy6`^e>F^~YPGRuXUTN_!cYgS90m9=&QxCFdGonHV<9dT&Kz)r)hoPB@mHEIh)+GQQ@Z;?Mrr$}dpV_xB0DZZvTeEU^BT8?M$QtxYK#_bG`wQmitA|uXt~gH8byIFRt^~;q;)X5>cb{qj zE^3wn)tJ-qBc7ZCxY9baL}#ivVn9lH>e|eo34;HSBqkumT_t;rV-FszT-3E%F zB~Ln3NHq2@ip;ME1LE2Wug6n`;{DoIjqfo7`XrGpdrqQ#95KJsupD*R8y7Ph&&MBq zE-ebPf(f}6=Jm5wYh`>-?~Dqt;o*=+D+NaO(FOWW0My05*Y98hm_{cYXUc#JMBWj? 
zZZ5SmI!O=%!{E1?R{-~DK|}vpqr^m$=@+q#qA53fof!9KUJ1ZIDQ8Neg(k+$hrQoz z+u7~&nWpcBl}HB36a2rpM+4*<$tK~LLV^4<+ef#vebTQA(l20gsP*Qit{l81Hb~A~ zBmjpAHirq8khKQeF2Jd|{39*M=N4|6|J7WL<%VX1t9H9LQv_TA)%9a`nHdf3ptK=uhy;hF!k=&TVXH z+~J&@alI~1-xx|jz@8|cVGcwme2b2LO4bW^>XCEES0TG)NyC8VEec%>Dwq}XlLiddEH-4b-xT=Y;~ zP~=ytAal+n$C(nHp)cqA^Bu!tt=K>4wX2SWkF&Wd$lLpZc-}4b@gDP zfeffekM&G8aA|hUHOq30!%VY!aMp1t%VSKyOt)4xd`;7@`!6AWN|K!hQMFAupAwt1 zAchouEQzsNaFYa5`va?HK$(L~Rb!?A3>iF|i4lOqEt?!nJ^5fk!+@q96@Un>~06 zLd)wKN*qkSNWo!39z6W{;WJ602k-30H~GLlp`JjOwR8(&!-CO6)xT@-=*8Aj)#%4; zjaJ}YV?`GhHTX$WMT7xVCSptd8f;^dQijVDkf%{o=J4JASMyt9${!V-oBeb@YMh|e zeKuHxF7j-rrmQ4>%pP$UJRca0IZt+|__zqpNt6CYHN$Bf*^9%)UE%yz;$t$?90sv5JpP-Vd9jxCo8q{{JDy>?CyiNjFbNP_+mNkO$U9T9 z{$Ui)7@U63a*`7a7{n@phiIxF3kwZgGbS%a#|-|_4h`HJ%$6Z7J!LJ$S7=w88ylgE zGzW0AHRhWouRtAMla<^jJl`QbGa&EpA%*a`!cWKnh zX8!O3-71|-+rH)pQj>>c!rnv;WcD?*KYOtHSjHjJ!pcNZ(eBRg^RoIJYN>QjX{^Ys zaZi303@vg`R&{QP&f=o7!BqnJtT^)DE(r{Ln_?t89L&*nHR_;f$x`}ee^P|a#%MZN zVMEZ}l7f|7X=hOH13H%?CL(nPbi$-ea#+X1@KZjqQB?CqY-B2r<`}GLZoF;+J$fx| z|6>RU4gR!%qqKfU=8nr$4lPa++K{s%C7k%Tr#ysH6om|b04Q{#>FiIEPE4QQElp0SAFVWp)=8>4_>^Anzz_anFEO_QX8cq%&ZNv)yra?cqAJ|@_`z4 z(CJDNJLfigf;-%MF%UJ>jaZ7C>H!#W{Of+Z!%4_{(WbBPlRYq9uv!{$`uU?dYp5}Zo@E6V@-0ntD??K@8Lj^^Rc z*G^03p`p$%&7qn;ybdL}ENT*>O4$RO%TUD{-<9)i(PZM8BuOVqM}@Pht2#0bMH7b| zXU8UUfXnDvg}~NH(!c)t`9p@QMnAlkD-1!wqerwb(@MV}WnQjz9EjSr<$n6xwvc{A zxO61W6|Q$J_65mQFJaMLPxly7WzOScI4q0nRusmZ}+ zMv^Mc(uKWNUmoD{>KQ5PG>0>oFh2y>k8(0~mul}QTK{lt$Y_`})c6>C0k$(_AA_wv zoeQ}AM}rjdASxfQ<2Adt(GNNMqLk)vx8LFPABi5qVUcA#B#rH3HPrSg?_!yrYf5y_-AiFVc-23&);-f|I;1l5he6)&)WGs_wttiyN z;pmlsnNeyc=&?JDsYN<6tM(*YEYil=pw49j50oS2V-(H(Nwg9o4GSH`4{fujg)zg^_k*)}((+euHPol7=&)7y*QSC&7 zi*VIY^kR`=I4;?tz9cHqwG^4*#sB8zpQ_R;eBcAtWN`NX+oS)P^ZxfOIuM4y=Geos zq-b{UEj$&-C@RJ^_KX5LYiM+Nrpj7&$}Gg-N@$(COUo`q4>sR5s;<{PG>w)ad0dI| z@zgje1M4l1{aB0vR^{3|O?_j(D)vT;*H*-pEZLf}xiiqTH!SURnH7aM4b?rkQQd}a zALHfv`HqboB8%9@h;D1l8<8*W>$MmV*YtXH^!XnRt_gt^QUxMvijLJNt=FQ$TqQo7 zwvQuO)h>skpYyJ-RMR9Q#Mt;t3T`P9HcHf+yuPQA!I_UEFT(5DiEb|bXX(#oraW% zHphh9z#GwOmgYWkr2-bCnqGv;E@?5*d);QRBV*Q$66iWyh5x~VRdx^2o?PZqL5(L| zjckmXQTq#Hd@k{+V5MqKS=3dLSq%tZDsG?k*~M-f`5+}E>n?xVVYFK5OA?{8S*8o&Vd%d{45l;I#-*M5~c4^x8J7FYTH-j z`1O8L)Pqr<53F#GFpfu3p3!3|Ec#fO7*EAz?4s>QUo;q9U#(uzC#|kb%26v-=s~;1 z_0Nw4mq1?`zJ zR#8QKiugY|KxF%=@|W$fe{?}OX;3KAEA{=smKnn)gJT#Wov-af)yxUlW|X_q}h!AsDz4EoOI_b!lL{lm!v zg@2Y%ObEyr#wu?ueCFZIUm z(8YLkeY5hX_j{X1jJEBHz;gK481DZ7lNpAe&}^;G*RD4P6+c1U@<+~Z#Q-CK*uz!e zF(xg&-=D$#*JJl2ec+=?HUij}gg~B#GPz#W>yeSO=-ubV5jt0m9oSOGl9$U7Nun*@eV%2BAXKBFP&JSUr9dl&}f3kH34zdYzA8~V~k$h&O zsrTmuLy7P2>5DO=8UNX%fG`69wJPW-;QXl-&@DXU_B`miipg=|%a^VL7%i>C=+Jx8 zRy_mc(D)K21HB&-|13Gcw`V9sT3_E*x>kj0(dzBD`SQ^YSs<$QDg9On(76pcWI2?O za_8SRz1L56b6YT9)cka*_W0NjLnFf;S@^}Oi%h?JKdJHu2E&lqC-6r}6eTmH5}-et zEkm9da_kRptcF%*p*u^^f(XL?F$M$$iESUXDS)8U>W>X!2G>HY0()R@k8?DwWMeV- z8@OtF=PJnV`Af07cU%1A&CZz9{c+w8#)tJeuXDp`7)=sBMQ6G94x5-v3QXUd6MQCV zAj^{G?i1Ur_u9QTV34;rrH!n3Dbaj+_6$KiK0ZrL+uYu5+5H((BLz@O@jgyo>%SiS zXSfew650gBNb2jLjY+2A;JzoUyO{9z-60DTWJ8HeUvS_1?mv#09n_Yg6?J2t;)vBR zANz%}e%nWc>3-~f07S6;v?2dT2}1Bp5@$dd7E1J8>gBiCmt}xDB;7Ae9^M-_EhXk$ z<#F5&PlVXNyz5>^CJ``?s?Q`}dX$YvukV_tJ1$j%1}!dn4f#;Sdg{{z&7-@KIOOmlji8JolQW z1tLSTuSuEh$$g{T8#N?@625?tWv#z}-X43Vyhy{5!w1~bH8%oEM#oow@nkbaFVPML z>b~N9Z}$a(1H$&%avAa;8+AYqr$rqCj^*k(T31rVdxp7q1S)ZqRU{w@vC=Tlu*~a}(#_-9;eyZ{wdpvi0-JxPgn&~t!S?8tov+9^c1@aGrr@{td zEJcun6|qZ}MZPoz>?&>)4IgiWqBmP33ZYVx8ba3F&;HV5c9u87IPO+)nLN*%J&JWB zqYC}bD}RJCV1E#LNHKd)FaL6E&`1_c=U|+mkC9ox6#emVwxU5U-qmM7k_bQ&$=yN0 
z^-(HTE={TB7Uwii7|Yq3_#y2E4o85Cc2H5j7XKUG?Eed&4XB#_WgSoSfb4mKw$g*O$=0yamJnH^6P_p`9AHZP|~z`#wi0 zZ%Fk)7{an~vMCd%)imZs$yK^b_w5l8P!4zB!|nx~&l=WK-6!wy4Rll@lvDQTp1{Gt zvyFx3&P-)N>E`_S@i#(^uW{?phX0N_A5mnsqzAaorWyM?@!*`?hS@%%yiGQeDC&b` z=zKsBwPLuzy%p)+68F;%eJ^9aQVAKUQgn{Kwn*GR8arj`#p2~vXjpfyrvYgip5 zn0K9dBnkv01AH^fj1qLX3erUVw-@BVvpI_mo9-2X&+BFRc)F4u`Qo?{{wTU6VmKr6 zkIKvN%dGLtE&vUGAe$CVp{||B2&+i#1~EcRK$f%%fyXIOurJ@(90f z$t>;X3}915`r>acfPd-bw=qUs;a=cAlEY>zZjvR?z<9{-KpSnIi#)2REu$AJ#ZvIs z78HVvIK9d7Q|x!rKrPPyCH5$aVKVN4d>rZcP%n;9n3}h@q0wtiLaP z&x`+OYpBB~FEb^g4mA7tb&{0|$rzr%38NSq`s)lS{%sc!4m_5v)v6alTBunS{+~nW zDPlM+awH^4MPZOl=y{nD-roWN>hQ+~R+9Iqz&+%Dt^8Me8u4xUU*Bi^kYy@G)A$VY zci%sM9h?d*wB?nCa`)Q%_lAm=p1FyJDgR6f+-#_Yq^N&?fi3^pGjJt~^e4eVV(_j; zZaJDIUY~K~MEs;bt_^R|Ud4=B{FcGz$>Zg4UNIe-cmJFs{)w<(-$v|UR#6Cj9a}OA zUeFP;_$E~TYn-pBfz@u0eg^xG?ih+3&Nzf_c6oaIF2jiazLtEZfio|brrdepZ@2nS z=h40}bbN~+kZ7W`q|;*DM%fma-I61)|@&cDzen~`ErO-R=ii!%Y!~TnD-hIAu+eReb!>@RCit=hS$;qvr zft8i^1iLx2es8wLDK2h&eLqjpaXRguTDZ7es*Z$O6_&2oA@TMTmNsLVLW_RQFfUns zmMg!p?SdBW-7Nh_N>EH&aMrS^Z?4L?gh+OTS`g$t92QPh)`HYpIjegGh(V*SOu=ns z7=?h8mwyBlY+lkRRC3!&xW0G7Xjd?F1&1^~t}NxEuD<%k>m6(3x91qYMy?U`03WK+ zy(`W1$EXxW+?q(`eX{S`rf(1?yQxvN#qFB&*vh7rTykJ4^!*+ zrh!y`sU|d?-4@$`PFOV0-rh?f%VTR>E@G)lt+<@VYh9V^u2Jq@U!+ye_f}?Q`d2(7 z{oIE?+#&5f1kc)eoi&Kb;>k034P>@F{tP|D)mG|Gs4RDG$IG zl1WohU|7t^0RquS6bYy_knsFbn*6q)px~3aZ~rr#)Mxn_8IaRO#$;nsF)^`8rKoQ3 z#DvN@UF-GUp2apmzzr~~i2=TCv@pwFF*)B6KR>SpE)1KR1F5LQtFl{K-|Zv3w3P@k z*+^nY;S8Faw{^)IO3iUPYysZ?QxUw6IIjRKN892~Kv9-oT(YV2WRVVz3h!*SHB+B9n~9uR**tdJmv?mY_#cBx5ZVP$W!y5 z^nAL>E=Opu4OXIscM+)mk{Feg!+OP2pBzS)b)pdI7l)x;N)COcDw4Nc6YjW(E*(YE zhrPB(Kn09_F5-c*CeL?mLcWzJ`O^HAmwW8B^7H3gmk!~DNJIiH zG`#q)_01VqUqzcmI8J9jja$N`@E(9I%&O8F7)etEYDcPT&@vj+v z%%dpU&N2;)W$FkfA=X1$C)6(%v4urQ`v+WmYyNanpn|*{_P)!?N19tY+K_kDHmu$Z zK}6w?dk<7)F%Z77coca-`A#LFd?C+?kCd(q&Hy=#0d9!Oc$qCjS-pi&^?H|$`9cT zdjECc(@fT`4C0@Mi5^x2G11P~8imzjHa&mN$}wSTN4JUzZor%_fSRynLq8*m{47lQ zRtKXS9PRq#_rmJHS0to}Iu{=N$1OmLbrIjg%rnE&F4S1k8TT8framG3Tx<6lpo~79 z>7YZIou8<2`~NIv@BT~;J6ZJQdtxb0o@5Ov^gfKd1LWvc+4Zfnm~XC+!F9oa4S~1* zbiaI<$+M+%x2fK(6G&FiUxr>nTyOTy6t67fmq3vHYR1}pB-A}5c}quLgieaK^Ec2c z5yz6fLo2j&rhe807u1ghO=ei>YFZtx7Pa{X&>!=If_*W!1@S{;*4C^e?<^7OLdJjP zYTMI^ddLZiE9-OroC8+Fv5dXP-1k|1D;)um_WP#p*YjqFJfm5E8<{0yY!JrE;hc#WS(N(-tgR#s;oEo=pn z=@vTTQL=(RAr|q#_FMaCX64KGu;_rz#u0mLLT{U^>WI5!Z-i*6#mms+3l^|*+z^Y< zjjI&WG1~BS6mk7|`@ML30rUz;aU$^p45la*L`^oWN@7d%o10+;1IKsx9$fezkxIh# zU_R2m++8QlkSOL zW)3m&uQ*?sSBdi6tjc$+-@+q1dcU9M8vqOJsI*J(pg-F^`hKI6Pazhhl_=sC8jri8 zgJdPlLfWqHo=0^Gc>VbK zg--i<7@duF>~gZZnvZY?3a(CUm zEA?& zUKUDtueS}?+dF(Bs2`Q)cD+xQ<34J3w|YO&CQlX^-Y(VF#flhSiZm`mRWjq(`CMnZ zgeMNM`cp_n{PZ?*52Pm=uB(5sw+++{rtlpYG9H+2_}c^Df2dNf(kb-=k{6j2Nknq@; z*qdTLg!d7Fa1TsS%*>_tLy7g`I~!OuoI6~86eJ! 
zgDhfzDks>1hW~S|f}`^MLyuDeFHb~MOvDi><_uB1}A80cC&ZGX#+Q5X>- z2p)CW@PtVq-gZ-;T19RujO3HmPJJfJ2*Hde2#>XiNG;1zq~0#*i93|3()5vj#*yOj zd1X+ot*z}frWE&T$7l5b)RSlS?bLt%c>m*2!pZw84#|o~;)kc*kZY2VO6hB*ue7Hw z)a_3orFe0O`BI1~F^+j|z|tMvfTD&u9=gAF+h9=qPA4}UYeyDUBr*=9t)Ca^Vu1v* z>$UYA^S98P;bm~9GI&@y%=faR_k`c9_%s3rv&A>Qa{s`>wfcaJa8YKVe4x8+?w`-XRo*Ldh^sla!gJ@Z#p-Vs5bhgYJ##I5Kva2Q>`Ql`EhL7s*( z$ygff0jJAf?+3gP1DZ|fOZ6(2RKBG`>z!gp1%l<8vhH-!JkK>n&#LAP6jT?U() zW%3d&O5BgzmKPD#PV^&tzrk-)ebcb9DR$!3|Iu9X8=FI;sA<(H8B+yH>%bYO&*lM7 zkq7kT#DhEXv)%C$rchocE;XX(;U4Vu+)L25MdvqJBuf^4W!83Q0zANdzFfP7S*i5% z;LwmRW+czZcFM+rs#n;Q&>nKLy(MM28yq>C4u?PG;_k|Tm#8U&7t2!53wAn7-dKtd zJaNPp$6=W%5s1m@u-D{fMGIpB`;kR@A2nwH^44i*UF*Y&JpnD7n zCwj9wax|^lYO3;BH`#s75rBV)SG6W|zfS`H8&S+Ej{lr`nZV_(o;!>#mYf_yl=Bl{ zC0;Ny<=jxt0@HNp@d2(C-vw{KrpnUyN z-h=nxU`{HeQhx*ir85pxi!D4hg{7phVNCsV7$a3lDDOiqu9yPDTHVULyu6}=+r=&O zJ1@p!c!twl5>kO6@lUqTfy&i934j5pF79h;o)D3^SPSY0fSmJU(JUJS=15|+#JqH6 zr`&mc@Fm!JWu>-e#}Ugc&fqUbPt!iz-113MgxMScbBmCcsW==mdSrL@vFIEHwjD1k zw@b@ZO%S)q>@3-htLFwzPIB|6%)b}XDjL@L!2kP?*K}CT%t=K1zCb{i*kbtJh>0`( zE7YfJ*Vn7D4J92$ryrl}B3OJMq~()dIh=?$q~?wc_4w#v4T{wSGz*s3uGVBS2}B(F8PcWA@P zZlI6(Y`h#G9IhG(%VJiYr9}+fLJq2&r^eeofPyM(ueXu|oUVuCShqt|>Zyy(hz0ord}^=-zF(CY7W6?yZM-%kXW;X1=SlzaVYva$g(@dF^*=64 z0D|Hn-u$^9XnibXDYXrbVGWnZodrPojhX@>`~naN^g>A~WHer&PxP{1!07~}TWJ>p ztlJD2^C8yHd=jV6T&KKMFvk%cj2!c^n-BMvd!tKA1XfXqlwf8-El}jw7-001!*C$5 z0t4XL*bM^GlFYWwzP^~fORujFLCth~ijrj;c!cCJQ2zjkX0F#ko8_U*vE$Nyy9ozO zK(4eHz@V+8n%xx>WZ|=H&xH;f9obR{`z80H431*Ep9vyYqC{g2-SdInQb8+U=Z^P@ zEt`Lj<3=h=O7Gm^_A18(8_n6V+SSRKDi2uqjFE}<@vo<8T0e(fUqdLv$~=bTxvONE z`oY+zKrv|L@TpAYNK-7VcVEgi9(=5SdgouZCsn{`aT}jn^B>s}HDeQJN99)>nXw}Y z=HRb4@u7p%19@Dte%j84Hr!|x*u#XEvOyLXw3Vo9m|~_XxQc|=r6I-OfT}C4@Felu z6&pXEeQF(Na54vr=RF5UwI?I2unuN-Dc-6hwX4k?vK^=#L}6{61c2%T5)&@NO68?Q zb5|3EtP7i^Z?8^&_1TY?sV2McI*I~8Mm#}&ezwLL4lApp3sH!##XSPoMVM(*;h zIMn4|){mQd0D(Bb`|uwX$`|TTK(CS(Pt==@CU~8b%1TR}p7g;&#12dJto!x9{&3a0E1!M;`wDobg!5a!yyT3^KUyjA}3Rp}ke~X-Fnape^Lwg-$ zcpY{Iz}44}sO?*aSHF10%AJm8vm9Q$ZImK3EVT7pL1Ta331H&HQ~K~(d1_uCHZxo5&o(TPTH+{63goGqN)qi|k&xSbdRqx2mCtzw=q4sb?77qlf zN`u4iY7C_tWugw9*GFHs_)remv%bAF!MGcH^*I4vUd2KQ+kfOdA*xUqv47#BfsQ7d z{&hVoRX5jADS_*dVKM}~nTtWhuEfiVCG4TpW7Bdp7H)bgyCv>;C{~{XIWDtc+|u=!Jun z!Yf(o-#cS^Qj-GT6TT~OzOcNkuP$pl;rn5Qr$|Fnvy<({^s{-h7DlN-bv#7BT(sTsfjvMLck(;C>-9-dKLqYv#S z2lP?fg*3OvX#C=kBN!a;`?-2SfKxcaZ8K8v@^j_@!S8F1zIL5$IsgABpZw>;0WLtU zTO>Ge(vyWbxJ75zmj12_Bem4+kt zmi(V}T3YCxYG$iM*rMCEwV>gTefmptAFB8NruZeo}?MOC0~ZU*#69W)y|<%qqLXCqF55{3izo!iO7I)p#mS|RL>MF{t{QPq3rN!miNk8j9|DDt>$-l)< zz5h-8Y`F%ubk9&RDnRcDnV7%3?|T<3Gy75l_li?Lhr{QxD!%_^UkUHQJ**D#E}#gj zc&EjD!#ouFe|n{_{w$myy7Q^oU6WdS=qJ1V^Oo@+P<7+fy1L1=XD<_l-yjB6&W(_N znvWp8xE{*imxcA#onY_Z12^@5`Jb7BO8i2_OX2JgI+B}To~QOHQfU}zS=jzcGetmz zGkE{RY2i`0hoT|L^DEWLb;Q7h{kHV@_XXnQ8K_pE>FimsEEVOu`?(isst=q(w4p66 zPpwcDwl%I{KL5EjZ2a(1-vn0M(Hm3;x03?yTO}yUs68`j{d}OMaL~0-u`!h^;OVAWZTha<0Z?F=nVH!O85#eBhT5}V3X9!* z4Uhu~yFd7ye|r=GFOzTmuDs~QM~ZuVeEn-ahK4XNAkyU>5})RUEn}e>B;;`aPmNhG zMJhR-4C3(G=T!%XHowvCl<5pDyEEJ1_c6@(h20L#9{U zZ@xdto;;>ihNiO}ce|4{DfQ%h(rGyLz0)))bC}%K#BQ#3{x=}QtMk`=1+&?fPXfNv zPdHC`3{9L4`zto_FZO;={A%>QEMicEJ<;W@U-(vp(T#Wy1rX`ammIKrwVMsKWkGZD zpEQ^Dut0i4cjS_Ik=f4N54U^Ic^>ZTbS4Dg;I2dh( z2(Q;EwFp@kc6{^G<>7xwY)C@WN3XCa;#vOac80Yz^Rpykfr6v#$C9g(CaIcMvwrw} z(fUQtFLCgEY&y=UpU6Ac+5y0LH2a{t;%x*OMj@l2L81os>MvXOmr^NL*9sSMRGd{? 
zQtMk#C*GXlb%SH2vkp-wfLM8KXqIObex)1%X(4vn=D&?BMUU-Le z)Tk&uj|NJ#1H_|@=o(wY@4OxVJBs7mMdFcrV9puh{JEvYBG8?&y@x0McUpM46J`eg=2}sw1Uj;5p!O7(02d+ zuvEDBLCN0m;9u^6+ib7wEW*Z$UVBd>2zNJl00SMp3w})-^O?$uH+9*At+(1J9gbt4bSZmVzQE;bj=#XL(&!Q0Xbl;=tpx~0akz=% zaG;Uy7js#sEepo~&;3%Ox!O0&b_RDpUwI!`sc~J(%S$W{xNNo&RaH8~#Kie;w{(}f zEdCmg3n#^87KEi;gw*8ZsC9LBQ_vT^@pQ;EQ@A(d)A7i3A=kbkVm?$nrg{I`k<-tG zn~&#yvgv~$ZJQYu9q$)`^rTeDvpYG>_dPu~^<$d=3cqG;Tg7R)CTE16`}OG6NJk!S z0FzJERnF+GN6bJBM)kSzWgzgBb+V-{-`E&0Cz)V9zcF33QF)_Q!r_zhotkpw6Jl@D_R}2H^xBJE$<0rG;#KR#o8H4m~h!8JhAI+Hk>5>Uw!d-Z( zxJ{DhWYlo6lnAHecyPT;7>cKO^D0!4veFKiPSi`O1bQplHsTl`2;r|OrLWeM+pq~6 zVO9q1qXuwkKfAuRIw66irmN3#iE*SY=%vgKBn6Wq=r6?HcPE{SMFT~lzp{xj|2fba z5C?OU_T>pw!u8htXlf_fhW-W^q*#XmGU10_8PO>!Y8R~3WrawY{+K^@4LV5qv#aZy zzFoa0oN3+4&VFxqcOk`KaoV6=tN47(5_X?#i8#+;@dQ&qQ8>b2C)6GC$LBh-E6{5K z1=?e{-^|Pd?8zb!MSguc{Qln_gxj=+jEn{n-{BQ6Pv5Zp9p*sjuB0J{{H&~Shsv6o zng@rJHES8M#=OA$MppkV<9d30lPrz#L;7cP6?r-h1`S7NPSrpQ`+^F)3{t{#lQY)X zBfD-JeW~X!3G7;SZucoTl%s%X^4bEGwqZ^{bYNM`Vhaub<$^i;W8ZCNE{L~e+S7;o z{q1`nUf(484FnZtXazOry7}twF^3^kpZVYCP)UC1xzQj7jR5*C|9a~Z_y9N3(UuB2 z*7M$323M~nyXRk@ySfG|*Gj(UPJ7s3SiZ@H(av5U$kpbfd+de%Tc2cDtmw zO}fOM0f89)Yxuka5BV&0$0H@>C^u* zSrJjrkczh-bt`Fo36s1St)cfl8c2ILc0cxKj%Ka_ONLV{?>iJvWmSQS>@_u26ZH9^ z6V>1WAklZ?G@cs%OpR{x!BBP60D<*M7PxOF&5Y?b@80CurhQ^xk6V7V7WC5Jfn=<1 z;gBZUbpVH%@M&E7qNfA$9*yn$e$84~2Qqyob23Z`yq=!W1QaYVB^v1JTH&WX)*{W?UZgSp ziDW--uy`4$@$TKbbeRD1h@7H7zvh3vEuZjRA9fL2p9cs24-3~1Vt&n6NoPV7KdxHF zv<;m?RzUZa7roRkfp$;loF!}T*RRt^=|L|D;y8m(>?>hqwiCl$Ku?ikvw-h~mo6mJ zB)hQCt-Mf2^H-06KjDoS96W*-jkXoHU`yuJ@rUL>pf_B-^`TMOi~Y5;;a4E>v+#|| zN~7<#6;oPkYij^5X)9@}6Lwnei3nj3X@^N8WRb!5OxuHtjZy$1S@*D;Z@BD9an)TN zV^75HWNCk3CemI$9*a_e<2T&0q!33N z_gLD7y8Etzc)z|6qoSk)Bz@N+yA$zL-FFQ@B_i^nn4Q4VwFcf3@se)d+81HT;`txa zdlN;1d^&h^ilj1C5{DPCYBP>+j_c&Yn1`_@g#hl<$8KuXa&vJ3nWfS2vDz43-Mdc9 zeIbi?{11i9e%?fG!8VtUrj$GZJ7_-EzhahVFaC1u$V&fP{k{3Eq{4V*{H(^tlc`GE z%Zr%GhJGQo31Hgzaz~D(?c8m&(tF1%=|zz6(wSuy5$UE?rpsWzfwmLxoRD-Vw{~7Q z)lw#^zc*P(!P@%QeAuXe(R@$`rjDNk+LQ;7&!#4HL6 zF+J@$4otQwcvLvAy_6)?$flo@-feH9a9m{kujTKTtcjW9Hb@%Pg-Mt8!_)hf(ZBz< zxiqK-CE%Y5EgV-@k>K<`IgGxle%r|sQ%PFU;ZVb8%B$Kh>3)Wdgl(J&U`$r8qBh;8 z90T5t7Tx((kXW}lBSRih+ApqMA>^N~+$@&BV;VtZbiC!%|CNCMhgea2WhA}E-mlz3 zc|JkbO^wjU>-^kD?b*{to*VaR60Oqr!}gV_>FXXjEh$z5DNyFS3!TcfiplPJW^r}xaw%|j==cE>EW24W412ih$4C!Ete$*FaceJ|WG zwOwx21mktq5|>bPe1l7Y2s+ok?xLoAg>~=vqS5z^ZNaTa;&hWcfYJHYd?tO9d*ZPj zsrKOfvprZE4mmQ4yRbozTyXi>to)~AyOH3JN}OzpCL8yc7`6Q$9f7WXVqK(-Y4`FSw{@S9;ZK$gj}lMz0AIoCGmHf zM=Es@KZ%WAuYoLo?YfA8Z~c1XcxQnwkQZ&NJyu^lBCou$Ah_9;seV56$ECu~Ig2;(ALzp{LPlG6By) z+*gvfMQArcf>g$fW?~On=e#a=b{mwt#XwhYYBE73({_L);JsrsF@GZ3PkL|lv{}iq zgln7VLG@~k{xif=C$1C2;z%Oef>^c}BHpTRQ<;$}fNuN&>#RNHLn8S(uVE);)k9q~REJ7Y-F11VczLDLEO%rk~ zKD0SG^8#dmVIUCF5afb5Ngiq_U2(f>+Z4aF;yy1 z;@p{tSCmQ{pY^;+CLAK{VD1@!Rz;YeFr$6ETE230i7q|i8sDQN(*X}%`?O}eYv0Hn z;dd7{Zdb{%En$%Iv|a=>AA_>V8T$z*GeGVwvGri((oOoBTrp1GVTSKkGj^AQ_+so8 zQ|IpcUN~t;-?i%(5_+MB2CABl;AM{PycvzUrwM=9oRghY5CH;jE&@}X znN2(r!VP~T7amkAYv$HRD zWEiRWOwgmP761$e5AaS;r)>x1XC$0!a>IHoUkQFn_jnoAc8A?`c-h#8&SgM8wbCcR zx@lG0g0)e<(hiy%xp{1?RFMMl)B09`_nJPvu+^0Pw71;%(lfFF>+P01*3bYGkOi^5 z1XGDip8ZThL6%A&{#ss4gL;Um(@90-cRJfQhM=Pbmq^VnTi{3AIS`rgBjGE<{pA%a zCaxTdmO}WHF#mhqdP)+^Nlwn6qN~bOBcSk~GaSIzrQvSMHQXNW*#{pbi8`4T#}jm9 zuTY)X1(Hv%vBnyO(B{JNMq?AX_gsoZ$D8eK{V$OE>dS7Anz@U)fTHM10%N%lda7K* z=aXSZnUu@AoOfqy#YQu#crQt-N-v3BF*orlY8iqR` zXBuX!jD1%6W0L;0b+E<86BAPymz?0oA0;a$a0|b8>Y!JP;*n{x2uPqN5*;Ae15$?K z$QFDh|A?h5sOCLWGkghI^CCu=0SoV(Y4A*Y8KS}LhtwQFF zWIy;$^gh%A{WZ@9CKGR>NR|_S#zr3hiu~z_7_Hv~!$0w9?YRQHqKZ?J2W0l@0 
zbLle!Wj}9hsWkTGN8PY?NJ#v~+X%Hgk^ghZ`Z^DP$paBc+y1VeV?z=@MDSm_%)bx+ zQh;nw^{b_6*7dR-Qy8E>RAo@8;hF${epz&OA;%(gDu2=LPw-@**`g9J{V)@pb&^H& zzO%Eln5J#rh!CFaaj1)=#PH~Xxc!aF*EF2L(RjW4T);klCpRW4O2@xlsgv~9y8sCi zA(u-r3sxTLMXbe1&ifdkc%TZLWMr5>#Qaoor zQJ*#pDpK}(+cJrna9!PjDp%~rh)VlbnR?ZbD$fX-jO?cT(6{R(=LRd3N<6ApkbEXu zhv3JL5}0SjHdgoH#G0$|39Jz6<~jqP*q9tfZ9!n{e9=M5iHPO46LC5Wys_TeIU-*8 z0rg@^ztE>>VMoibO*##S@H66bV^M&Y$t)}%u1e} zR>by~bTEKq#*3{1gFK_-N*Ww>?Gty{lS7?S#M1Rjd&2^4DB1y~6p>S&`2Cb$!*Q6@&)|cs?4npsX_$w$AB6BxV zgisql-Z^X%)1){dTtDd-Hf#05?06;{HWumM14dt|Q>%e`@;NblNdjBSMcng(;5H1y zYEkE*7)^G5FD#JL0Sz``?8bV{T|Qs-Nu$E`-?}^>ajwW#NVad&KvuqwGFAN zMz`E+&InFi0{_e+xEU>V$yn&zv(tDpr%!R}LWazjJdF5T*MI1m>pxz85IlG+P00Kl z5vyBntvNH7y6mmAhILrnW($jgu`6D$1VgF`^z!s;wWxUv9ss(nGt=W#v6_XF?t5zb z>;6{6+YnqUkSSnhDFyn=v-9%G+~(h-he@+i?3b#;T#&2r&No}L-`5+7u714ghxaAd z*TmzURJPI3*5&{l7F>mOXC-xSGyjM`n{s=NU?zSt6CuY$Rr-(tdTN)woG5I28z#_g zi-b=2-w=H!ak8(@{bkY0$z>NA7tk zX^=qj9$Dg+FRs2GZ_kxcRuA;xXKmNcs_0$}Z#6CVoF1z=X`Uw6Zm`QGY=!v7ad*b@ z8E@w3Ue+B97adHJ&tcrQWo}43uuS%;0cd-8(%a0H8noi~*(EVu!e~ASbn8v6dgqat zWhKw}X=a_AkW>7`$fNPYdnG;!IxDQ@iJV*eqxOvlF$R_b9;+bV)5}e_-;~NiXQW}o zirX8LA}tq}@)e(r#9XF$c~@`rDWJ92PCd!TSP3JLVw)>9VTYodfDkR)UpzFZgd9;5 zgdS0oy*Tt%n%v}eVDK*3uFVa!QiJub1$6vQ{b}XS?%^fwsW@x2S(bg1F(%2l@%-_T=ZqjDL=)9qlvh z&iXH{Gws@0YoE-JiEa>4o?f+DjoTKpl7f)^ zh?h89`k>ZUaqQHxb5hGlZar+%Cw< zPh2H-HpWhQ7Ctc{V0pqEhTbe=LL|E;P=OW(Gf#U{_kKL^i#sEy+ep!z- z1Lzj5Ka^Mmn+j3JUck4EQ=WLS&n^gSE$x?NIUon|TTdSC3@epFvl1IpmJ+1m!g|Ye z!DQM>N=k#LN*!dtWYA(ZM@m%3y|!E7D7s*97Xu#K|jY0s?<4Oxpg#ixik)?4U5BBGbvUT5lF1RwMMnh}9wei5UAbq2|ke^nqxl5q^&9`)*Rcc+4OussZiqF`d3#)se_*ig3zG zTk_Pvd%a;|b{_HzFo3=7I|f0(2FO1am_CydRf3ng-Grc_p4CS%B%V#`*}+oM>3o>v zp-n(C_F}%B!93|$`5hO1Q;zz9q2uwQvZi7F2zni8HIm(Y)W~#r+*xsc+Rf~_{#r?8 zfgKgqP_UM|OqC{|b4l=djiUSCydW$1YRudBFxkk6VM;kGYYZb8*k^2>P^*~^(t4Pan_?osO} z^3S!LXz$Z}7foN2tI@*v$Dqj0YWBp>>d5Brk;)Qd*$KG^Okn45?cUUAqp3}e3!jbh z-C07GXL+uHL^+P*u?Z}e1rHj4Lj@4d=)4U6J5M6z@Fg==f60 zZW;@d&0*hjh0mKsGEQzOB@-B%4FBxP_$ETnS1-iA$3Ni}!I7NriC(uWkysU`e?S=o z*Ok3^)4&1Ze;Q;&b?Fy>u}tYSv`=9n{{<3Az;WJut$t-$js z~)=ZD)28oFO;Bu0?g{i~S2P|X;16yolB|#dl0??#Ry?w9QhmlkkG)|jt--#T2 zI?WtWQiGPgl+nv6o&_!`dA5}kE;*eS3;X98EUEWQw*JzRAn>q04N+4<>C z5p<2(D(;`-(-R*$uEs{SsU#1S#k6IaZBz<{KmnRHT!VNhL4k*o20zPG5*5wD2!bO zijoSkbEu%(zplBzws42H00a|rD-bfFv1V!L24%?-rosXza+TON(!uAmWcn@SYvdEp#1+B~dJuJ5Fv_5`*8TqCP_CkKa@Eu53rs zX?%5Aed9+!{s>36un*|8sXFFjJDIy!5!-PP_k+9qL1FfZAO|JF&`GZwKYUSFk?C#21?-VUsl=ov%7?0#0_HIb?9L%aM&i?EnghYo%Ccx~9#v53 z^YoHYQra3$fFzas*(8j%l`&tSeVwV~$YtzxQLRp8Q*V^bl0DjN8nFg4H!L~tlSKve zOP(tJx<5(vXBI#~^;*uCL`4C#3b3H)`~cUeN&4EA+<3hCK$>dKJdG~)F4g(;z}9mF zd~DEJN>Xi1;-&vxAsy4p^TS9L$>V!!dyzg_f1IR$_Ln@+B&T zvENb1Q!|HBe3_Y=-K4eh2Czm+cPPvHC+v%iX0Kqw35Y}xEAC&FzaqxkT7ZM6=&vNX z3F++Fwbfl|pz+)QM&1a$Bi8=(-|CtJ&wxa!^a<59!U8XCVD`J*pIjo4=Yw{StC?04b>B!}A-!wh<*t>EhH<*o#Yr>`Vy!iwu>~&LYD}X+-Ow9c- z6*HmKPFkNJtmf$S$t8V5sQ2ieU>q~A)K9kw;Ehneu+2PhEKl5jc8OR(3v z!J!$0k?QYcPxMcOYm29>3D`s^dm{(oV=l?g5vpw)&Vv2or+Gp!&Gf^SKQ@8#r(4;9 z<-CJ#dGpU7Wu>Ai?7BHDJOX(1D?Zlq7?#|FXh&p%i@C9DQxiV4J4S@3LWxRM!jQ~| zOwU5iU$&z~E6-7RIF@mF5WB98%BiI;JAetXbB%R&nX`&`?bFXj)vJbxAD@g`?%DFL z!q)PRtnPm+UfKWJ9mAv0o>^@dc`zoTJD3z`q3%emSB!n+zH8QD!VH^3DzVFWrhKiF z9Bvb>ydX%n$!u}b2Vvq9AHGGzzkLkQdwmF$wkQ=D2<0S$_$Ah}r336Y;Df}2e`E5Q zhx`o#+|Wyzql`?gVRzhE@K(@Yu@G0A&rKGbzTx?K#@z?ZdvfLoIFt&ys7QlaS&0s| z!&UXd=Ie?l;nIMDn&OI0y+ulyS!^@wfP0i#9;rnh1Oy!lkH z$pBl~C_0QHw!6sMV*i7E+oWIO+S&@oOgkGyW4XT)CVt=tZ z`aN`4fsSQEB5BR$LrV~#rp6k%#;XrS8Tl@6RMsX?H*cmB2(y?YcO4OUTUjk= zu3*iVT?ad5qeZX%zc+WYcZVdd3(@aWuL`;yL~DbYxu>Z>ubO+MeCsBtHg+bx4i$Pe 
z62bUtphX-(Sh)JjCC*f@37_B~C%bdQZP@1yw^N5Omy=frxH{_7N#j?PYmE_}>6IuZ zqL~l2Gb(jJ?Pi?Vr`p`(`u7-zl+JI=paRo1qNtGI{x5}b2e;XB=c~PXu9bA9-4x?# z&7&Vu!HL|3XzyjF4sSxO0q&*c+N!|!Egy-XiSlkV+8r0m5psSNPI{^S{F3pjpIW9S zNsm5>wcA>J@SUu3oYfNH9eInlTyveeL<~`=)rG&Ubn82pAgb>|mEs2DSD6VM9o<5@mgVn!4NPxGf^f z_wr)*zzwa76O1$ye>drPTO&@=KG}5J@iqumOjICoy48TGN@*en+lF3He-cX>%zn6B z{g{w^(*AC8t?05(9nmtbG2n8U-+$5@Prq6Jz*s~a;Yf@-ls)(~TH}(8bco|$_F%yO z>-;-sL-;5Su1xP2Q}U-Z=l4DOCd(?OOaF_0VwT+@!})+DOF@W-NK z@5p@qXkf`Vn5~Vg;N#j&aIx2V%Ld8xX z9Ucu-Nk%M8sXpmMc*Gv0Ops8GmJG$PvXFvAOM<#@8D7>UW-Iyj=!SQ}@yQk{g7|8{ zq<(_eC;KcPCXdnA0%N2`4ZFMDXNR#Ag-VWvp^q-w2C-xIG$Ft%7EH}o5qt; z8H7ew+nfRQE?5M|)B~0M-A6$E@e0*W!X2C8FiBv-=M4tdAa~HTa{+TgjRBHxZT6D3 z9AOWt_{YRtu^dY@daJRKEkl{;J5I0%iu9&nrb?xclf>B!9&#&4g9i-L21hCL_Bmdj zp6w~E4)J2=hC(#TGh$w|3J149-a9|DaH42YO^Qi*i3efK2*P4PRu^N0nwUP8~Z za>{L_HZIoaj$hy;pa*kH9?_K+kG^I;HYW z?S!3r2j~!BACd8n0ql|MgstV2^O}$gP#GWDBjg{J5=Y8>>A53|%+o+S+$kJ%V+p-MQQN!J0d++hdjHrJpJ{9N(VxSRlQ zP~|6rjx|aFBw9L!BcqAcR9hbkKQ>}wlKG(@ zsq@Y*kIpp)o?A#~(B&yC!3#f;e>1QwQG*a9M_9CPAJRLil4`%)_EmC9+`RNfn#-1^$PX)9(}_Yjeh;LFzqy6dXWTxe+(Y zq7N8NKkezdJqv(X>P`R}uaQYFBV;ajTqGepqp#@vm@ zw1R{|Tej$>Yj+@rL3tH4C4fybD+z?9;w)A%Z)cDztI1R0e(DYNl}o4Y$f zeiPZj99gI+6|BrRsCIwMXIU>NWnH8K7u?%9q-{huv?Y~JU1mF?yzlmA7mMyT#7?rE z(505;eLz_@a)p$n3(1zG1T_IaT~I*RKJ92d$3BrrfgAd~eVSZ`t5`w@& zIuGSax0+=>{{*URX?xiRmBK#|xEsFBtR6RSxpv%4Dp11-Hq^GWD@e@YR7Dh2RCI1Q z@6q*I3t66Giozpqm2j0z{?&9l`U~$IrtiX;$LrApdH1xF0}Mjn+(?DNVJY4y2AT!a z&}A5}p@@_8Z4fI_VG-%>N_b`kY*k?b`qDk9s+W<3@J4&WfvR<-g!Q^US0;!G)I_h78d74m$P^Opxu;? z>fhTIxNvCg72Zq9^a@r}ez4erfjmv~CMaNI7yehmtCZ?>P<~CjP!@ekwny~TjL2(F zNhnPlj>Vtpc)%VQsKlImExa_-6!$WsdyWot6OT()uupE zRigWmC9n0qY{T*4M3naq$Ac-KG-3SRCx<^M3KFB+24IQt385-L*{oV$W{|**8%zoB zJgnHg-YkkiE~y^RZMRPS;8Yi|5VoF%e?rOj?Lt&bD55i2GKdVaY3@`#?7}bowci*Q zt4UYEU0Bs(=)D+S(2?v})0%ho0+`M;0G>_x@(DxNNhG&9d?rp&siP2G+yVA}&Iq8H z%|E*3kpR3@Gyd2(;P=MJdyz`Se!5Q4@XHf%w8ZjVwYCBFNA z8=i;0p}u+|8K;`D=q-5qn9tl!N93Vhvmd?{o_5spI-`1zO~5PpqQ{^~<~4Wj0e zXQ+bjs#KCQ#yN_&fn^A5NqhpBTwhdsN&=jzC#F|KT{#T=8bBe+W7-0SA2~a#vz-iS z7PZWD6A*cPRU2;#(RXe2NCw3#{<*9OwtC6@m(q5gdYmbn z;vQAN0EsC6jpxT+*AM3w)Ky$makbCP_s8FNb)BuKM?~Yu zV|_LYJA4Q0EcN0Gd<|XWC36PDjt@n(*O=bmV>C?0X^h|BuN@?+|Nr><>aeJ~uWdy} z1yp>L5D~EGQo6waq&tRGVWhho1{DPa1%~eK7+`=II(3k4hG7V4hVJ|h&+`j>-`DRS zFRtU9v(MSF_S$RR_q}SJ;bHct!xo%FJ0^RclNU7YB@_EIeoNK!isAMpFxR7las^X&0m|>KCDqicX@AJ z|HmYGQqC)LuV<|=2Sv_#R4mZ_n*noXmZK%6D%`Zu>JpT)jsc#TnL!VV>UMN`N65a0 zF>|9QrSGYvk_;c>D-3(BE=zaH*k@&+4bxF5O)!WL>!?O~!=0#*f5f3+voVN6mhT63 z)-va=u7r$C>`L`AgwmqI3_Du0HWft0*}u|LgA(rMA`5oipDBwwoZiLtU6wNzayHX4 zAiQl;gXfFl;QqwbYiZK<7>r1E!ISh>ntHBcoN6$}n43qFW#j|@$6s2UR=EMFnqqT+t{ z^UiA5s^o3fQfIw7Z)?ND8~yj6w-SNsObu$BS)cdK=_3>x=;_TM`=hHqQ=i9Odeo1R z(>s`i@$k(W$^Fg(-{vsmhQP4DHZV4|KU`At528Yo{J{7N-2YMyl!%wW=zNBVGIHqP z2a#yI&8NR1SN0ht{JQK#7?1*Jn5-Y~CyO}2U>FnS zY3RY6__4-I?`4mEl@)DNbj0H7*k)Ikkg#9fy20V{wEm}4fbNT}*&Uk>RXlW^?%+46 z&ffO=D6FC}o8v^pL6CvhzNnxs1#?uOB-}0WP1P%7qP*{P)TBW9fr}TG_&w7J<2Gmc zjmG!W*xzcMXTztZE?2un$ zvifdLE#j6+2D=;CiienY=ZC}Zw(D23w|;C7>CZVY40h;yY!4VD8)VjCkZzmurDV-p zOfgM}QS-d~9m!{`=o785oenKSh+h6XWh# zJ*aS0$heqzm+vROW-gK0#N#IyT2!1ZPf_&4kc zhe)bPSEBW>n$id9tb1b9!Q85f?KAJ=0PG}wtlZ+42B40Eq3NlxY2D9~PmH&teYaOs z??$ONF{;}wle@E!m4tk@lNg?}aJ*ybQRKRNysMwtORCFjF{^&?(}qPR%P1~a^x*rW zl?0VERfUi*rvnBg0ry;!5Bs6aeXu7i3zCb&AD=62tUGY@OJ3f5CE<5XI&;4^ndI2y z`kk9OZAxP6?=jYgc1y1mNr`#FE&^0QN*(Opo_Cz~RzL^28Lbj%B2c2HePfyI|7F~|k zR}xVx=tbcTiS|!XIbIeGn;*V<6?BVV;-*HA*_15@d_9U$C0ccAklFHG-#j$vIbs)=LR*c{m>GE()#q%&SV z2$`_l>FIXGAEe6e*y%P`g`~EQHn&%7v#DNhT>YwDSzG>$5 z^Vis9jYrn?*hPR=j>3(lC1SYQD_PVg&{Gm_Bwj1aW`3>io!Dy!PGQ>S4cJ2 
zBe@;0TzDX_AZ}d;aT#9<7?-#D1j>GFfs6y^>~NRYO4G)yw>cN20ZzfnGt?5F+M(rG zrF`kj#KNG`Ea~DQ;!wez;WF?GvEpk)3$ZIdy|zl);-S}4dEE5e_h-yZ^ow8n9sZgQ z?duFDz0If)8^NRfc#I!Ad0Y#!!*?UBTvbQk)a*!sZRI0+>2qZ5C7HjZDqi#2n=HF1 z8!NqVpj2WhH8Y|J-`rbv=<2AH0Tno6&9Z_wnYHoTLk8^vblgW;6Pr0|eX2~#OloVI zwjirV)t4DCOK2P7?`U<|=Sidmh>=#q$M`g^3Z^@3`nsm8;#Jmw^AfdmY~)pS`wsa7 zk8b)XgS&429wy&oGs!bZL^{-ODYXOXD0R}M>SM$A!K{nFz;p4_u_jER>ANgmyX;m$ z8>6`1BU;2Y7nM;G&YFm)O#1;NrG14gckV@-w^Fd{X2wSJtrHi2-bl~|e~NIUSL-ur zxUD3fV8+L3d%SS(9xj-gW#(GT29>t#Wr}~ufyvK_%kp4*sM&N$R()%m((JEg%RJ{i zoSBC}aO>qe(A_bUXVf_-iEbxuyPY1ZRb32El(y`;IwiGp3F_^@DwE3zM>#4;{Aj|za{J1aMEKT$%4mx-pcnK+ z6%wqH-1WUD4X}{6k-v_4H~J zzR;WFyu0$Wr*7*l5g2p@5$6p$+8jCJWw-HJFNr#|A7s>jnO#EeJ3cWkep7FVm-u=Y|YxrcJB8LAMCjYaotLibS=u`TjN06j24V(XOg?T%IXp6ci(1c& z=K4lxIfHTO3PKIzTLaVHd$EuuiA6u zn#pkdK7u!oarv~eg(&ULZ=85H7G-a@f=#mfan{@OJ&)o>N)uh0+c~8>qf><04U1n> z!P39?JKFs!=f`cy#(Hi7ZjPj52G5WyEa=lH2XX>hRw`!cch3vIq6q8#(Z91a(2g?A zi&(?yBsseL($u7Q<)b2(TfKv3H6RL-$Do~&P%5&uzQ;`2#=!0Ny< zksy?PC6JjU&qBfVT}OsKPLHI5ZMiK4^~t&uhOIE=4wwm!2YeRZYFyZ+L|3{F0m8b}s=NCo1`#vCAf+#)wUZ`*&_S!*oPAW#ufx`J3nG!`M z@^ij}S;3P2t93ZkZ7|qIbF@mY3$7MnGU7Yo4N9u896*bjG!PLBsnuy}`}%YBy!=r} zrl*uSAJFh3QlsVFOjUq{{q;9$CK zty%6Hl3`&+pLC~sC-XI4t=q|h^&RTQRxKA|2MZ*k9Tgf>Y;>evg0Kond>!mMW#2Urs~Cre#*gJI#*O>75)PSIAH*!g z55a4808t(k&RY>z1p`arbhiZ4;ED@qoy!$@tC^R$BHXTZ))gjEHknKy`HcTn{fsQO z{>FeN>iTnjQjRiQ$h&;yS0w)&?EV!1B7O!aw}=SJB43cV*R`&|zUD!4dCSEG%puW`h144O*Dng9sE?Gvs-0Um#EiMmBJ>kA_X{^21G z>lzcVA{U4b&@=nN3fiuO9TQcQvZpm>O}9xS=kq4WAeu~O9%$8yyndAw9uNx_;=*s} zy52&+2==ju4roKbPVn&^n=k0uC)<$AS-5u~{Lu7Vb;X--3#Pj2Y z=ct3^*Tqb$lPB&@b~`6m!I?+|h$pPNUx!ft zbje~*%E|H6%#_fp8Prfz^zrWVSTSQvS|*KbpYDF};vuMTFG1x(5Le!>hp521&b2gy zLl14fa;6594uS_~R|LOMBX9MtxvSwFg|BEgK^nB69CmZVKOH(PcX*^NY@>cxWpz46 z$E?=D0;{Dx8@azZXEG{3F3%a+RbPEE-M`(dIQkI&#pU=khf2mo(mpT{-=6JH#l3GvdtY zHv3iuFuegH+|%A_`UDxu7)8N_$E%8iVTF(S?9n1OdB46g8xDuWsMvP=U`&iQQ_Dy! 
z+)x*rD9CZdG0<|^B}o(mOe#40%%T# zPm;u1Wh)m~{hu0bq{s;__`@CPJjU#-zrv6;tA1DXd{CFa<_23S^4-yvNr_H0A3{ag z)hJ?-)|9N<9$r+2+{8CCyxc<}GFUX19>3*qxBeS`sikerR|<91AlxJPF?-e5o${}T zI`ebj2bjLyM#{4w8tn^ghWDD^733Qwas4TS`M3U11G2{|6!Cz-S$1|k@nSIr_5G;9>Y{w36TRX z-Ij=)RaccutLn&Q^7iIP8@voFzhJZXWA;407W6Rr;_lD4``6g=jN0A*Iju4A`60@4 z8JBzaS}XvKt>nf}o)pTn)*$-ygSe*_k#X<%WrB~l`k+-42AY0HON#=DUMp4OPhH!r zAES-}S;qjk2=BgJm4%uQldI-J2ublr*!fDOM6*o4EY7K3L0SW0-@tZR4mp~-=jH(6 zTx-muc|a4I7->k*Vw*S<#Z;4H3Wx^e`#-96Tu@zYTeTKmiGI5DB4Q;cxXVD){g=px z3Mhy3$|&y)_GsZC)AABBIaZ-w4!La>3m4aW_v}9BOD51)GkujT);{@&{&|)m$M$+Z z8Lhk%TKDc}f?eb5`~5ipsPKjivg7$(H!SZ^r7oLQa)=F>4@-~h;xG@nE6BC+PHcbA zC<|30hp9bX5zabhTViOMW*@C9^HX)l(hg*WvUbqSLTZXW6njISkNDuc+8?_@j!crO zUa+m%l@BZ{0s<}gaxM~sDj;Bi1@_@2>rKJ9v&fhrMo=??`(lmDHhd_aLwgRn!2OFZ za(oAXriYX*;(SiASTp){;*6ZJiIJUztg<9JVsAmD4N$)jVQ?sVx=zY?s>ci3WS)*A z0GFCYY3oeunY?X(cbGF z?+_xg;jLDx<+5Xi`S4ZGB=#ATRROZgfzKsI$bksRqg|zT<(vzVo1B%&OgpSNm*@04 zbQNJ-EUZZ!O&_dnMe|*3b`Jo2t&V13=EDl<=XE)C`>oVV@h@WJU}iX*1O07r&YEAO z6wO8gw#*{&q{8c8vGW3(c|~u1yzKXcdywOGOoy7+=0=dt?+(dzeQnle9FMji?E~?l z>Ra@91a4g4_GI5czfI4|z~&h74cS~x_C^S^T4UV7w;tQmpQQP6dyso`km z{V)2?qd%^5wk4BqgDGu8#5TUaqO`wJJP|=lArX<^5&g7DSS*}TiDP@cN54*pFM!V7 z^qHajRUuIx$Lyuy!i1>6+A_)x=+b~gLRSUWJ#T5Ws#NX1EJDG>$k6zdh)t&MD9Gp# zU#f(NV_!cq)b8kBq39`eD13~sztmhtXe&%vo@a#WS z?aw4}5p~947mgVz6;>C}&Ai_Uw9ut9({p-mE8>_yh6zy7u*b@E3iXVGkqoPYzh1K@kzMeM- zD3ZfPZYWaWM=N}kbTCK)L4bVi1wW>fyhvUKCwDz*p~Qylpd>06`2CHCln@A2p045R zx5=xRl_gIr`faXj9UtOWWP$2-q*mgnNtqEjL%0#L0-`J9e6*30^kFREI^S^p!tsyO zJnN>G-qaQ9IdI86yB#BaKpAtR!o6@swf_nhT6bb!YSQJ5*4{vW`fDWwW9^VITCoea zj(`~F)G{SFZ~Q2HfkCWXL}`cNKU~`0$DJNTVcuY@u=+0JG2xpkl+^AkN;+CT%pL$xFdds2!Abv$?Lfh&nB|h8mozwcOn6Hj@@kcJ3oLG#kD(yY(+ow)6Y*OJ8U<+ImOSHr;v_cL88Vl6W5hY(3Q=i+e*^LNS+}P-y^z=Op4gUdcQ4 zC^s8GTUqqEl$=Tj*xa!I@{kzYrtAJe8kKVgbs2?P*F6pvsz+RoKc-k*j?#1d=8`5q z?id_G2D>l^uqzdvGvh%l1348kZ?)Y8U7#JX1m9Y1r}^#%tMUQO!XFg6l^uK-ww)I{ zO3n||8dz|{lEnJogJVx>d1^F})z&+bg&AHs7Nhww7~qgxv-J^KoEK!e*c7U1V1$lf z2E!bg6_N&L;52NvUjPIpJ#Jw!-Qo2S$3BGw@AM+X$-xTe;jN5iO1e;*CH>_PdzIJv zD{^2<-8nfIl9OTV$)jX@dD#!(3Pl>NJZGzlvG`o|f<^r&YF{JsXFKBN#>?t`;Xf%- z`@aSZV@Gq_N18iv3U_&IgV^ielz<{Vttp!r6yoQ_fXbyRm!!~*?A3}R3r<5L&`ihL zf-(|c_l}N}6A%8Sc1~3`^1)DV%mPRi%n~k2DUc3vXh=>sAXgx z278d-g;Kx^r;k4(yO*DD5Y8^4oqtVtI^Z=qja7@6~q z`moNo)&%`(uC~@>@lLJ{@vTA9LYC{%N;q9~Q=YcirS_&ktA7ZhUsnl=%JWxrtd!Xc z<0!okQ66S*W-9Hj{Zf3rM8RGI^D@F!3_&oX1ef}2_ss{9%Tk?)V@UFH0rXfGvl zbjEr5SBWNz$S{wO3E*$D6-#a|oHny?mL(h~X`j0Eb}+Em6W!vOhaRR|vE25mq_O7rP%knN<QWh#6cpb;6%+ z9Ms5CjV8Gstq+%n3=^Mt!+~t!_^Q?2{!{;mANqB?yTxqOhnVX=1D0{f1$woQD;G@? 
zW;)Bn>wakS^E|eAw9G(otTgBwp~jg18J~Io_tKq;Z_AbDu^}?Ts0CD<#%)uZ zx#CfEl-tT}Va{@SBsE3E@mt12pQ2sipkt}E00?@(h8&rAODv$893y_H-3t4Huh8Z_ z7CQmdaNN7XkJB7XDY*x19-?U<4t3>wqP+QKon8O|X2qq*eZL))`sQp07Vs#R5?qRE zH)P=*nKb;v*4@}XS(ZbEx<~rXKj=@AD-w*`oZHze90P`JdJ+ogkRMZ*Y`U~1RFF71 zxG3KhZLmj;*deAvLFmNWV-@RSMD=;weX}ddSTn~U%|km9Ttk|@FiQ=3@gdYN5^uzB zrHPO(uX3)ASZ4(HRbQdA|Fra$UtpoaPH>*T5Xi(a4%SWBkF4nO&BcsIk(N>`ZjTPw zym$BME01lX_2QYa;mt8E}`8^a+D=XHpm zbKFiq9BQ$SvJh=KP43!4SQydKKzjLcI*DG-;3_T8_w8#ihPtH?;drjn_ZR>Iw-hqp z*G6*ls#0Gu*QGqRTqt1}FSa>LRd6!@QG=UP-vId1WkPAUF8{*_HrtunD|C*c%bAjO z3(zehKyt1GQ2YDR+cLw5CvcKRf$xLWocl5CFG>hrHsT%Ht8;v5cnfy#lW4mAAb46P z?ybhB07+IFx~#+J1-iFqm5KeH;zVzOXTLCAUEe{1q*`8YquqQrAXLUt_v0|T*6X-o zI89a@)P76fdgO&+aWZw?2L;lKpjA=~-`&v;vdK)Nys$OoOM+7v%TvZ@;{ zxzzgAR*k|7vYa$}ZB=wts$s1C@Dd=VZ$L2k#2 zYzA|`eMW!cD$4TYA;yTi1IkV|7E#Fj3SHO0XbD^4J@^LV+P^5RKAlQ&9K!O9bcR5K;VemKp=ReV(e+;1VEI|UlcKFRQE|(VB-P_MtA=X`& z2Mc_mFB|F2NDeKALlKou995{%VidxSJNCcpaO-JQ?dm?kjdoiY4>A?@B|jQ zB*#AnJz8K|41@)ybw|K|Y-U5$Wq>L)3&hV=>Oa7ZOw5(EC0;!-rSCgBafGa0<%5JN z4KPBK1!w2ejC{PiWo`HM#D-2pEU5UyKn144LWm~(9%OVih+TjI+#NWW^7F4bZ9#XC ze7aCA`hxv5E(TI2AK{!X$jrn%AXFY=K9E}#z{c8HLh+N#5)k%uUUbM!Chy1#rb}a| zR(^6x(UvABk#+6Wv+D~78NOGn#L;4PmSEQitPoi3^0&4`*M}qN@n2BO8|~qj3rsJK zwjHv&%3q{w(2O;@t*3e1_c3yr$EG{I!t}3X*g){P7Oc;5I-CF_i_r z*3HR#sb<@%ogjQgRBw}_*G|*N>}vY$SI1xIv8wX_LUR5WEBBwjUegdc=Em&x?et5w z52@G;vz>+FxY!#qZ*4VXvXQU|-?l{h8$zBpW;}QD+sf3n{tO#jk5ZX<^@Ka1m9_oH zy>_y{IxiHLLza{;2Qw**aH%7ihB?YFM7&lDbJpXZg+FN)4J&i5c@|qLxgvMENLj-0 z1gw-a)-0r@E?tR}J%ZHZa5FU@4{K0|z!# zUnkrd*>x69JY<9fBImA>CDpeXwYB0S3@O^^+`?c!M0^lav%01m))fgZJG21Upn1Z$ zs5t%`ai?mB`qxGn?su*ENQJ3XpZTOaxBOd_QAjFhw8R8A# z_AkBFB7<8fF}Q zx&^;>9l1wisqZrSgZGt6NORI^uFok3UyMf-Cg_jVJyP!do>UmLFqi0s;Md>OBNx!( z2_=v{x_{j#Mm=Cc=rHU%S12w$#;9i*nU3<1ok%0vi9@;uHrv>3W#&+IbelfMyw(E%o-*iq)a>xQz;~wjjQJ3Z&2Fp3=!+Kt ztq9n%jEq6rkHK)uc&>uIi`ub;_p??|_*ocD*q8Ak=rcRw#h(4l$RMvxP2*E0e-(kW zP=ehpq^gf&p(n3Jq&R9fd-p|6Yj636Q0KxyJrAM%gJJ^?%|@Yr5gUFZq5KP*NP6)& zyT+S8L zH8@JYavMXGQXg66Fk2av9~_7yyJuUfKTm2vsO9?%@Nka7w`euqN;l9pIA`hi-`Jgw zP*b|zIW_LSslY!b>q3Yzqlmu!Gpu;Mm0B2PATw~ry{7D}g{I*{<6h3mnhL3j)bN#$7;NTxMHmMqg3dTP)~=X8yMg?= zda95lhShEEx-rdZ2%MHqb9Mzhb}WYgZ0Or|r)z|e=8ijeyDhS+Ks+D}JwUq!;)SST zCdf5GQAcYQZN_*!6T(-NYNP%V$DWQxYx1e~e&%aAjZ9_%C7Glp&pNTMDw2NMbGj2R ziwAI~X~VD0LKH0N={;2egOI5(h}bj^Vn=+6!^f9)29j*3GBAUbKyhe>8RUmcM_VU5 zsW7L_I0KY4_;baCh4w1l$E)BKY1QsZh7byJ*y%GVDD1bpU zi4c>1BzX<%Xw_nep~4bzkm6$t;%myU17vOYtmeNySgCFB2OJ_JjB>Fb$3!lh++LBY z<>ynek*5k1FX8z26iR|jLk5Q{OZ_+?&yy9eblbc?dy>r6p-tZPB(8@<~H%E$h!uJ`T5vQb3xv(nIxJwP&Xt$NT5#g z+=)>$V*wE}Ed~69VVh4kqr5O28&~4EO5~$OMwsJT#hiotuOx7u*t7K9&TuNTWNNTv zMYpa4(I=vykmLDB6|WnhmFp;ct;d+Htpqr*kv*P0N0DidLBz-Gqqw~NSRfK&gxq~5_&g|eXu=v6a z0tkJv$NbwWf{RDOTl^Ty(#npH?TUN1HBu!h=lw^(v3ypGWS+M=jh_O66Q7bwY0%}D z*Ke%(zda%!b6Kr9>8os8`*3uLZTj;GS9!;a7?GoWjY4|QzlM`2wpIMdF4@$M9I9!? 
zoOzjeA;&ei0O9e`HuKV#u`5}_(mu7lBIb2~NbY8Oo^n-Wq9$LP4Mu1Gn>zkVyXq}# z3rlRnP5`LseVkA2hc;;{Zyu=prv2x3)l8&axPM0W6vMDDK^;1m z`4#J(>a1f$=MdAwzD$K6)**zQ|7B*B22-2h%#9}({e0_B8+9)|atj33$UiRUZ zVA~gCQ2INr50w)cDqRP*he&Toc^BH>pXqT-*@w$%l~Znq>-oD0^uMkV6(JKq_1XYF;9_lC7d^*Dd3)awuePO zlgoQp6)^$;sdCS>cmpWkf-~JBudN@9g_7-&4U%eNMJ8Yar~OlIx4^9CpG#GE|I@6FSqBEUEiRtFf#&G7>QTq4RK(hYNbpCSSTM{`{%j; z&^OA@K+f43>~Fdl*}&6bh6D=Hu)nU7RZ9#hgX2d4y+%xyZL2Q@yH0cFhotx1c}IKG zq2p`-e=8`iT>o&#uBqyda!eE;Da!vwW6~DCC;2^v`TVVsvLnnI~V`<m+8p~aApo-SCeViEI4Wbz;8(_4bt0P!6ra8kDS*Y1XSu?xv_Pn zMmix6?Kxc`YDoop&U_zj!_twO&zggV>~DHmfE}S$O*~+AC?v2<{)s7DWyo_M1Kk)r~=npqUMa)@>ylzmiRvEMsi!uE&h?<-M7w5tq0Cfm=TS3XkPIHGhiKfhM{5e+z@e0=7WAZ0?`zIPq* z9E$-M(cN)s{>7;J4-=%~M^CIr0nlh0Q0q4}sb{J+&SvKzqwVtbyUpx#r1{zV}?(^fp5;x_SrILw1OlgT2Xl4cy}C7&D+k}#x*cW(|S2tR&83+ zGA56`-|@I-I|#O$%Owbtsy%kLbWagy z?kJHdU!@Upmgi;O*mFw>W+vt!s!(IU%ApY_@aB2rdd6OAO;dW`kvcvoS? z?x-9GitSC}#HX-jj0?_-Qtw0G;k(LmKm;5AdgK#SAtuXy06irzmur)M1<-gCxbjEm zm-uUEKh&zI*crrzr&Z(gu z$gT*sU;Mlolee45LQL?Cl zmyu@)B}5)a={~uu6FlM4Nz#sR^kh0v3as!8WSt0MbJ`hgR|a5y+7sR`xwdkx6NaKf zf+9%~W|6#)WJ##E7cfH!4&!bH^^t=)4%YQCCaneLnKcale%f}oNos z>K3sxkJq*07Aislr6J*_eq+79*el19vcvS^#NyIf{3hPI8e(|Vzm67Ir$$r zG_UxR^K=D7WcGCqUqadul+-sDZbEtWz7!%9+F<%?k)-zaL6`QnsPK zm?gVTczut6jR_2bKhp2+uH z&)nSV=hb6~=2Q0pJjQBgNiY46SajwXzpE`=hG?3C(_s0;Y~)H==WEn3`rXS8&Gl@^ zHP;5X;Y+KtG`IE6@`zn&k_&)L7`*wVAGuN-Ml+dtKftyd4Zwz{6^S6SS2?wM-#|VI z*2~^H-oe-q=~?@79ROHt5N~h}gWV#IOe0e4a{!r|8m97r+<-H>=IDqM ztq6~!>8KmpV=9bZE}kR)O6g)YDcUn7OuiwR+)dwfuBFH)s>J=wjlQ4pI>|}QR1j26 z7#cu8>S!QsMQktF0nbermO4Rh>GUkUh;adHgjjx#nd_2u) z9qi)eO1&Z_?Y6U6+mVF|4+Gth2%{?3jGSA;fm4%BN!rdCD zpT&g9TF9{I+kxB0WoPyzld>Of5u;r*Q2MW#jmnnx7w8U=utdM5^x@V|RwysDY|G1+inFS40)lle;yOT~ECA>T6g4T; zS5Taddc%(@#-_ZuV&es1ew@z|mCrxI9I4f+(nOIiEqFpRJWPX?j z0e=RRg>d3F`T5G-PkcIXu;-dWERP*aL;7o|lGy&^$T#$AE=R}XnNAkFSs)cJ%M5R_ z?uejFqcZFc2KBBZ)-CU_fs4@V4aC;I{6uf7L}XB<*JyfI0fGkj1q*->W+d0`AaR?1 zY1l~beyW3hj3$|Kn3dWNLk%NPjJ~0n5!YFCYY{Pg57IcNPNt?_f(L*Po^OiZ4mI zoacC*izNC5G8F5V4-{(7>?M^(<%6d)Ig0dpn1=sVlj84r6sQRB$-R5m1zve@tvOEp?H!h5vI^Ujz{Ppk8 zKWk3e6@VF%jXzCmJAd`uF<;`tEmPe*_r?=N?dsM7g*)w6&$q(FbJjNgQ_uM?I=?)U z35bBw-(M`My$32qH9seJMQ&_5-y^O z4&?`gql3AT#Yy25MfRWFv~Rup9}n;a9ssiaI{)xb5BLkjmt8hkK6?MDEoAI1noy&8 z)6eTaC4p`-DFS^MxLFC;I`>-VUcgtPfy6PlwpAmCj4pj+39Vth!{R;4vFG1@3BR=I30V z%aCu=I|Z}0YSflG*S4=j${DROTRO!1UrfmVHIpUTk_nz+hVm1O1CrPuTGC(qaDEz+ z6`%E?=i2-q*I_&M% z2@T1QwV4G)p$0yuQTAHRg9zmH>0yOA2BenS%TUkI^QF*4{n}z)RD2Wlu3RL<}7<8 z<@0~rspQnFj)gU&RhPG?op&+7A{%lzwI_a^a(*^QtY7_Vm*E;h@9|qL)pOH`?1My` z4(#@h5qfHdX!>_f)CW$Wb0_Iu?EE$-&vhabiyy$JqF?T6Zpr(O007tL%?z<2V7ME_ zKQ8~K19NTyD)SHwuogHKm;+93u&!Q?>CQKr^EHwEP63IX4RMmh^I!Da$mAROg$xW; zZnL&r$vHyU!w#+K#Z8K)Uo_30>oTU>7i$-yh*(+wSizg=vqYB8`ZHFYw*HoOSvX7N zZ*M_x^S*7yg&Vi@|66zRoS6f|I_B$aD56_wB z-#&uubN!7oQreF_$@c}1lb;a$cQW`r{0q4G)s4rf-?(b$hYu-{x2=UjYOu87TQ|xr z#?oijr>EY%{%ypF&q|m>#XKGVT}|QK5dQrsOtgL+ytj5C2U9cF-Fx0Y zfRj(?`XJu^4E^oab6o)JHHzg^{+eYQIYCh~j@0?r{yUL@olFgwgGVA9Tg2zDo*R+u zCj@wl!ldG&aLNK(i`%VS=hgwB-=hHB{#H13ne=?uoNHbAH37TDnISQJIV|f(BZd4& z8|d$smH14U8)3>!@%*<{ajxyNgMvq=-jtBKE?%AM)rgi3_>>Y$3n|DwLf;TMsM_oJHIo zKz;L6GXnF}vO*aDPTIe134Fz}y*S_iW6`h&ataP|k!+vKu5tg})~^6y7VO*In+N@& z1O7HI%9Dg*RQ#P9h9TEVeO+N?BaPcu&-i;lSh)p1TA({6pYrre z9FJWZkp89?o@=Ai4;9_H+R^a$yq6Fo`D*brDfEWdp8+&~cV<1|4j|mdZ#65wx;lul z>5>bN-|B&9?pcK^@Y($xC5_%#_mR`CEv(;Qru=RXh1YumQ@05gQfY+JH~U{!g&ku?A@w7a+j3L6Hq;Ekw<{U4TQ*#?~zk2Vkyi!UTOr)}l$p9es}0rS&QX z4Sl=-`3{qzOa#7Uq^tl^(%9lap3x?dXY0V1uGU(If4q(b z9W2Z>s)Oanx9GUDEP#XNZfttglf zIdOoHn{_Aq@mIcXAgDN_F}aCiR>ThRbDH_uldXU}#bMp)p}5bt;WUI=uUM;Q*sKho 
z&NNm7lpO%Lgy}pEl5vl5J2XO-kp((#kPo%{i>zHOINXHJ(J~(zF;#(%TyL!~tg_EI z1Ty5uGIpUtQZ&NOjs?q}-A{q6Z)}_pM0QMM;j$x#6F?*P0&?Xm;i2NrXKGLU!hOJ^ z^@9T1QW?plbP0b9pPh|TpxW!@tR#0okk83})>0p;!eh2-E*{DR#n-stoJfv;<)=l?xwnfEdb>So!Oq=@7wII&zO|g>`e{CzEH?<{_u7gtvX$FWY@Yob!d( z-``ff>}gtC+9lXVg98XO#m_65eIfOL>|RmbiIB*TnvH`cYhbuGTU{)NI%|#=^9p6c z8A|rmBQt>rDF8FV^6@Duk-4rnNYlAZlU4T(K%Jmgp)1(4lz@X%SVZ|gz|PS-^*$a8 zrs9SMRi3F&oe61uIb8E69V^aC+8I0l^JO7Z=qvIH+(l3(Nv&9+yzjjaOq_C%yz(iJpr&+R9(V4MK_a1ThC` zzU*gPm9izl>ESO9gs73vd87Kjtu3*E1ei0?XY#}QWO95olxoFe?HT9&$?s_)vR7Ya ztLKDK25$hW8<~#UU9&0|WCTnlNlf>xg;$qpy^hu++LPP{!7E!AFNFbc!IXEpRx<{! z3-lEL+J-Uz(VgKZ%OfV!K;kWCAi#1I1^~Bc?{Gaf18|I`JyO(Flc)Qk7^e#ClqU6< zVp{8$nu#FW?m!CVC&3~>IV&1nu}Vuz;~RYd@?|$F2-+j*6Zoj|3GX}%4l;v8_b3gYz zmWkPTy6TU%-N)(%f^0YrzOuHvUo+5Q zkfoe2K_^$UY^cDqD}@o_Pb-pKk?tQQ4AsJ6V@g`b1y};LP_e_+9apUr*P1<23yvf$Sa!#~H^_=R1CX=FuJVIFnJR{gnRa ztDp3r+WES3Af-`eZ<|BJ1*ijm4*EXmet*Az&Rog7@Hi|!1nA{$Md`fFCepVyHdWDh zSf8$YFqjeU^2GS%J`l)g^i`>iFyyx43I3?;B?pmRO*YD{b4IJ z4(J3HwYeh~fWTA)8wWM=B2jNG;o9K>SNXe73bYIYhFBI^qs~t0PiV|xIW$^=`ultV zY(ffw*;Qm7d~_2p5pXKrg4n<&eF}Tz{%?~Em`$Qd_3GX^o;FOGOwJGKvuz9L@WFby z1S{wJ?hIIhdGfI_E$J=K6Pz->AgqX}Jy+y9S&f{#+Hb&1aGM}>tcF!WqX~rsh%tB% zyws|#(%8b`-WJtCFDI|N(QT%wzkc@*0F{53SP+4+H(fV=h9qSbkffUBE{)s$=tH6~ zYy|o|5ky@FzPcZN$k(W=gxphJ?@!u(U4L!U>P9)udp^aPEgiS)LhSI)j40~XxtkeC zEVp2yW=>TL+$+{vT14}_i?EnzunkFo)1LRZGCvEf$uloqi-tb?3KiMaHA@SW8EwJ| z=bPs-RdM`gNs}--;7=?iXF*v%%|lvRXbcz#1yS=xVYlQ?$e%-qwxgGOW!>QG?%y8_y}^we0w!#^VQ914!bk1 z5SkJLj2{1Ty)JCabB>Q^Cv zcMzEsK=hyQ8p|?^tpxhoLC9ib9X;(+z^E%rrOd(5czI08wSaY{8 zPm;T~3gO}tSHV&fjriw(Vu2J4n#VIZ0=#i3P1sScFjq`mn~ zPSJg-Lmgi%Vsh$f!LcZmIwsR-X75)&Wft7e?NCPgGt47O|re*&C^= zK@eB%*YsmFg+%@E)ZQ3@u|8nTJgE(0kUFJKf@u!ar{ulRe;2&}amNUBCCB>_)SC_h zal#Q53i?()5SmzR>4@gXp2^yq$-DPjPzaEi4I~rUhf;VpSBRT#i3k?xL`pi?lpatk z|KY~xd=YD2=yue0rFiaTdtPKET#PAOO!vkW0d4LlY%Et-`||zeRxLiaQso7-Yg1vR zbZj18vcM_L0oB^i)(`@CAHE;%m&V;NS+=JM(F~o`ujGGH<$_&89nEkm>1B z5@axLeKeD#6yI&hYOb-7=$M@F(mqdldJ~F796}qsHqb#1+xm2{9J=ST9^bE>bJ?SJ z-;Cs@^|O)6D$UBeo|ihCj^*LH^7g<94YNjJAI&_!PjD$?3(bjOfiZP>w4dm?07y4x zPDDlXrDm3U`m3n?u8!1Q zOv70_vxzWt_$U_&7hIj)7vGMkSR|g6>$4H`+@gBPzsNP5Xs>3eZH_X!=5@Q37Om%pU60OwAoSb= zQUEg48??KA0=`If8jdscmGM*MhIPeskPcX-&$8)e-J{}8yu38em^onKl{%yDcC}&f zte)~6@DHfBfbn`qfn4d1`(NtP3b=s9Iq`8cnJC%?`e(6rnh428ysS;%tVpaiK=!_asByD%)~4$?=4zoCx*l_33d0ltKKG3!!&5)vTlQr%$Cc%SiA zb@NeEUll)QQ-$fmJP&kI7pmQw0r%kgbgpUv_EabJI|y~RVKHNp4|i*!%t9jqVRib= zrl9~KvHNwYn&-`lhDOgD8{b{iyh^>p_m)Ib3#F#Yz|D7NmOJc>QorI=+{~Rs7fa5` zR2-|dZ=g<55laVCXer`or*i-`z8a>_oKWK*#mRX!YnxQoLuDK0V;aVN^u}?!*?UOo zhx$M6PnIUi6=;(B%JHacV3k-T5KUBvK?=J~6Z z7^xTL%kO1K7OG>Y+KQS z;CZvgzSxM5o z`{lEQc7m$S?1b9WM?W+95ax4o6Gzq)Ecb3MR_aks!~5e1iks#ejIw;L$Eh(T*TPp| z^O&jhV958y&mqTu1MK`GfcAci1{;iKaeMEK~ss> zb#`-v9u%gGIk0TUYn-%{of`Go;Q=?~8c-sdh@ueiPxk8O7XltV#eJYTH#ShcVx&2ge|wiG~I8_#N5J9jqOW?ngE zP-;JKJQ8)&YKdyVZrXh3>teKc6ip37G=gpj0%1r+6DVQ-bl4j2GY7$1sTr;9M(@WS z?B!z0trbvheP2#fB9LVR)O6Q)tByn%-;wxD1J>0Bf+CfCZ>QtDwXT`ge1%sa5HRbv zuWAlignn0wZljdDjBV?yxeGiBZ)F5Gm=7gZGY#v>OMddI_XM;jnBZx;m2SI#$I`?c z#z%WS4N{F+B{g$gHE0wTIZUY&mtZ^+5Tn?mF!F#nBpdWYjHV4NuDCK8r*j;bb97zN z(`j&ZZBy2Cui&~&xjpXx>dbe$!)d>_b9&vu_kN>oXTQmDW4 z(nb{A^q@F#17zOS0_c_14S|T3)nqk{^U`Pnh!|RG-axEK-aLO50h?^ee;43aZ<~?A zi+<_!k-tbU0%dNyU_3?#z_)XtSF1RJwwk&P`0(vD?Kd76!Ge3oHv5U19-v{JkWEL4 zMCfMK>elUV!(Xgf8bXD_BK-I;1EH-qiE9E)Z*Zp@K0`Ys`DzCTOa42_{bI5SVZ~f5i2_d(Vh!}H4VPTg_Y|H$L~h0yU>j#& zja#xIX#)#)8Ssl!W07{QIf2e<{o1Qi$j|Sf-5=`Hvw0!gL&D+-y*@DVhXwOKkKPrh zy2@<)Dpb4+50Y7Zr?;&7?Qh1FiqxMFG;qpxQ@k2ZE>LaYeDyPC=wm1{_h!X@qqH~5 zk_XkG5QQ;JL$Z1HP=?rZ=Y=C6WpE-i`z4IC>32VL{1ip13cRnA&A2z6=-KY}Yqi`s 
z?alZ_@u-s(Oj`L!x{{?BEf4D(ke8K}ozqt*D-7H?-*lwv9y~rDVL2H}VI0%7y2%YQ zwVD!({dT0Y`^?BOg^6>*DAH2} zVjUGQi3K2KS695lttdwJM=;V03#%YQ`V0R)tm^y>KwAzUJyvJ(PjprkmYkx0)-iJU zK{}GhkCdSC%Y5;lpL`?+@NYLx{9}Z_4CJrPC-?&RB861nzM@|T3fIcopl89NFCO3m z5fq>Pd~(g44|B>MRP|;5`UGIUqlBBOuxOC3?0Nm?J4rOXLTC?((0>_X{}E^ZSd`8fdi{#JU>4Dp}W_{&!$nyKQ~+U*3KX`Y@FUo97^3AM@%Lbu%~bu5O`MXXnz z@TbPYMW1r4u`_jUmg62a8?NpuvbWV>^E)aNVY zzMZw;z8(~^SnOl#2<;hbR-8~@;dQ!oKZ6YLZJ0!@RVKr5+S;?9SXB*cJu!741tMk( z`{jE68RT7K1{UvrCU#H9L(asVEYaktN{-4xxgE=ur`)yqz+S3aVNwKuT$C%-CXXBD z8A9{@E}gb+vm5=I_ceY4!Jq&Tv-Vtt?LR+terni>Kp+YxlP!QtuxzZUyVkm;8K2W_ z`rp^*U$*0sFfc>h9GG_Zyxnlv*Q;05XDZDhg=)utn?~~Q?lv_L_Qk%wH2|=!Gn1%! z3Dh}&?a4=XS(!y7F#7~Kv8dh5E&}k^s#`+~?l=&!Wq-FmV2{3~@+7^?MAfm)|9+3j z35S+tmUz_cAeeFiEPdBSt$d+u)@K;st_pg&{WU&owqyGwgLiFMVEe5SL4)$YkUsw9 zX8yWg?;tu^4`ra+P^0@*dM{edQfe|-H;l8p4@577<8@UqA@4*uPP-N@b8x+TFh30I zFKd8Kn_BnKywP7Yuc`?nvQCZl#z+UhCn3b(<7hO@9(s zoeL0>>5igx4Q9}&9qj{BD|7Eh<~{d$JncUdg#&qr6TU;?i5%;d>QtW!(}`LzKF-PA zV-b2aM@#-XQ$wq^v1h9ujll}WG#@L{;4)y=xprc6*m>vJnHJ!(EsguoZU(@F0CR1x zE76}%W zadn_6z8-CV$+$aju+xWZuX>w-u_UVrfhh;pK%im07A5A3Z6}~6fUEi%Z;9CBC#hxJ zB46%hx*1X3erW(qb1+ zGl(sA-$7&I)NQ_l_kidiW+a2zsd8T3YXy5tkvzzJOew2zU zy2XqD1RB4D%KRPLIfwbFP#`7f*uEB17o+L+;+0xH7*<+c%#4v7Zco>oUJ&mi{($|U zIYdBg%1o;`G=SKB6m3|nA`$NO*7PCw&Ouv4(-4?{6xVzu+u(WKi9K~@CKr9FRu)Au z{Y^ze_?e!>mv4;IU&LKWjnf3jzai@{R0)6iREV6`*$9Dn@{aI~T=erm=91qmIj(i~@|e(AhdAVKg3h$eDmrt)jE$2nq095eGet?X%*<83 z&O46YJ_w4AH6FFyjmOSQ2&^XGku58|7LcosyGnFuTu*oIZmwe>f1yzTbCTaUSM%06 zY^Hmt)C~8wI%AXE{jHxjG4N1Z%TEj#jE6{L^1}BYPh#lXZ;vZ>f(oPJiCYSH71!RYU3hP}afc{Eo9GVNvk5SwX?Q7PL_KH(tN>Gt6}tRrx)4TZPHQgF1L8qsL^L z&+R&6t(5!>qpc8dU=yN*^@TdUZ%S1_^Wo^+*{rVGL+1M(XFpfMxrjsr4Wzs8gwubl z6d-m>WuVdE%-)OQO8(S0mGrH{{_#Gs^bpE`@1P99E3-G1gve&RI69)-=z?L_ z7Z6d!hxJn-k9j!r?0pO2Ed>r{u|r$PL!`7383hh{vAb@Hs=`fwk_T5&G$hLZ)*}T^ z{tY8MmHNk(xY-8Rs_8~|*D~4XE;x@vUbTH7S?vNj4DPtEcJsjA=KP$c9Dgkbe^{C0 z-fV8A_ywhOB66o8luE0yz830F(EE&X_CdYZ`IGKzhv8?e7j^rI=I3h!>_)P}^Qyfz z%`20ox({M-C}n8;KB@pS2&pn0AY#eq!Z_u`$OdIzn;24IfHsZIEv)xKZ?4A5vLwcs zunr|L2WSEw5HjP{K`xO54(O&MJ3Xli{oB@IEo~ zcYhgf?|<+yrlMix!8WVBsxTi9u^Ur%U-OJ?n&5{5{lqVjdZQ)5lQlf89Et_ceOF=~ zK1^xR1ZVT7d>-nQNRm=Yv0BD{OWw_yIvKC;UYvxJi-xgJAPPFr-yVu?#E} zk$%qPTIp!X63i98eWud2P^}|GDfuuAD3{9b+3@%>iP2j&Y22=U`K@gQcHc0Cx8&wg z&fD|U(;$+x#z4Mrsbqk~uvM2Uk1N=FJ_T~Tv76;kU}m56raWK1=X832FL+1fJs*Qw z_TJvC3L(2yv&wxPYa+Lwq*{z5GRIq4*)j?17I3}?2PHb?NUo=L!nEsU4vuqK$r1MT z{v`rD!rBL*P0AJT0;)+xs>ge?io@zT&uz#=d=Cnc(Qsj4i0d??QsPvqIhVA2>HM~S`dKTeiV&apIqf?`cB>+B7o^@vMVS|?ls}&; zlSQd!y!LyHDa2ZR_*x%T#%8rMExVgX2hu;RSmf;(+259d!M9`Kay(g@XHsFZXW7dS zpXH25_vUhFLNxP(t{Kh7%*(_oV-~~bbycL|LPm_XS`448)HQvbHiP0>HRSp)?YBoK zU+6wv>Ycf{bS}>)@I#3n`Ar+}hNJ;IwIk~B03G7AfAdS<9BgceXK}6E_J+IK>ljk<*P7O9)d$R} zyPgMjIyi-?A<|?&N>z*xXR*TwdEMTzS`3o#fAcwM1odfFv!Al2SZi-d)FbTMqQ8H{ zd7d-n-+`Zmpy|&0`I8u?iLgoG_sC$Wl6&F&RT9;N^51;QmU7pnRgfmUCafAK(@WjOe}uN?N!pxEH~Fv@#9bMQELt^YSh&(-o$L9llpvVGz~ zpx4(A{`PY6;G|kn{3x$;8X_~Teu~}-zHLhNl9JfdOlkp)Xl4}-Hor5gbO)ZOPvrVN z!B0^r()TMAcxFxC$7#k9Mu=&?i<_;ck2^8emsUWLSqanBsnBYv6?I-ua4Kb{GIt}{ z`ME-6P7KBq?spnM^7pQ%V}K&(Q($w^E?f@DQ&$#@+_(na1gds98vehIG=qIpBEZB05pzM=)RO;NgY^OgN`KmVZ*~mZYuy3_oJIRl*6h+?7 z0a-O$GA`W{QIb}n;OBLtdTZ>Y+0<}Wr5@XTvhcBdrJAO`I1r>K@D)NNguYm6P~ZJ{ z@C>e0s$D!vAzN}tbU2=3N$(oHz%1S0w%p)EjY4s_G$o1Ct|Z9JB2SC&eK@5^(4z_> zr20De@)sGRYIrepirw)prlYhi&1}vx9<-o!)w2Y7W62PZBE&0^mp|)vgi%OLpm=B!9Up0r zu=2KI*(MQ3)5$qjnwvi7fP4@xxC?n-9`LEB_80Anf zF-V@ibw^o~FLbewqLd+`RKRYXY`&5#aJW1+#tp&~?9&4$ZCr}N&HZVIEDC~s4D!2r z#9~@|lL;lt?{%{5(PL!JWeco-X#wl$Uy=@)ZwS^3T!)W+`JVC#x(UFawWJDc!L-hM7ST~Y-TAGbOh>rmYV4IK 
zy!5{P#_g?#sy>6SU-hK$HASkAcF-ciGp3ofAFYkkwYMRA z-9c#?&au{2%zT3-$`AKKf?Jq&vQSnKeN{|i;{GXR?1}p(h3O=%UkN^^n^TG6oIWOk z^9Raz)w)*>Y%(Qr4cN6^rW?twMQqf|3rqeUs8!k{`Mpe7*KRK?)H@Yh5GQo<#|y^_ zYHY`LoFNL^TzkkhL@d8~U!VA0Ncfk;UD$)gI!eIs?@yw(!0VeP_r|esZDNv7nib0! zJ!IK$bie9N*{j!zv+#cy3*5p`if0j#^RXO4+8ugkICGIVIuH_tMmJ~JzVP!ciz?hFZz!0+k;z$V^H-VL%zF5jPA<1+mkW z=4$+x%k9DUJXSh_$ppP8;sIqb7c`0IqA}J|(e3jRa@oJdjKBYsi9{t%@;7q<67OR{ zuhytzo9AonG#W0(of)}u`O&NcMH+R9$AxN@?_z|G4?j53p%{SV{u={yC7iEdlR7F^ zMi?U|_~3`>SD6$g*hF@ht3I|UsgiP=6O*V%_3ch)TooSSgeIx(QPotOJ-Tj&JS)jCHStwc;^}BlsXxy7(8!ED!62^nh*`iQ2`{+Sm!(>>@!>4nR*(-(wV$9`}Q4WPKOP#V%W? zR;}cd?bAkSPJ@}&@j|tkh!IIuFy>TVUu%&y%XFjP6o5;*8@D(64R@XAeS z*h-bzJzL~-TPdt6=xI!P*%iMh)%%Im7YHOraylPnI(}Zco0S$QT7SAIgpupI*Eo}~ z1NG0zosgHwm5gJlGuDQfEHrMNAFiOD^;7^#8$(;KTn(Q8n@*ZyfzG4L{NlWX?>!P9 zu_@pl2?_gdUk<#)w4l}QIQRYCpnF-^i%5AHp1JU$e3QUEd)IuWE(at{dQG>BHNDL@ zUXy^s%#7=3ooMd+YIe)fCkI z%B(AIv#?5a1PHi%jFf0YMvKb1(JIsXh|l6;_r2Zo9C|fwkD0eYf#o*2 z!=pk8i?AM%_Kpn7@JU5`9RD2BGk#;_F=IY%_Z02BqGp{^k#_*G^LMa7L}WAm@A838 z(B49l`AlAVdk}~7-qxht+(Nlw^MbAncbeuYhjf9RAUxx%nNr;V`Z(j?a!uw$Rw;kTZEg1H zlPKn**?hf9p=QIhs#}%ozG6(EVF)Adt$JVdiHVl=2b0n2QgWk1U442g?~j;8&GN|H z{;|NM8en1E=FTA#njNIx^*;Hu~ihY+#*o(QX$dCbe5?k@a~lRw;h(<2dz$Sh>EFPH4Cm;s|!clfGGI!wy?BK&gp*vkjy) zS4zod&Czl88N=72}OZmE2 z=470nlML|GeH7RIAed^XTY~a>TvuDjOlV;h2AQs&C~GNJH`brt zEVt*aXgG>c1YTHAF^>V9!}9a5gCawOyM9XhU@U=)7;o}ZvYSEoEaFND(DI?YTMp+l zAi#Ul%VP3lXUe4i`XX=R9>)3Vd*(mm-9#1mewQWGR{KfxdyCl@&ejKN#M?wXE~9j< zaTpYdB_AoIMMzTljh>>8^}8d-sVl z-iG9e!)EnOnbc(WL|k*#9bhA&TN3MW$fd#HbvE@Xq`|ekXr1j#)g5p|qk1;+914aEUIzB~P>YrwUZE*^*D} zokQ_yg8j@coZC?YLZ#Rr4%~!@s;HFd$+b&-k>yxpANah&Lzn(}!ShPWxn1dSQb{~Y zu2P*t5d{8Wy?c!F#EOdpKtluBet72g#pBMV_B8z!iiBda{@4*Hl6QeGO4 zt1mpR4)&6MM#yp%f_tS6WZ56bWnq*7MGz<}R9jS8F6<#IVQy_pRzuTyEQ&<`OIcZII+G=-3EOOMB$JIZ9XKwY% zZjwn+jTilcR08MNVNr_3gD@n)KR(}T96Qx&4W0XB301kjS!jhJ@|WRjrX0+W4B8eD2q1V>p+z6aWQCUW!t0bF~R z2jBTXYdCSSKJ=gCb^dtL1BWIL8XMS3C#y`g*r9NY9w>j+1Zb}03e)`dR-*)fHc?msV-5YpbP@GdjK*P1+o}g2!RoI;> zr&Mt;6WSlyo2x6-**$5D?O3PJDS1lX8~&Az&-2gc?)991Dd>62FAw~mSvG$VmX~(A zS&hx!DJ&(?8Vc%%4=10ib5PWBhlEDL%e@9U#aN0bbn`m7PTQMYy(K)qodC^`?3L^B zVaMEoFo2)s%=fF?`jqOOtWV~z_Va@oYrad3uwZ`TK5o8evWda>^O+;((u}2sfYb?% z;^_HD9Ng~VXf9=jC^>i*kXPD7$i5~(xmqzqd&KAR5RBeHMGGMrA#!B68Gd0Wk)(@!ZPagXT zdfuKW^C!b>PFh<4m+`@;kuqn9*V?tfpJ%cKIT6Qz=w3| z%}!HLMByRX{6S!eejZB9YBFv&@&jML?qee9KVoWc68Oh7RLF_nYJc`HHd@ijt@BPV zGsgFD$o%JZF#h1RiK{m^U4gYbK0sEPL$>6k4YFX{36V+SkQjc2bOahrxsV5AzEK8G z<^{kEJr2b(1?sN_D;?qaAmUi`$8VkNm2khz;ef2?&N?5luQiQU6>m*t1Vk=e?XPx4 z<~0P$n2~;X4YZE8C`diE&7Wa>f1#PL)KD7M_myV!QJG^3fh=%v8|6OUu-S_+yl41wE0^qxv^X`JdyM@dTRu6 z488566?PaPPGVXIj>H0Sz@~XSdhj>%nO9n+JzyKml`%FPzjcHLTd&MI%MT6<3)4Me z*0YM{Iqf7bdYlblDGTUxvWOwXftN4;d`N*`nmsK~-c0Cu{O6|z0K345<~@x%h0Wot zq>6l1?r?b8N&xTeE#JF3tAoT|fF&Gzg%$gdR#P3=m^Zi#>hCtK1b}*{9VDI%jgSl( z4NKgxuh!mzcON~vjf9h0^;=Fkk7aV`>m0&;=4+zkc~_H8$`tnCy)RY*^5_B|AK<`y z_kKsgG0Ufvs7uuoV=K(;MVj$UDs_76M0`C@OX>k;x)vS<<30659Y$P2-5E?13hOi+ z*>B1ZUQLA61(QU^#5wUWuX__;v`KCJg?IZ&05qnRqTTHm*6Y81Wlf&i{*0ERXCBvXPLZ> z+x~D@-SOtcA~x08c4KRY^ebj!?0VI{s-15=pf|B9%Skle&R>Kc@KZzx(*w}fdKmC* z_9VYbC9F%LJ)}ytS?}HKH67|*mr76%r|XA<{+Z}|Gob(!B{ntocUT1^9`GLT%7l%> zi4KCWKZ4RpcVFZCc2q~r0d!o;=NLTKCFH!uBuhS*jmHr_lj(0hlSxMd$T9C8(u1En zCLqWGmSEm_yU=T4J=|;~>1)_i-koPRdQ+8gY?Ie^Sl+X4J@jF+Yd;b!-pYTKlrxA; zy?b+spvI#eM?F8qNMz*m;qma5C)MU-I{{=%c%>3GP_#Teji3iu$CaNSB=8#%ftn!SLJzRL_#D%)KsLtds~Aoeajjf z;e82n`pC|{vb-NFq5JhK$jT*zerP^cS(o}T90_yfhT*E^22zuCC5nRN#%;AjJAC!c ziJbP`Sn2BaCu1-wjP&ONX$WWt;)Ec#!RtsCsnXEhtd-^bzZKe~|-)0Y!sqGF#_r2n?o}sj_q| 
zC@(-n%}kNaKY+GZLRL);LMP=Tvp^nt^#Gz^MXSn&%3M2v(-PmCcvA0linfrv6-LrDFQqE;P0HdqPa1SuA*x`q^U5 z3h0H8BZsgJ{#_pfk0e$o32Yv#l`$ma`cH!YumAt&Q2x`6`eS0vLCpD`4wdi!ns0y2 zrN8_24ngok9bs?$KVJU_DOzW}OWlf@;eWdK|Fp8)J&7VMFoiy0yVVu{$BOfJZ{lv0 z^IzYW25WXOeU|=CjM{dt5V)+hGmlGkUz72|NqDP8^7)( z&>ov8F~4W|FQxQ9{Y^8|UG}=#0k(d}pA`WAbiDtkjcGXq9LsMNX(1@y|AYJhT|pvq zH-{xXT2pu3n)%yDy2mejq{7(=5k9|L0p@mhy z|L}Xp>;HDXMh79sau1X4y@IoUqMrYT7)BohlQ{3y>Tcuz?fin}yOocm_z%q|&zQ8 zHq-btxjFOYPU=-77VOo6C*-yCO#?2Fx$=B%fHCV+^7bjX&_;N|z8_DgtP)WRXmhlx zRM8a~{-fOD*}mXZbxY^d#$)D|stD~$t2_$Zxd@w~*}O6LTP+XnJO?0acrmeJiw(Yp z12`OAD){o{$TDFS+AfTFMswiI_1)Q4fe*7guQF$W5I0a*9QWdlydz{PJkyb9GL5?P9Lz2II}jmhHoiKMt?a3U%!mR_ zsCR+%J8>c!T(ac|q0`Ct(af**?pExgri0FB;R)PkwboVBOpFZ^4{>cRiyNF5Pm3*! z?10Q}mxv~vzcI}JIE0z|PH82i+I%&c(XYY*195Y_AVxctIAEFSd{oor#l2Yj8Zu#& zJJMF|gy46cs}k+@I+<4eWspDf%b<+UrlHZSPAQ3Md3&^w#^bUhbiIZqKxDbmdnHlh zwt$M4Ccmb(s3^)y%|!eIJitSDdAhhM-*S|w^`Nbg{l*P@%QWV?*byjx+Ijim<9D|6v2Zt} zGiq16R}NR@cGIcu$C~OSy0kAAg?P*FJxNS*wM;1pv5QC4bafQzw&mCX zv`Rn3_hPz~Tvj<%x!8=h8{Y1Ny-vn-QUrSuD$D}Kau(wSn(=WtipcsKHi?gSePbC%GgO|SwuCu~kC_~kKw&5|iH0dd4c0P%XDmL7BkRR*{TENv*# zq2tZM>8mt!q(sl6%4#=6H|2wRqhc4;rB)*5p)Q zSi;;l)cC!wN)r2&<~=4Lqd@n!W#SBjY;7wI%x8)d8eN4GNUMq3UcSA|z=%kC^-;(y z6Oy}Vp*njdurj4`&c!zBmvobdHWMua-v`c!RficP8hguvGLtZ*J>&1U!pY z3gmoV(X&b+ie_r>xB#T+Lj_|U{ld^`O)rUWHlDGmsy)lvaqV#s$Rg=8QM|k{;x}j? zu^C`Da_AXKpYWE*dCT{3#9+k>aOjy{oh4i@#oX59t%&^YW|-C)C$W~1D;=7TTaQv} zb~(+0%$_YN*8;S){j~SQ3ASCvlYVI!>vU#aCD#3~5J`a+W_)L?D#A_u#NvB@~(Pe_rwQ0OkEm=m$X0^~kjh-sSqmL{Wt}&*S~zB`3vH9;^9r zYxhM_hthjr7t1!-?HqC=dhK#OXDBbzEJy2g7IlkN9ID#7;nfqxQ$6Bh9C_quwbm}+ zh^@rCTpVeTL*o~4`f%D7`5_Ao520%`!~l>fHp%gR<-Q}>x4=li|5hxVBlQwk=duH zMOoO({GN=O57}WJYf(enbk>;bbL zuIR(qZn>ThAd^#y7pD3ekIU`S3R|Njm6fg?a}0aU>3rW#%3$C&R1zx{3!mSa8fH`L&N98$9V{LVz1Govy0{-NDu z7u4*hoLW>eN$uW)<>w1|sJp?p1$9~s+ageCNb8^_;9MBaK{&TOzZj-(Qq!=K$@fn* zYQe=d7-SPqK1Cq6y-5sPjt^2_m#ea9m%6$on%u^7SgIZw)x&ga(Yz3Af^`bkJH zV4ejm_^a{O3EU>-m&a3~qe7>@g+$uj(Hmj#P+P~kOW}1*bRQCS%Q|7GW)BO`-eTp$UuzsY+K98 zf^c{LE|%V>3|fob3`sWI7bl2~T4%)i_9&o>s0Byyo^mu_zc!HYdG+m~x+}Q!AKGNFZ1rbia(@u4`-Ri z>ws=vd;Eda+vD0$FBwe+#l%%0_Na|oU5Tu7J6iAPY_aP~aklSIVlQ@iFz-3S3a;kB ztUDRkuJbybfmc{g3;;{8y11Lj)w2moRl|(sA$3?U-;52S)^T^+bRd2y0H0!A*W*N$ zUvhx(s+;?89q1$ZCX^I?!w#x#RbvvR%pu(wL4}xiyWCxL>+1{-V(&8c z3n<~g-e>=y%OceCPO0)tA94wWDW4r{@*_6B`W$gp6(pBGXA9RZSgRCRzv1YSk3(U? z1*Z0@WhC%k0+nN6(peV*yeGN-S@-b5YGTqOlPL)pIONK=d?wJ^Nl!Ma_abu9L5N2~v16|*o3nrY zT4tp?uJ}gys~91k-sXv~K+N0(1Wb%}tNL3uiU&LPe5C6MIS((T);sA&Z^g6vA+RrL zydBNAiIq{bc48uI3afHwD*0=#w#lQ(0l_8KVu2CScxqd_n7?a-JQ!UP-jP|K6X*7~XJd z53N0Zj{@dZEuyGl{H}v{qsxw4mr}}X8C(>p1vKEgK$iBh?Uj!xy8vwk?V#7adb79G z9xTV9FtO(9rJN-2N@uu3{pqv^#3iqto_!U^*(wm1E*%&D10cBkqAHI7q7+*yiS~ZVscO!5B$$}7HU{WP z8BQ&%)xbpQ#y6n@ZLpmBsT#2${*Y$p=5V5^whmc8a;**P2WszWk)4O`-p7P8)scRi z#3aMDS}+G~TDa8OT@h=OLc%gz?c#kDP!aZ~VR!m(uPOTUcWx2p7RdrDLQ?j`Cs9P? 
zJUVo;8Aoa|)kqO{jzPcZZw&2^`U<}>S=vo7_-pN$tq0T*m@~>Pp6|Z zC_W?HvE6pCnOrfeNp12LW;G8#w?0jxu2P4|{X}u;KE;K9s|fL83qLFo!>IwvzumyL z1asDx$(B6C*;ra1JuRAA_5|8vT_8xLM=p#XQl+~o$}aJ|r!;Iy@i6(W&~dLdl>Lz$ zzFo_ZtNqaJb?S^~DC=y3M5}Bx5c5}iR(NJW>wk3;!aTb(RW{el-mvO{zM-z;RUx#> z-;}qwClnP&0co7lIf{fnmdZLwg7bJLyAC9FP*=u9oLzjR9-pJZ(KLu{+%~iWG z?Ti?QT=ZRE%8fPe8AzGp4}lK+Q0d1(9E+rk)u}9gF}zw;5k~`dLWe&x?=MG2Yukf* z*`~{_5qq^LUDOj==IpPb%gc#Le;d1r?aHKi<~!}e^VdqS3F1WzT;rLf!hm&xb#R(M zP;CgvOjr+I4+AU#l8k0A0doF9^I;_Z1Bv0DS-nnl0|3V-wMm7rQ|vkyh z@@>udEV;a+aBE?ytHg=@L3)y+&& zoBa=u@veh5m~G~KBmEhxwpx+3!Nr+D`sE!nj(q!S#9NdIqw2H5K9VD`C_Vsh|4QBv zr-W%fqU<5?g2=!SJGtyzw=Px7&IeU2)1+snc?y>z>X%Zzy!;=F7heOW zw7|p=&Nj7{>PIctRca^ZRaw|{QQe29j-8RI0UI5#Rc0gFE$hlD(|fcXjNJMdW+mpc z)j(^L2S=QZYJ}xcU^Xq0=qwM^o=*we|F&OCqlN(RoHI`xvDETp zV$AJKpKMYj*0m^f0|`fz@qhSMb87NvD_nd3Fz0>62zu-l%?nU!Wag-~YPI%&D4r*#8+?7mhlY*I&lq@5@i?Fudst7AH{1`7uRGd1z6H`JCje zD#yHB!ziV&br5Gm!R`TMQ~h2lwHMqTadIt55K(VMyBdqylO4AuV9@A}u@Lp#IiuC7 z5@H*rJ{*Z-e|YvL=1pkNMO_Mwpro_g#`lJ~WWCM+o8&vkP1W_m+O!ql?kzrTc>VHs z)>nEj;lb~g(X~DSraE!f{jWa(%G!2M#f1_O0q=YhVvv$pm42QpNW^0G588vBC&?d> zgZt4=WOBrK{7e9zE=mi|1*nUNPTjF%w&_E zQ>@T&RTEx@c*LYZAl0nckXD9$N9>wbz_TBeUaMdCR?;QMgkEPHtPyf@XT1mq`c{cP z>sm$ox!OVPvA5-p+N0{cS&P#HHvv#5l#~^FZt(ZvxDe9MG|Kt$ZMesYy`JZd+fxSj z`#xrRL-iRih_6~6hqHcCvPiP73YT`k7deiyoG!Q6eAKcUHll=6EZ}n`={;SpdBe8h z>i*>Rqz`kc@5tkLDX@WHAjP|rY;VT_jyKzkBF&??M%Vi*PV|pB$3J|F{{-?pyf;!^ zdI;)$8;e#Zid?wXen{K=gmkbDNQs-d*z}mYBlg_2A9z-^r<5zyw`xplehMV_izk-8 z1T9bD*aMtse#%-ZaCY7EBy)k66D-331I~#(fRD&v4yF2PMR}=|@aOEc)=-J3bk9^U zo$XfdV~T(1@*)VZo=R7b$O#|J3{Vn;+{{qg!-H~fG#&benB^K*l%EZgdic|4T zGu97W7y|_O&NI}PV*7}STM`@AnkNjNuH#WZ<{PYU6%z;_LPhZ$;uHjSWlu# zJ*}DBQUUIoPk6%1J%Hu+aX-@GT`Q^gNm6+TPW1M8Av{=MTCMX@W7#lLf!0K!Kg`vp zdMQA_FL0?X0MKkvond6$vmH-#pE68T+Znm+(+2uY48@3IpVI=(aQaN3H)WDWK|K*d z$b#IK1EAxY1O6-&x-WFH8Uxt*n0}RCA<_}JE4!7KdrNM4jJfU%Q5?4;(57p9pdF2< z*#M=B?d^>~qPmjE+P8LXc#07JGw4|s$^9=|V}{$Z=tq(w=V-ot7Zg@$^Lv-breZOz z7UK8)J`X9QzF0oZ+sR5YZ_%_YwC~rz7e3PmEJ$$($u4JcgI1A2@a0i~;@@|)s!;>( z=07Ju92z%2a?UO)>10Gb*dlGn;96(E$pd^Pm*$iC@qTX>1fzZ4a%tz4%|V7+`ogLT zBydB3QNmu6r)oA#YUMkXBt*{=J;I;pk5)7yr9s)DPrLmR3wOq8e-cdI6;pU^b*0O` zg1~Y5_!;1xfeYCTi`L&Jzsl@*F&w(&bZop6~gZa07}s*AXn5sPK8)CU1k-npOxu^{F`O# zyQ=b)L}8SzunYTyyOh=fZoLTW+YG&Q69t8|zLhg#kbQjpMi-^iN5gM%dhV~D70=qv zIYlgR(AKnLq2aZ5VtKE0ezzo_b{tW3@D*k?wJUl|%zK5=aH1BG3hEW6ke`Jx2kyv)6>uU3j%|w@yJJ>qlR5yw$ehP(4|ZL0Kg^4~Q+1Ul_E}K4JfLMP(Hob*M<7 zn92d-_3yyhsJ?TwJ0aL79t|4yAEIxL1})dj*+td{^fP~FC5PMrPb#-Z9d9#9*AgEM z*2XBCDpS+9hmMPLD4d-|o9K4oLt87%o4}b5R$nTp^8$$Fv(RcDo^#V+VM7OU)}x9l zO49c5=tIKMMYuXVcs(Y=9>iqlBONoKO-b76_mXFMOEh5O2Fpo0+dF0!l;?3gS)y4Z z@SUD|i4;Eykp3pGDt6ERAtgYOgt{E@Xt{XQbGTUhMB5qS3 z&y);&^yo08XSpL(XTkgWnCoQnV|nI_ZUW9IEZ zvc0ppK&7>AR|+mU&+in^?L0eOo9mMaxE*^rSZe#%yt{zi|HAd8-ywZxCY&Dif>1{} z*Ncr*jvw~v4(oC zex?4n^4b&o@gJIX$%r0ZvE+S|i!;Y%Y=f3Ex63u0^63q3XpFT47X>#Z*mp5LxcI&)E{AJRq_`S+z0mQOBlJ>z5Vb zY4`TzPqsZGpzOYSMLkFikCH zXIDR>;4{ej59~+Ow0H@B-_>6L!u$qV({<}Hrtc>3srk?)@Eh%m&HS9#@uHm%2B9ML zAvU?zrzDnhCm)vYYwQ#`D4nQ)KI}=3 zQUk_VZ2zK26I=Y6h}XIVr`hp)*9E}s^?X{R2a6(IhF*EP5WvdBL|yNd5z3n?t5|Nc zK=ht#O9yxRn_;%uf`c5M&2X*ok& z*1lXuD)Ex6zkv%BDv&u41=W}jrqz2Z!oJ;7g~~nuz!=0+jQ?~|N%TlzqQL{DAt95) ze@tXK!`C!U&+h6!A{|byYRlYl&zgnlq+ufqU=W+OTn75AS%N>CCYigt>jbG$;$7bE zq@dT&ON|GbiRvG`PhMtYeT*V~xwrSeyt<5Kpq1@WXkA?$%lz{<+LR$ep)BtTI@W}g z-Ie2$7~D?Y&fK0~?81B*0WsCX{vdXpT2ntStf+3<-j~T z_P4#dc){f~O_zH4AcjQ4agLp%DsAiDoA_tg`A0{nu|U;U=B~+=kB>ADFCn64%9>-4 zFz%Z}D4m3l0%#1pR5o!LM?KLoTzzE|ApWb|N%I=aK4)hh?>x_YWft$)`KA@6r^;6= zhyPMD!4#%C+~C@6@sXjdl(l|Kz=H2-KFEH6oJ7*n3pIt<;w(@wrjEoM`S^0ZyH!%< 
ztEq;j*39h$R&Bt%&;pG5%TP~R{)K-!5}yh5W+=kF_}5F(1re*sk{4Hxn#>YRNEc=e zdS{Te%nBy7K49yb?QL|?XgB5CO}mea-khx*mmGA_hzd-g7NDs%y+tf#JEc>K zTacd3d8hq-`-3-6vM&$SFg4rDd_V_hD{gY-ifB?||Lmjxn10YCTCR0+UHnWP$KZOG zf8nDO$!6}EFEBgsyl%J+Hh!+*_u?LRqHNp-(6Rev3lyCdnAdo(Z?e*=I?V%iF zM_q+3y-!>KE|yWGu=Ck*jIu;3(X~zN)L60=(hG%00V$P>w<_(9miy{@4D*N(i1r+0 z6ulY!(0u{;3m)@L>wif+nz;A;+5MEH&$#M>sB4RnVJBd0I1WD2Fw?^m2Tr9hQ*Jm`Bd zGN8@5z&2N#Yf-vnzRimGR(68zS0K(2^3xdY7@^!or2LXvBCmJUA@DXQ`zO?-7G@b} z*O~gFf3a=DMs@|4I0$(ED;Nu-!a6kprYxf25KBK}oQ!H9LS=CpV>7 z^+H*%u4xQ;_7G`>OhH;X6~e()+GkgVGRwxQh5Yv6u}Wf$4mk;?Z=)6GT4)V6Il82o zultXB_UiT$pZXgY8%?c!x2#?2@jy1tZ1cczsVQw5w{p~GrjH(`Lk%?@3M*PqCUl-a zK2KVL@dV+`}MMaXNn%?}=86t+(NpN3Tx`cp zyd^S`8uPN3?RFYbjPC&FR*p8lw64N?K;u=;a&BG*X~kkxXn6P7O^Di>tug?|V3~rp z`zsr<3pEnO3zo>06&!fIjy2C9dzf5NMd|pP)_}qz&L}wb;8&s5mhj#%y;6=7A%V}3 zUvrR_Cr`~&9cgQ{^_?;o4gG5az!bgywJ9irtv(hGN$swHa@JOaV(*F z;8XU7U>8Q1`xP>4;5e_nT$8L5o+1DzUm_3e$?+IiPknE*1=c-pF$h=MvB4U4r{2h^_v06{CD zmJaP$l2qsj?-3IG(~r+5to8@f2ksSJ%yT~^KVvx-*Pm(~x(s(SMhc><%UM1z>G-al zvn6`FaND{Tr%V1w^mx!QNexgq($M={V6tnZ-4f7omukO}^ZGvGNzMrsH#=WzXz(o? z`AgI_RYkkxTX1F3$O*1PVToSHGJ%?TX#%O@2w@JoG7a0U$B}^^-pD|eX|ts)OuwYG zy;4254zv5PoB37_TD28YqvSx=gm7$}{&e2L4Rx1$mh~Q``pBjNXgaZo!C1wm<__Jj zIM}P5XbNi`Q4nnNP5)8g?W-9#-J%||HgS9Huvj-1!30$>oPT?gFhxTqu;}2T9*caAB^L6jQgp~^h zfhE)1`GKlxI=l)39gl0aw&z~k#eP3;a6iFzHM778#slr^m;TOxjw??c8GMb%m%1rx zJcVz(Oe*@i@GFo6EIsduC^Q%wQ{ zx`D5L#@IygXkJ3#1)ypF)hF|bj~phwNv-S+L#pTD5MtSp7t7~WZX(m-F8Vo;Cn{N- z{T*9|#_3~qQTXv}ckrlfc!%C1eY0Ll5P8Bw}YJxDVSa zA*r`1p9u&C8&3&!9_{p56&}K?5|ETc9!xX3xltUaEpr2 zdp8>Mo~s3CW*WvvM-MOveuZ)C#+7W6KEc410E@V-?Y-y=zqRRIQ*LXLElVxh5WI&^ zrrBcd6Q#`NdKzz#9{ygL)~ZXs3%CH*oO{7ucdK%n2)gus;O`FKe zUGVt>_)($L$9Sji-QkgI^LaH)ZPk2RB&U+rr%QxHF%`tdBzyR2FCS!yxAtjBEq%!$X6anaGrdo-+9Jq~zFu<3po;k+M}j_1vW(*MY!Uk8-3< z1$Y!TR5Wr+HjKy?dW2JdS@D*;_}9*?AGZ5}|C8Ytt}D%$Mj>G;*qt~YJ2hsBk8pN; z9z4uxO8RBL63(A9wMe+y4!sp}db24sUiCWhiLQO{t<}ZhH*<`87}Rk(c5gInThO&r z9-@K*o!uMH?{+A8`*ZjUNWe?xwaIS1*)7Bo7&adAft#K& z^3)zal-~6zFq?jR+-r=ZAH*Dp6!BukrMb|3YMFe{|5AT|WB$e5d%8cAuP(kY23lz( zLqy4l=Ajoo93zHw;O{MH4=}@A1H5jWn>h?UdzndbrF83en_WL772#iA6^g|_H%>la zS$zz?jQ{NOd!K#2<^3y#*XMn7oMeuw zqRR98hg}d+90=}f*1n1>5jouwuzBW48@r{q5qw^#oBQ=I|Cfn_Apdqa^^yz6Tc#K} zGw|yI?=#Cnpn6pG1pr#uLEZ;BEi4ux(cf(JrduC`3k`iM96l}`1jV9#iMfo>e(mNB zVnuDl)LhdRo=%*k8V zV7u<`0mS|Fs$x3+Xk`k4!c1*+hSugLOvIaNm6T?>sgRwu69=OU%dI1(xIw3@F-yJd zO}Go*M~d-wISKFaX|{(P?v$)L8t4F?^`29PQW zxyef_PdTBG3!*GdSBKrCrX*mGHoT=cJ#Q>OqUD5LzTNV|`r#5}sto1OPDa-G32$6D zKJHV3oQk7b{g0C7jB%m`;JUqf2g9&sI~lK3_Dky~FW2UxeiVCCEB3v%81}}q=blEW zgoMYMU$QCsxO-g|yf!pn^xjd$Tq}|}PdQEOBV&}_R$09H7gJQfkAWj*i+G4r38L#U zl-B^jqNNn`_pu}5%Dije=#J|*(;9DY;N7XP0J#r)d1S(B^Q){3dP{G};e>i#C$ThI zC(&0=c;UTe{hTsq`(N-|V{(|UFA&A~BJ;$j<=uu3+O8*ZR`!cP;F1^y-K&v8tzY(j zFVQd6L{SS9DL;kOA4t2J?l0~G&4FX7zV8`HJdeQ_Kmmx1dcJ(clEiFo%(XV{HpzWE zFD7+1wV-Hl+)dYQEmn<~?Ib6dw+nR279uwN67d~<%_$!ctpW}lOk69wz+WKu+p6BeZ|?(1%jM_ zE-N@%SNP|3Lk37ZF3tfx(uOa3#W8$x?o_f;1{a~QXyOAb$B!~%0nB4|9O+5e`NK5YV&f!rE4qX`Qm5L$@Q-gcRx7dUdpGJt z5&;OumP;JyEedL+EqO6ve6&G&x(VoDyrZIXxSACMm(kY(ef@(Sr@BChh#P%fe)*fb-wTQxr9{sfmJt!V~!a=w9cNi=?ze+ zu6FXQCGSZ`J1*c|a@SKd(f=#dV8n3m*y;9aqN(*UYv)T&bjK)#n+v#(OLh|HsQ&O% zU(RMN(pn*Ab!~G8+(%Kw5khQW$EDO5NnzIX`XM^5=7wg^Lg7k}#0+}f;LFJud<-ES zhrVuk^A1&W%ZmO8kKG2B$#I784&r_gCk9L1OnY$^kp99SUOqbq(m|TL4{7krKR@=} zznE!+y062j>!(Py{T?Vl;)VI$!MHqwRT=Pn>7x4e#KAUPGegZmkOSy&(x{tZZ;1#! zeXOda+r>2?qF0=3|6{{1x1k%r5?`?zaRNb7-A<o#0cu8>B>o~GFO*UZv>T#-yO!+ z)SJhxWZLSK=@Fv_gzfLzRl}p2oT?j@Qvl7Mh|>!!?4=ld^C=i`lzNVLnav? 
zR>Ou1f;P17i|)5_EH81ZTd#R#?yoxSH0O7ZUhGfp1NOCp!(u}7itO!&%g5w} z--I8)UcpD;ZGiz0Q7_(uP6s6C5o4!N_|rnyc_;Qv)boLrvC9bg9@0R^5R?OJF-nvk zx7NMO)gk}nOy^TYW(dvLe!=r3LOJZBUpr@U{bvT}CUJQ9BKu-{1?YnOG`FZGIXcc_ zho>&y-W3>T3GEip%WJrMh|#`l>GoS@`i+cxsU)UC1;$j})!YXj3{sEqbmQvDo*q+K;c6kKkSo`Se|L_DB74GOUnO3GnG#&~mxm)C#d-fkXRJAR`rWZ(L=& ztdGk;rR)%C!2Oz!HZ^y(dS^wXYRqm*Upy)rkX#)gfV3Ldgd6 z&R3JleYLmClqTYPKGQG%1%&6ACemofCFcxyw*A*`u>WI?i_do5iAhzm6f9^uMCdI4Eg zAwA#w*&`+S6z3uJ36!R^@UjhB(J=nS-`DUM=Av4rnup%_D?F<%C1eQ&RW}z0>pv$B z9ye_0dW3&uf@oLV;wPm=p zXn}M#hN1?A{m%}eWQp$vKt8PLdq)i5^=vb3>W$adtZ=c0uz4u5VfC9rM6%I=1u!ZZ0exm%FRxVDb9WKVuXI44Kq_xBMYuucF+E( z@_qgyt#QUH(=rb681yhK%#FiqbsE0amdtUk$Jt$Vovy^n*3M*2^wP9`flSeW1q846 z?BjYq`qj{`qsC#1up%UH3PG8rJ zNx`qSk7eu>3|r%LVV1fM-ZwYQ-1THcfX8c=ob>61<2oWQ*NSI)qa0&p)^s(C6GBn=t)#C-@yr#kvI*xtcJ}8pZx*%cItR1|xAd#ws`@6K&psbiL`t`o_ z(65V3hufCLiY_RvPkF=k(d;R(nmz&y^n0`k0r5BkAR`#h$afOK;cA*#icS`?YtsMj zQSezOinop+{NfHtGW#6ySmMVsNRwugftbv9%?&mNj`sSbvn*P5n(Ar-;p5$wcbg?yeKOcmj1a!qw6%zx7r01ZjBeU(g4$Ew&U4RA zKH$s47he68Urf!>?xVs|VlPQi3DGC+i3kSSL_IY4T@w&%weOXe9jAo%9<#U6*DWBZ z4OU=oF(9~+eEgWB=2is9074x>R?q19m4b+%_PT(3Kz4-4d)*)@dELH_?6vJ~mR{Ex z(cVl012-Wdgf|wA?q0p#ijV0DDUu&NqG<+8pbTM)3J&}|P`5*LIt6Jmp1T#k?BkX@ z$HMnVQH9ymUnykG*d$qlEK0+WRK#mAKDwmKy#{^->j?}|7_(n6G|9T&PI1W%6X>$j zbelp6Z;G^9UU&eIXwI4y;VE+sJWN4moX+@fRNuDY=Po%6cH7V0)vv@)Nu01iCP+Q7 z%-7#j(!#V|0ux$%RTNrmh2dN&&%28}u^+RXIJ)rivakE8Us4 z{_L&XWwCBl(Eywry@Qh{b|Wf(p%RWx!sg|sqz;ic9N$}>X3Jt|n0=Zil|Rz@_UO&I zMcqFOT!?iM1D=bng)c+D&}^z(D+r_O$TgcW>gO`0$8G7y4VGG5$ta{E@jy@gpo17~ zxhc-ms~?}*v6wvAz%;Pmf9;!WMysMEvXy0=0%c5CYSA+NOn_rxhPkq0pGuZ=wkSd! z(p2Gtdh08W{bj}Gc%SE+3JB6==ms`3U9?E)8II z9j3*7?7M`k`+LP!zCIK`Y0s4?2^^&Vu`JI2%}+Is`4%I5u4dR!3TyIvwH{pcWkhi- zA+0xciOVB0E@N2@IBqi7)dntb>v@~Ng4KP@zOrPWC2W>d>4)Dcf6mJ)&C;kR4|t8Y zv0i?BcT(`}{a@Bzr}=%Gtw(5r*w=+$n!Y$?%w5w_fd$Dl8taCzE#{bDPQm|dPW&0+ zdQ7M>%#OHBLszwE$!-*~Z|{}oqB_6sU%vJGp-aB38kSZ#o| zG2ai0GDe5f*}b-Dq8=`UB+sU;Ngf?W^J>xJ!QRix1|G5zl6gB?a^8Qg1B){SHT7|C zBpVb>nH6`N`(J$`0ev?PSNpxf#0@$qwSyapcWwC&DUF-^d(9m0Cb|De@C^GM-x{cc_AeoFRd`T@=43$5@!ERRNz47nerv3*syN_0!?JGuXa4Z+ zRBVz>?W?+1=iyyXTg&MM2qje12}X=)4z(le*!6I(WcG${Fy-iiTGFN?*{T-yL5 z2jx8q>UlPj5NF4KxcvXvd+V?$-*0bJ1PMt2acC6*m2QNg1*8NFx{+?lAw^nZl+FR9 zRJv>E7<%aLMq(I-bOg@BzTW-r-+9meUVFcPpZ(u+&BY8ecdUD@Ppn%cJBA_(x;m`i zt8ht$bMHq)>$Fgwmf63LWFTMaEl02mbem6;F%-FdxWt)~LV;WjKePE~RL|@-$u^Hh zUO{M@!>s@^%<{MUYU&T(Dz|-(FipRk?E@09Y05ynEq(tu8rp-^> zFpY^c2O&7#qc?1)Y4Ov}og8eVu8c`Cr8%T!Uc}PJ@HG%hR$cNo?>> zJ+`V9zF;Zv`&QKJxq#lT2etjJSAgvOpo`8X!YcaSrlKX>tec&iN>wzwG*pGB1#E~c z>70UYk;?YJi#=Zi3wSx_trwpn6sVU{*c16_?IAv*nShOhQ%%e~()12J%=f6*&BtTI zMc`L26}exRWJxAbl>DkM@7_bf_up1@M$jkrPWUXar9Azp;mLAm6R)fkw^60HK^Xw1 zC4lx0wt5@|<8@n0zQgq)g}#u~p3na07M)uZ0FV;i4w&qF{P;gI{QHC?^unC>7A1;& zuO2OKV^K{>Zlao;%iTu_+D1yNUXD%&Pc5q6;N*J%towh;(N{hGgb200`U3vaiF?ZY zlIgu(M(zf|qX2`|HOHn4%O!@%8X5cG!)Zb$@x#7V7ze1+sDSn5m#`rj^-!y5TUOy3 z$RJoeUg;b9v(Luzw1_f^$TYN;HtA==1RWU(T)Ct$ z%T=Es99)@Po~V zp=E(0{3P4b`5jwn(;wRgpLQa)S5xfPx?A;~=RaTZ^7h3Qol#qTtBryp1vn$mMC_Vq zqj*)(hsI2g?Q_(Gl_I@@LZIzG1|<5<#ak>t6nS*GPvz}Y1H-Aoi~}+F37vZyFixik zf6-?g#0}uoCgYE8sJGy@sTc-r_woAM34$uOoSs91S)TLX6Z-4s;1x>KCxLrxL zR^1|gv6Uc-vqyikH;?Aghmm70n3?y0xYJWiUtmEJ*i<&fCwj-!f~&uy1NR$}J<~V# z18~eP<;_R}q<#dBa>Jua{$^U)zb5z;PA!)ng%XF`jT?zdpoGEN>BmKNqspdC#06t{ zfHj&Cj9x#Qa_J@t*!_^*g(eY*Ng#f)Ae(*dsK5mI9N5oXvnHZ)9UkKTOQAX9VDK*n z$qXhkQq{j}Xve8ZLUvrh;@ql|Zl)PNpR6cI;#Yn4ZFGj1n#j=}vW{IJfA_K@ zr1Xoim2KSsSlB$#T=QpyL;;jbaYf8hF|(D-^6dAo!s0~EpKrmw(;Pf442Od2i=l}I zwm+W?0&QPoK{ZtqLeGuwf!-4KI9sUJk2Sph_ZamLF&{V!K+!~hr|>fkZ>n13LBsA) z4rae|(^PyUHFNSA5!iNF$P9o#mD&L}W$13H`j~_h69`myVAD;A(Fjbp>6~Ghe 
zuBS7w+)Y-6hv`fGW#^PC!8hgafGte>>wB&NWh()LHVUhBnw%^db&31)(myvNkuPqM zOMvBi+9NqBxf>4K)8AdBnU%`umWs}>&=}g6yS)X}Ivr0%Y*$`10u9c_+|YNkGme5i z+wM4tMT6;K>HudbFv>oT|_zd@3xhg6a47k%SpWsmtPzraGGBEir2MY3#C&W62Yk|rTb)hYyaLSbr%d|eO z=ng{Wsu)OKAoFLdIJg_E*?vZdzu}pR_HJ|1U|gsQ!{hGSi`d=cT^|0&M^(*HnOsqF zU^gi0%wox*hA3zte1{hE?x^}ldV)(XDe9+dF` z_zyu&R!HlVaL;u!B)paCHtywA@;xOU41e2lBQZ>~q!z~-f8`re63gk!5c0v4K4@aj zp+%xlsfRvshC-fR^pS}IBvFzEmcd_hHgmZETNf)3S-Pg(j-eixRqopjc%#nAut+Z0%0PHi|)z8js$$1-_HMaV4?^dlF2G19bE!0&QS7VjuM5@fkjCw+WcjY2CPi| zXlmQAMC|Fj&p2_%^s7L3I`aZh3!XQe$yX4a$*rIdP0uMP!+2R8pU9DCwGA&b zTMB-5!3C56k)eT5DOfP~#i;E0F@H`1&>g%{P`|HY>!2}AfUA>y!*HOz`zvDY^lS+# zsVsYMaH`VE7KAPjc{dA4;YYz}y>cMBgpCb}vdq-{y~`0CHb6y!PtDVE{3hA8r2aNO zBgp7sIts8rs7zjX)Meu>d@`jLVCqpUg+*O?~YkHcBK!?I&=^k$lA{YIinyITFoT?GkT0y5Rg z7psRIaaJ^IG^6ug{aiTYd{EugpM!-INW}PC^^AO}R(Q3E$%?Fzmx*q37fb&Y0Rx5b zv~x$R&~+v)%Zkh$@|PALryqsk$=*X3nV-9-(K@tUXXD_u>zw+e0Dv$tXPI zai~qzl#!72%D(eJ?`cs>8-DLf@Mv4zLmCpgt1{AH={_p39fRw*X2cftX&gdqw?zES zoA&0S>TWulZ6@_kresX-t{hF@P{5Tb^uk+omTNLXih$|b=%jj0*q>&K$ffBE!|@|A zKJV{)U>WY(BST;zl5};|L*jH89>fm%@a0(nNm{C6Dj-m)wh84}_bE%T^CtGJs-s*Dqiv0E(-eiA?MlcbFrOF^j5KL)Lg2wbL3F8x1I>N&CYv)QPEWOsU zEk4mUr~;*PuYg1gtqH)Xy;YZdG4E*PQc;mcOSZz*P5SPzxx<(fECt%ClTm;}`HSpU zU{B?yN(`yi?!&aNDvr`==}f~XV51}o@kwQ`(LyWlpK(kXxWE666XqA(euE4NhP4t( zSr7KA)c>cs@c;Ru^dR2VIc{at?{yLSSDqL9wMzgjPI;PB3K?h>P=GCmb!&0s%eQmIA6@a!GZKM$7!s?F-OR7d+1)Ut~ z_JL>SO%n4knS^(8b;J!r((e8e8sS|ba>6qTta)?O?j~K!oxBd<4kzI^lZX1Q)f1FXLJ$NK8|s&@{z=LJPV?oTEcPlWgbJejOV3SKcy5IpZ!N2 z^PlsFytDTJMEFIVo%dY3zFY2qFoJ<=!z+g+GTRmU7>^C+wL09S2<6R#{BB)C*a;ub zr^xv%vs%IcaY~fH5J(cJ5I=1td6~56?Tx7ls3rTrAnkZms$YYNy$4AigXT-M4xMFR z5Rzg}^_@a$_e%8wFMu2@79>hR_oGK6fA%k!G5ceKZv7SSS}2fisXzb{}uHJEF>Fm$8tog_P`N00O%uDSNaF*qpykq;nePcBx0GJkc z#vA=(UGyJ$&HvT_0S{Tv@IKUyQb+XxLGxc<|1Y1jv~GTPLh|T8Rkr^+9{>6*7y@XC zQ$K5_{OgPVoo8TBLpiD`-+KQy&iQ|>O#<^NfU$YY`<&|k_KkWskeHU4GS7dtQ~r+$ z9j3lHeeNn){0mg@-m2^63BBjQ@OG{{Qb}{7RV&Am|;k#60O&lYo*~ z`UKB1)9;iQ$ok#~e0t-dcRS<4D2C|d7|mO}75GWME308Gn>LFKiC~A-IjDc1 zzd7)hXL^xiovU?;B%w!?>llk!cp zF{)=3l{B7@s&p8h%~Kut9JV?CNwE@sH$8Vy$KvOBik(}c>nmg-4-E;hs-5%4#&9aD zZ8>f>743Asf8uB3o%=Zvdj@sPjkphR-ZO~3 z)&KMn!syp)k>vh#?Y*`>7Z)*7TI=}WLxkJB(?Vp7xv|IQ=?|$!PZX1p*TshO2y4=l zCENIo*jYliO<_w1?eF6^If@1&4t_OznterT)F^wh$>((to4s3Z))KnrDA~G)!woNz;u?fJV#MJUW+T z+Z|u(%%E=KI5mo=vtmO_KMzTLxgHabLoSsbztK#8|JW`=L10?WJUI3&E_XoUJyHNWk>?Wt0rsy4ogrEUO#q`*+?>#NJ% z2Ery|ma=G(MXzULM8y~`+G--|33o6`MUiwXL0gyg>uK$hf8p+D&4W6#53!o(SV1K> z#+)x_QI<>n4NRY6gHmX@Kd#JUkm{K}Jgw7fQEP=cT_4|hvmZ2FU-6n# zfnN;*O!51r#^|D@Rb_=)7IC1gtLu)^dER?IdD=N_k)F7f*FrnEZ`NHm(LyFo7Jhbt@FIWc!d7M$hag$q-r`%9@M zj9}i@A}~*XFx6{O%1F(f1=^p7IWzbRkwXx<-C`-b`qY~~9T}P`evM}UE!VPNYs{q^ zHDV*~L+={Z{G!1NV1$KP4&!N`jCEsy8EuEZ_ck$k2NEikwAp$UFMS^~c z2gh5{L=+jiiMwO$>3)k4mJ|1dUF8-=Pr&#zA~6}=?bpoBxNVw1WIFa*#PIV))R;PI zppLk|<_e!a7c0EzHs(D0vmn1kaUL0 zlhpNJ*98R*8#cWiirX2NMlmHP$Il8f5}$X~p^bgf@*qF~0h-P_D?RlNKde}H5U>PA zldayxMQM*W7KL;H@WI0Ki(#G?`;*3PM9rK-5?%3EO90DqI04^hM0k*>{hj3%c6fQH zaM)uG4Y}|>RZVb*Tx7?CH>b@zl-M)VYxU<|X2}so#hmDbeXF1DB&%%tH8K~`nOrFZYp2L9h#+3g}ee)wsS^}RL&4>1u*g{1icw4WJ01(yaITMIC zOYX}wZQt-=NUbWIv!6h2?&koZEjz}y_hne~oc-HG1MIJdWu>%SSf2|3^9;V8ch?r3 zg-1%7F{yqfu)iJE5AbS>J8n5%ZXd~6QB7FKy0z1}rn(Hv`b|AM))QQq{TDZU9(fo9 zR%zwqmb4UbEy3W?^QhN?AIAzRamKK~1F341gc#87JO1Q;g{McQh5dhnA({dRq)ruSB_#Z>XSId6lC8JvQ?`s`I}k3JJe z2UB%(-pU!7@Hq8(AvQVY)gLawIq$X>y<&o8idG&)fa5PuN(0osz;873ih~XZKe$_7 zc6;r+a|JmlY~DcJhk7S1*OxpD-iK}PGrR$o(k6jSo>SnfqrD=O&FiOlza`ofZ|w-d z91!f(a>3^Nx0gL4kVN@y!0T6)le2bV^_!_2MH8!9GtjX&cw(Phw{@WIc!(NNE1|Re z#XpsaN3g%BfG{Qr@8Eie?!WysWGC~kLq9hTHN_l+&ay`Dc$^Ab<{jC!{9E-U?MgUo 
z?5zGR&TTk%ZAc?@bC}^9u@S_m{A&mI+FWq(4X3LrzF&pohubd1{Bb)wy8zN1j}T>l zkL5q@YgqB#jTZE9u7@yNJl>mkMKbE3>}Bf=0^NhrdrCB7b83oh-YhX&2tH{kG=!oX8Eu_#XY{ zdki~JJIAyTJP-VT7thl7@xV2_N&$a%3JymUncus_$KGfMT&7jqtLhcwZdg~h@)Tu~ zs+#Hni^^VS?cJm4*1$ko4anyQ$?Pz%$6)I(V9)aTDQa%>@1xJ4(3=09_}u(3>3cZs zzGj~efe^#GVxgpn4QLCYxr<3*k4AC{_6h_MXfG5zX_0?mC^%s07e+p)(iu1T`Wi65 zM83{!^eZawzKh{0lfH4K;hG*3dSWjDLLxpfPz%mpS4%({Wigj8jC|Mw0B)2wcxsW= zwJB%WVLLIwAI9})&ulZ-rp>V{Ci(a3+#1-= zxVm4jBxe@vGqcW+DSi7vlvLk`c^8D*ZyL6=Vb1{Uph$;+@XpkW4My=4Ncq;q)e3_7 zts$Ow1Z$$s;{ia@(<=c1W_Qfbg$b@du4Krva2wT(SzkCi<7#FPo^{-;9B}vCpA~wh zmo9RatDY&TbouA_t@?dQZM0DNkZa@ks0usN6?5%ypVMrkap^MpUdMgB960%^XT8vL0vTZoJGx>sY>%LkK{swWwZh%f&NNrjWss;DQrLA zS!TFTt_Ot^*Vb~8^m2(T8R+%8Ffc=c14*3XM}SM?=D!V^lJ*1k5{Yzc(wL${nFW&$ z(l~{Mms&e5mQ(WZrY)EccUsl2rOu?BM`9f$0>P$}Pg!CF6H_e8GkgH?LsoVWFXpJl zgH-VDkD^{E|LW#lGK+S_NT0Ay?slCs+S0w8BOY6mvnqvURy_QF?*%~2{6I3+_jvi@ zSJyavV#x5+g=NV*IP?~&uEFM5&zf)NeB))!oNZqT(}7S%7Av3Q&d#=Ti?t5l_9yo; z5yD@+l}6nVB+*um%qMz?tXvajNPGq2;{jN&YZLkeq4h3CLkAFbA4ePeRoT?t=eA{| zUv-KsCXWjeVJVmLqqK{q3cm^Hf(E?v(Mq^q#Ufsh0c|fc=h`?|%l1C@v!Vo$gBrVn zU_0b_)7kvnG1s^8o*v>EF|0`j{G4VPwv)ycVjK5%eOG=p>ILd%-q(6XH{kZg##zb{ z*$HT&j2&q5cHkYagM5wN()nIaX!qV?tfc+T2sW5MI`T%{-+ij}-0rXB3XC7BE}&y6 zb7+`4xTqV9dRqP_GPtU+q`@{B5U0e&P%a6J{k2$%`woBZPJEZg$3!IF*A=1Y1HuGTD?lyG6<#r+T9r^H(ue|_!Fxuq-^3mUI6 znXSw2oQG^tibtxcl)tWn$V0=Jy`R>`AziBH`}KBb>I{meynVS&1TQ+QaNcGO0yR|c ziN6U-Ed6;%MemxL|DY{THkK=5I8iha)cA*Q*_ z&!}eKGBu1ODCTBP&HDS3PQc|DBa7>9b}RXmIWWobO65mHP35L(OsX zm&|1_G3`6HV*+dQ5e0>Y6yjZluDPsA9plKeNa3X*ttpohy%3Lk`^b>uCUnCD@=VV- zL{hH0-L=OrSM}-sw)UvM15Zws_neo*d(;R`0he3`I@&L2n|ws;Sdt$5Oh8ukd{2aP zb4Wn$g=y5VR8~O$Wr<>Uus5gU1MdI^EqHB{8bqRYqNVDQ*Cf$~|Wk_z&&}eNi*)_I)lk98QEoFA>BjRdo|H2U(kQz4dOpiotwSD;Y z-z`7|Z4pz8*bQc1oq1ekre)#|r1X<+NH8>j$^G5*hcUId%;4OG`APF839-_E8G$5i zN0N*~{NI=4NUF8?O8q}2$4z|mvv!$&qrDHeqlq?rwpYRmwoc9Yy2CRU?(fselLz{5 z&C-+{`v@<1nM>vVvXGe4eBYgDT{$SC9=GV=H~)C0NxYxr$?%2MlDHczD&xRZsV*Ss!2}D2UUq^`)Upm z?mKQp*ZXD&i@P(6f!R1*0)CCiVbOXX41q0fDQ>@{#14+dTotL7JC<>8!F%jp_8Ih@ z{(67cXystM)YvRgNtJhrCW$#uR;o}MBZhWqs5sdfFj+Cc>IjIYv%eeXNnJH#FWke# zzeFph935)b?Ih~!3Avqb^L+TC`M_x$jUE)1RFYeBSGVIlFqvxchzw!E&E}Xhk%#TU z`?;0o1)q0&OeszJhzyD<54I8N^+x#+`amL06lk;{v#y0zy+oDRW0zRk+J z2}42m`iF!M16duv3HFI%(|JFsY!P(nX)Sv$FI~exV2iU06qm#Brb*;nZ{t^XlG~0B zHWi_GKKhEL4+_aC=yW1$Nq%%f-15%~7ykrO5O5U3KNg-1mwxLe!w3GV;8)Q~klE?W zNKDLk5e_#vcO}*n-c(tdB2+Uar>~1kTliZqWauzY+u$4M(*~o8g!kZt_JfA5bx_eh zPmP8!m71WY5lydjUky+u{VE!g;GKOHRJH9R%MW|DnmK8kE|arFs=df7*Hu06y+IX! 
z#^fR6(_k@Ax!tG+!9ITdsjlv4we$WyVy&$YDVIWuct7>~IpZen`BQI!eQNa)8AIHk zzppv_WZEToqh18z;uhm>RG1)PA&f*Mibmy5S^YKE!&gE7e?n4s7dAvyW{y_9t;)XUZp8H2^`!nI%RNh+E4#pL+nuSR;~B_dWmFz+SbqQ%aVeoSJ<)^TJra?o(>uw z?+Z)KZBDDR|E@s3Y~a}y03Oj0bpE$V9>AAcrh0$nWYzO2_O(UdMZg2BrB^x+x zaYw>Z-VDy24Up;0lHm6Rdn`-lTgzKho$-d)7q?@5b=Fh8g#SGMk;%zi zDNQJ^9Z3lKotEboQD1kQ+Bf%diBjE&Ow-xWMt%B2Eo`Fu&M0e|pyrPie{rp5E2?Y_ zDWGKw@h)TE=QngYmqnd^jMSenDSR@VGoE38I5?^_>22188W-0V(|!A%nZ_p0*t$S$ zIE+4^xRtwYXTPkwr!QDFk3fz>2Dv3ot}i>J1r$o;SFN)4>NM+_`cEr|D*t?~!MQtK zG@RdEm*AOQ#60{eO++8|`0|$Wyj~06#CWE!$k2=Hzg2^&7On2)>*Z+^(YpAckMs1j zg&zo}Do_+bZxPdHf~D7OKCi`#>=Qq*?0b+nl$KrHYxm2T`#s0O`6Nw!w8uy!h~UPD zUnuM(fl?G++C!aMn_D7(Eb-wQ7{&C?91Fn(w zuWF7@!evRBUXE3wCV7SUb9wG%jL6EU5v4ra@C)!rxvVjA`14T21DSuav>nc&7JgDy z9sg*O^b!fT;r&_jszo}@R9_&f_3#mg`0SZ`%iU~~9p*g?bD`>$==mI zgMcx$2&N`Zt?94AdhhQ~p)fYxv5z`_MxIn4 z2C;hXpO(>Cp=wz{s{o>+MsthTEc)qu%t<~9^)QkoNh~t2EY|p*U}&{i4w5FLFi`B@Z9?i`l#SM>1 zFwI##0|I)1jC-|}uRDpjGR7XmChU!oQQTf4XuWq&#@%&v-{ym7x{ESPD`|5f%I50G zth*;>#=F*&&v|o7c(Aaq>`Rottwc(SPQjqb`sM5n0zp50_=kXb8Mt67WRxTkU2kiM z2%@YM2fkDfl=rs#>+ zYWws3h|!qWMc(=4yN0!Mm~)nxR^^9+H-C2NVfZM2K-(Ijn;mBL$1#%{iWJ+bw& zj&`LNK`k@jxrU}>3F8@*r7=A|fyLeaiVDF7C+@cjmvi{l7~p%P`M6cmN<5gj9+=(2 zYE1fX(q?6JnD|?LwQZiLju(KHhsTm^Xn+a7pi?;nZF_4ct90d}S{{IwyDq-FwI?UE zct>kl+Zk9JS@y-74mgUnl6(UF;Gz1X4SY)m#ktK_X^`#3;0g$zs9Iw~(PB1dmFY9j z$~;{MFpqZ52@{fW={q>*{t%9O{P~2cstFC>S$z-SYues5w02<14C$>o{i<2alw%ww zD~hzfV^qbFoD$A)0Y9~@oP+NdGn+gNlzgqr{MBx~@l}KOiFDbA$HabrzF!ge(t^eF zO+LP^oZFAj13vMH+Xgr7X`8s-Y(w<)$W#I5-*6oPqPmg0p;sU|{uC;5YDcdKN8o~z z&aEz7>3M=&W|!=0b0{ya7x)=(8S<=9HR+0c%nmd7GC4P71rPr#=vya~9GZXiQUd=s zTJ^I_Gci;3OgA%bSvfCGYqVA!Ta@hKX&|4f{S)XtpK9+Kz5eBm>=qgE>$U4E5d#wi zDFcp6ZW4M5opjT)haB4_=9f}N4C=Mb&lat>2~6=m51+=!wOV3qCQ&+y;W5u?++J>qrfDZNgNcqpWlP2gL&`m1X|IM^BcABF81cGJzjaS z-1JByF8kWTy?dsc50?CLPS9V9OyYbw_?c0-!H>*z*XnM=Xvuzm$3#$ZYoJw(SZu`8 z@)4R7S#U&{`QaoblpqfFhc*mP^#|H2wwd3%SMLt0om-VNF^UdP0ahYn8}C!a&5Fwj zlnxtPV8wS$ny60sk9@mLmQcPf0 zbe`u;6$O5#*Q0w^=L)n}OkgwVvYX(j6SM>}R=IIi&<&v(gEo zto1ABCG?mlVt;}Rb}DepG5=C6RPLfFiy1Ik9{n+Vju|3=j40)nbPIt?n(CK~EMfP= z1iXx0O!HR*ClB+$geTLC`DH-QYC#-F@(A347q2>eG}i;$FazAQpV}V|hs20v8(R;* z6{en&l$KslA#f^ZaFX=*^@zCLF76ip4P~QAi%s!-^5FN>x%k{u6=dxqE8}t<*}A^W zbfS!9)9WCZ4BmMs(pqB67V2Z9UVrXUe@MFC0P*~RS#5}>r3sKaZ2g3YeC(2xpp+4< zZ|KAA3`M~pUNBSzt@zaeuXs3W8+mHaR`NS%%bol!Sr_ht{MPY)^kii;6KdnB9=~*k zyPUFk_@VST*0Ds<1)sUG_xJWNQZRx63!DR!akB!mCB5o z=dxHz&+`82fx8&rANrcFySnEWVTzAxepD;2&Ab88XKf$Xcl{)p^5L(b6hbe=xo|wJLKp&|1N7`cAvrPU<>V@4r_T$RLUSPxVxqdh1s6s&nT%`YD^=d3b~ zcmy0~hf$(>z>3FGi4`8f#Ao9t5Z>8V@M{DWHLrn5)S$ch+)Q=Sj%iPljNdF9p@FbS zX-9cX@IIKppX(*H&IC-PMUmI4y+8U?_082CdwYDAZ2QJYUy5T(zd>F$j$gJ^{v8CGz2s^jDC&P$~-gz>|seRS&)oT7fx zark5~u`A8CQ8Hr0wCp6N^YHOioyH5w$Dd`eI%Y|4(WhRF_#~F~!y38>lCRdeU7ra+ znS+CT*wm06C3YBy-KKhIxs#^*(Oi#p!qY8+FZaps2l9m=W516q&sBz5SZTfCF8XfX zwmTqa$+vU%C6q5H0IG-Kuk%{;~P%~EW@xBX^{@K#fzHX1l z@9lM7fdvbF0g$?Ah)Wotj-HNV^pcI(*uO(fMxw!|p&u=%hZOeq z)um0zzAgTs+2N_m_IY>ghMN9;me@MUsIkh$z0=1*b&HGU&iyF*In1n(1H=I&Tv}--oZU>1%7cw&cB1a>nTMpyd?bmsXiP;miG_wp zVouBpHv4hPv~=M+cN?xIwcQ>iHV~iP_Cz?sH|8pFZ>jEu&vC#?U>23LUNf#!`Y1!w zzO}l`&}?#3ZL{p=tU;yBT-C#L+S;|c0FvR(&s&c&i=_4D{l=V%6}rd0IeEyy4YY7_ zG1SXv4qq-rJ1z99ApOU<(f;PER=3EF7mZEC70!06J4?>oe0dJ4P*C~`G~@Tn!A1?J z2J`xkdBr1K&~^lY#_X>J7~}kUDsNHaqNOI*Z>f7$)3I(e>MV!G(P2iRD33vz<3k?3gOslF@3#Wbyfzq5G z=>kKd<_(3`3?J;q)aofD$9GAWx+$HW_RqacsMc#&9dR7|Eh-`5pdP^NORoYJ;Br(@ zciiqgm)o;oeXYOiEdbj%Pd~=3xKux7cA#RLuj8hlcsz#)$CzjIx<)Y%TXAFT6C{QL z1c=62eH@ocEVftaUkSddQNM}JIAUOI`%c1 zz(;YNx}7Fp5Na4%U!99Wa!*y!ZdKD2f580M6baf#N7>T(=>jeCR^-$SUdAhPgH8pX zlTca$-7Bb)%=i@XzbDVEk7~_J1Qt2>s8rBlK 
z4~Yq@JQRNw$%lhhTYHV`C}HSjxxL=NyPT|D`|UbkxHYa=|FX_D$?d96{qM>fq@iFp z!WCK?g}=KgkmtiGpvT@9Li-Sn-I}urUuzge%BN511!J(~;70VVqOA^l3NbxyKr+|D z!NmhDxrcfpDb2tfrtdE%&h0i}klHqi`DN96tB@FZCo~;hi#%IFe{phV8D%NtN(71A@6s!UjI5Y*!BL~Ip{{9A}K zO7W%dE3RVw+IX-}S9PUY9Cqp7!hp-BSVTpeXy zslc(`d-lX1U&A&>nwygPCoq$vW~wxpeTTjw9T4cy48U4Y*e`augYMPsP|A5_L#49U5b=Ydg_wN~> z<#=KNG%x^s@%gMWe%KKcl@PYnM9Dqtl1CpSw_C;v*g-?-Ld*k{K4kpQOe6om2b6o`eH=9 zgIycY;mlJ5tbYpaTP842f^TacW$HAK4gW3xcTm!z`cDPGYWh6dF;TCF5eQSiRaXHb zezJ(Q#kbZz*mZd~HF^9$Rpna3rm}Wq$UNlAVIyulaHUgB*sbS6GnC<_svV8Pn_Y!yIZZ%K49tCaND(>QT=r+XLYC z%8sQ^DtEvnICFfh;R&=ZdUOnRk8))KO;~$cZT21yj|mKG?MFFE?cw3?oa1wvr0bU? zg%&)&dQOxbWWaVX+NB-GisTkxqJWX&7x6OnD04AsRUkHL$3Ki6(z7U>iM1C^>aBab zj(I23Hn(Lea2c8U3g>QB%2Bh0yJjDI*x&=_)+B3CTsa>R?AB^qqo@>&eKo|$3*Cj@ zOr4xtgH655O6+vAC9}QI+GK6$=?()tyLF-g>PRc?iY3a62T74&l7+f4#wyQ})UVukjzdI9Rglwyf%+|g_wEie%Xq4UHIT9FUXpIe@q*7uKOuQ!hZ zy5~Z8>$s2QB&x!kFc!X~(J@M1FnN`s4etf7B3m)yZ`(&DCq8}agFyGvgN;rJ%K<>0 zvGZstMHA0q$UEeyi^-F_dHegk3;z++J=!Y(B18q;*hNx~qX^F&?qy^hgvWfh+on-V zzv`1M-RwQzoR|+OuXt=a-x6QTK`JsWVjUrM7jV;2%z_{(I0DAI|EpCgw0` z|3x9T!(~QyM>Z{x{_z?b%T!QmA>pruIp(fj&#T&3G}${9PZ)X^ajTmtAvaYp;pm6w zHI?w(k5|^|e69P;7`^8>V9ie1fpvb!fb2vke@neEQo!I5%yVX0cF%O3%kE`7(Ud|* zpZOchu|Lbt=YU2PB|Sa){aA^xdSbUc6?lsTxcL0S%tA7h%)lvycdUn08+5qVtSy%R zRO;wCRUIOT&lhem`?+z8k!KD2I;c{0`KyT6tX32Tc3J0p=-P3gdHLkhZT3(h(3u%k z*7ilj-h&7CE~w`|t=&1=qJQYhKVgEL#Gs3mMT~u-d5kKRFQeZ#rJ*=>i!G|1j?Vy% z=~nq|MmOhnE9G9#066})eEY+9;6s-p+K7FJpr^qx4PCHEWu(4lv&Ee0;4NB!`))9qOICSH5-RAF~U0gB{^bBeV3 zy&94e+d4<^&D_H1v<}$#mdLlKjm0I!5>d2YN(x6!9->E^nnou`gbZkeI>rG}#yhN< zt6a?a`KbX9R%_}FnQTto;m{WChecZ#xaWf3*%R6&r&)Q6OG0Gn2y6H4zxQGzo$f#S z=;gnLy0x{eA5nhIbt7gkCmbqgROC z0mq3zC+!Z~V4`I;x?n@Jt7&YM%l#|v`g67Q_*__bBi7BPjV66}6615(IR z+Ws3Ff%$$d)e$I5(YYg`?m8~1W<=O2Z;J($eqz6yGNO`Dozs(9W1X`g>TiR}aT z=q;Sw@o%;TdFFmANfKz>upZDyu&-2{peD_>-U{qhfu@Oa+|Zb zIJ)K`W_mKClR_z&18#umy%8}yqg%cqh*yrq1UMBGHS_ACxUxYk_k|bK~kEuqfvb?w1kymKP{(OnGFqk?>Mp-RcKG;-^DGX{Hf z_-Y)BnhiYwuU5zHesa(2x4ub^-@*5{l$?E|!kc%WG$qVQ@;zH4rJIP)%thSjqSX$1 zZFD!XYqJp-Tj9*g%)O*je=!ED9ojncpH*v=9eOOe5@A{QB1E@UWl#tYeIKl?yX*`T z#>MWAy6t+sLU8xj<){drba~A4esBpxntME85j227h)7QBQeKp;Ab4s=o{dI&VCp06$oDd%Md|HAty`O~oqjN*T`f%)(!4D>Hoo{NUzs;(3RB3BJ zJ^=4B{|t=m7&vz9qT_9Wjpdl>edL$$Vy40HzjD@fZwoC3a?gZb8qoeZs8JZ!^ZYg8 z>=@$w2-bO9_q6#A*yp=VSBU)5H6xqCo#wHsAO-(Z*x%6D3X=Jn> z-9%0~4=YgDO@+r}MV4S|Y13&W1buVRnFQqD@$&=vbcpS+cPE zb}|7iwcLUP3OvY$G`@soS;hHvf11@N^5Smz{b4=Bb4CA`y}mx-#xMn*z<9Ru!lv*k z`W~;%8`AAX?OSg-_`6IAztGJu?H&()q!j&HF39-aIya-Ddjg6teN!^kaandfjZ)kN zX(m@2hmdmTJ?SnPA!E+0S!zw=bo>`Kv3|;YX>76hh`jlT9ONZgo^UOe8r$!+Ukax> zJ<(kolAlM~IjRld{MM}$p<#>Od+o%zQb z=oOKBRYKpTvp0^H<|_?%|Nk(n5u(S+hMXAS-DrYf*Lc0Su#sLx78D)!W2`rs<4*|$ zwEy}dyvu@oGs|>%Nuf5KqN1^MT|u8FNrxO48ixOh4Hp)qm7>eUzxB#>(LgaLx@g<) zbs^R!{OW1J$@p~QX>3jfJ=c)8&IDiTWr?5I_AQp_#6}nXkY8P~5&l6F^_(QKQs`vY z9HQ*q0E`;egj``V!rwFZ-g!;Li^cphP7dz;7>kgEGk0!lVq82<9$rehdoqHQZOxT` z7=i4wULsmn?OqlOf|M#H4Eg=mXCT8!^q3)(iFKJ!4za>tlr=CdP-yBKlxM*ZFLiB^ z3xDTK5uv4!JXE+H^^1P%7mwKKc4cyT!MdLDP&r zfw8CePBF@9haY~r-cIqScW)1~p=yXmys;&R?E6wsR<8AvMnt!qg-7~@K)gC?3qpKf zE?zXCj4rmWr(@D;9{SxQ0(S{<<~L)8#?aKk zd>({baWUPqeP>EZg<-*q-J#U!3k& z+90@;^`5cw)``j=dy`2DoaD9i-6>>jna9lMUv0kfGtZS+p=~cnl4nE_&T-URcAdzz zxWDgPgwE8_}0PN#>6MmyNn=h$Cd7o5-PsiCM6eJ8{YU*YC_K9!2ENIV=a{}~h`aWy9?)`du=V-m^- z6&Vq>WdlUe?A07+KV=YTr>NFlEjyTCZtAFdx-#;M9+MEy)T5;7G<;g^9CG77Y~M=k z$h~Yt7+pzhVYMe)nhk3*1=bpS%?Il+FaH?fxa%tmLYQ3opM~_|!h@RHhc0MA_71W~ z>XifZ7MNMnxA;8@kLDKAs5@>8pYVpXl)kysRwbDD1{rkswh34?vC?ZGSj^uO4tdI} z*&@vIjang@HyzZt0B?Dd|c-hMeye zK2EIYd|gyGaP(J*E-04KU1HZ16%2lcsS?yi9N0$0Dd~*WPaSCMgoFEYxx$mZevZ~= 
zL@U4%UgD8m$(NqKspYbIDGD3UN*D8n0Gq}QA%=%PBSj{Q?ZoQCjjI)BTYpH9#g^B! zKIM)1>AF#gdOH1q6wq#ljb-t?NQtkR3$MQV^j?4cZeVuKh_{S;%x9Ktn|`M>9ohzI zEi>&0wC)WJ^i#Pv0<^yE8DG0wH=Q24ofW5Q&H!p$d=QzES_x0**F=Ia>Xe6_w$RI^ zBNihE(+aEB{gN`#urdeK1gzWFkFuMYmw1ie-*Eg!fH?i$q_e|rEPEoc%>0S{+S>B% z8&`yd{I>?*td5$cGaeHx3?6UY{gR#r4^gh-KG=a0T|bzVuLn31q%+(pdB9I%VBwTa z-4d#X)$UD6Iv-^C7k!{o8(5Um28KBBC?hE2r7=Vdc9EORxQ%}oUhCMj$KOi$Zb-h& zt%d2J)j{U4xhgYxS9A)?3-m#^iem%cx^=&_>i)-DmA{Y=iogW;&c)!UHGhicdE&21 zmK!mxkITwa{j5D8TS9rf7lW^lhgqo8uA&lV^ZV~)c3)gOnDls@5Ql7geX&Wxd9F7l zqDI^drgl~0|4{XSvmzdUj%lY8s%|Pb1V9FRH;kKP8v^@DihpjP(i3JQE@IMD#N3!6 z7G8v#?e|*-X2{2h*5asYqc!$T3G5Mh~$ypLy|~dtOfG zm;^6$xFpBcne~~?}(BA<=vGdpO zj(1~|XlAK|>(AHax_d!c8=7jhcnKE|+xvz%ui5RgDv%0EjU8`6`{SjkNtBNT~_6Bu#P2-Ay z8lEZAq*4o3ANM75T8@pMErigwc^0l6Z2jP<0;T>K|1KiaBao0;``F{2aM=KvEP!oz zsa~2{AmX)Guk8n6v5FoOK(S?aszRGD{5bEnqHOt(KQK~;aj@6TZbF(>&VXtkFSpvo zcB)Dz7KP-F_C3^Zr6e&fj8L_Eh1y1ZF`=`E4$brwpJ#HRgB;pap-LMcZR@yG+Up{H z5Lar`SU-8upc<)GW69Jubu{Sd(4Hu66@LB^koxsXIKW~rQo6)@qWu&#UY+u@eD9*!l?#C_RXFt>nUr{z!Q<2FZ3Z#1{ zagB;`)n-?8?6$}WLyzhbDeQ2FOf=+E>JwHl9-ZF#M>b%yeyj<-3r5@sv_6wZ6MHF4 zSKU4z&N)KCBd%yHJjJha%_2F7T9)b)6ui}TTraZ1s4EkDVz8;0W0e`0)g0JQvcT)c zI~;0hViTuB;M}ry4~2gzXS64S{nUX&zFYbZ&f?9PZ>8vnA$P*{UoI0b#%%Jt{e<${*GS>CDq>K4OKtg`oMLbvvKp`?eDBPr_)7kPW}k(@mI72 zze&zD-sqr8jHc%QvfA)iJ6+%JYhdN6d^XdKxg9wqS^j*P(7mh1*bvXTn7fHvoavfv zlM!WIx|w2QSo-QGzK2&KsF@Ai8b&r!Yq?OPlioxAFqsPg-6H0&GwAvh=(J`$jHK;F zj^9ZOR(`|~u!E2Uz!n?%fkR12kGUd_bvPl!opkBunZ@oGAcC*Ok0p`4`)Wq7ebb^x}G75NOzxXLH9!Hto9wMUCd|*@N(o>93HMFYY`G5N8lm zfoDfD)P3wscSzbNrICC$D{Kz;18D`6-?H4MEm(_lKKd>&9fUyT?5J6+a`50C3({)c z@pL}g`jUa&P&@tgTfN5*_bp?`lE#)xsu#tMuKN0aelI~~C&W1O;m&V1X}X9mQTzPY zPL}KVlw4XaH2Zj?=p-kn$=h!@W-RHQj?d+ity_$?>Psv})Znl9d#K24_LEV~$%Gix zs^pWYezX=`hbRaGdr|32UT9Fg;BjkHdNa|(cxZyn*=+oSN@bJR784Q3eo6r5nR$=P z!`qMWBg7B1AX3Ab%Psf^`L_*HLwgdw=FK0CpkS9PqBd-fbb?Xyx(lktK(4%<#dCI1?D9O%7Ap|om zhs+iDojIcTl_VxO*$tzKp!VPwyoy6God;*g`=1WmTzyUIu}Jx*{D!GvG(Mme0& zpgE_9IfFh3-{z}7)0@a~0FbtN=de)-Q1S)Ba)czvrxSu*t6Y{k(8yf@nyy-FBI zu|p2-Qd?*Bhu7K4P6h?rgk1v!q1H)dD2#%JyHrt-gBqVlwj~wniHz2r~`0vT^X+jxAkTycpcmT5@lZ>yF`gQGL>T@#h zLkj1RST{06Fo6wO%DgA#4Yjfy03pIFPaqU>`^7lm`@Gk*ZNunIcet5n`deD}_}?6C zO8|~}H@|@RZqwzmU9<9)`bCn;KYopWc5OSJW9tDp>!}{^R_`WiV$w3BQNJqknnAVk zE>1&M9vo^OqfpCLy01W7rtgu-ql)AA(v%aIO~q9$mJUV&WnPore%CD`=`pl+TMo&N zw=gRSv%R=ZY3X-1A?UB5xov1?Y-=ibvoR}t$Q#NVb1XE_wmB|RD$=iAZT`+9G7mp| zJwuFp5d(kIT;(rKRkJzSiA5f;?VGXM(=(AEh}58lkDOot3Dvg|Ia+T!oj0E<1FjwV z-h+A)=1Hejl%qd+nLiWHp4YVO|gF=~&6 z?+V{{_nrDG_8cCz*5~O;yHQF%(Sn70f#55;W-!E3CHm3UESmtda&W4%x~* zZf+Z^VA6i`zmsltOe#qQ>fUAOFHS8EB#qcLnrBG~oXxHwJdVyY=~gU8X(715)hOsM zKHv&(#r;N$=lCm1|ISl%GMnabYIJ&2R;UN3$*~FC$`3nN0g<#b+NQ+i`il3Gclo6% z+Q)X~n27=k(ZG!1pp0eMDj%>7?H^+@& z8g?Z11jN;wb543{uBSam-PXn}??Df129!MSAKAW^uzO6ar0^ii)j@86d1SF3|;F@ys<%I%_p#RL6fPxqGD zurYYz>s@jy{%r1uaoEwI;*nvXFc_Uv^tO5jcf_Z;%pv}u_ev8p>y5`EIBh1CW}|6r2p!d@jo~=^oAJB?|-0~m)Y+N$r+gAfim`? 
z9K1b#F(S>zvjO18d(J2FD(q1G_SDYE(TEj&wLs}5z1od~1vvCpLCl!Hj+MnKC~Zax6Av zM@yWXTk)#qrX`yp0Q7LWF+;~ISjf!m-E)d9xuz{CBjh>}j^iF)N=HTn4dm!05)tX= z0yWv+rPn6ZkXi4Q7~WV10G*Yr3;vC9**$XUKeYhzF+Y+W7ZI2cgnPjHF>{I^rcTi3 zPDscO9&k$b4(+EtODrBh8%~=?-En0StFKcm&bY>dKDHlAi|T4_6xPfD;&p92Zv7nn zo2p(&laWTee7k!vG2;#D11vDI1n4+LORai_jJ1*KTjvj_k$Rl;Rr{`I`7!Fg1F>|C zoKFP0XJr;v+c#y|kRBIQYgC0_j8_U^pgT}U?5@0NDpLPWwK*&T{)A77rq5=3yVbll zAvTE3L(A#j=$(I_VOx1-SL8e9ESnYu^>?UjxLO!$VP-m0KvmAEw~d)KKQ|L||Hc&x zzQI@(KYHW4|3@p)%D!}-sBti&06lA?$(Emib5vbGzq&Ip;!-fwCAQ|$Qm=Z`JJvbf z5kUHpGWlUD_N*fb4zSFkvMf2AFJF_IKZS2j5xE+wwozNSnp6|YFBeV>I(+FC6@PDP z6lZ0>wnsX8$%i*o34X0A2A*NwC=iRfb+D-t7+s`>dD_WQ^j+8PX6GYi{n205Vintb?WUO={@S?h0N+e0ax>d5-u`uud+JRE zYic3SWIW$7wcFF5pILaM)<++(qVIe0W&~SX@T`HoYa-(;-V%sXMgsr>>3ln#H#3^U zV!>Ra+jq*AH=Gda2#=(({?xY|HqE*=e|_Fwmw4B8mJ9g!-tgra{QNwkjHKMB`Xr8I z=jCXo7VoTz%G6LO`)#(CI}fLYw6B>1640^}L5is4Lv~F!rnlg$vkOAY6wNop)a9LRYK-2j zPi3n)+5o;t6ocK9tkZ7Z%$!p*uiSa;(5z?p#-_>61D8ajRmwkf6S~B2Qum(W#>LbE z229B%jQ&i0rQ1onabwj_(&6YYm=N2+;xyfva0Oj&hu9PYz_ro)eHTKo^RZ4^P4jQN zKsemBG3Fg*gl#I%$~6FJV`e|o74~>NKxuaQlLwFu1E4+}KF{~qmT%b)!!V|?|D+Lm z%OHFt><+IJNs!U-puo@nN%%f}F`Aqol7(jEz9Ya=jX#trsGsUL)R{_bB;x<(|T+6rI>K#Yj&QF@Q7_p(D4Yv zXX2jYwE2ovgJwRAlbXZIA&LMtG;jB5L>j$#bqJqRzK`!@j1M0Mt@HKz8Hv}6%^yl% z;CZ-eS0{L+p_dvvyjTT9!_o5glT_1Lm0P#h<>aVVA5v464T zvr~)?!%`dLsO9OFL`L2H-wO@iBDr(jF-3@@onvTEHEO@`FpX;px%Oe)#c$J3$NRoc zai{zUT@RyW;iD8{Pw0-^2x*Bf;|RVT6`9t;7l1rB>GvHX4E1_w>&4xy2bh#9e8G9n z9$=86O`bn)0?hlwAj{9Fc2u`t~0xk;?S^0c{oWf79?J=7y`DOhMOR^mah!cilm zRxE{a@^#7pUhzwHkHurnyLrvrs>=c&Vq5{<&rIXTHc>$DIsNe3LQd%E%H$wdhk@5! zTg~F|ZW;)$dCP3+9wFY9kZdoYKPd;SYo1n>7XETh{@$|ufrnIm2A{fxbhm|buHV3I ztYleZ0zTu;*5ApXNXkA;=8MNpTOaC%5DyC*xsumA&Mu%b6?0zvh8{ zDevd4Gl1(l?ZqoM1N<{Sb#$K2BVg%n`259%-aqzhL>Q+yy|kGf zPLSSKtzB1Bj>EHWS~h z6K1iz(zP6w6QCcQGqy;*{;f`1kBb9LP7Ch?G&+Ym3N0;7eAY~Su!f@+CFt6|-+R@# zYMFVVb3vkQx~sQZoZ~)gXRxw(OZD)1h@nyb#|Jv}b2;C>KYt$i47R3c(7RD}hWX=n z7eEeu8i`Qb-DaBT2S*CqF>DEFvgj4HHu^WJiM<+ehgeXM% z6C5`d^h9>wxU<@-<4_a%GgotZ=Ag-9yx$R{@2dnP$R!GG;e)OTa((16-8YX~nJnK9 z-~6mG-s|*osim}^#2h+P!)h9xr}?BILl3Quu4%#qA)V2lS@k^~DA8MlhgvU;CT>n- zD{uQqe-~C)QtbLmAxF9N zI!2Z|B+0HpDSWqqkz{I(ODxg`pQ8f*5XpDGF}_^RHEHU$WJ(=2?hN|>a6Qd>vF=E+ zX7mTm!M^frr}k&Q_6eWmHPh!9J850bT*cXV*GHwaS4t7o(NRxX`Zk(Ml9no@6IGEW zNx=0PS-92F=lzk|6R*QhwG%Nth@{lpH)O+wqi8Ri|3IIc&|yLQqFVJ0uH6QtcXs0} z=m5NUQNA>;ukSkr%&1XbyvEGQj(t5~hQu`v{hoQm3j)}Q#vTjSYH$KV18Fbu%d8*9 zz{sC+xNqExRH=^BHXAs&`+}rQKJnf7wWObopS(fgaJK~ zg6fJ$c6@vi`5EE)w3~Mlfa_ZExMS|0F+ytYhVvS(hUe3jwKvhxl=Iom_3BBlRy5`D z*7aKK$oBlDNggE@!jDv>u7Dr(W7p+#!>Y~16dcoQ!GG33)zE%^Az zQhBTdlEBehhD^XUjfFC%2VQi!swH%SMQMhh8la75)1PZq-ZPBCL^OCZFO(X_AyV*- z^)bHwBdNGI9yQvgD}@fR-SvY>W!MRQn|O9~-Q?=!R{o?Qw5JX*D1Zc+9E9l`>SuS# zikafv>|{D9&@|wnT*hgd=FqY9wf<3n$DGXwm(s)=9lTkk8vDJkYx4yRSLq1TbFeU= z)72aGE}c7RALrMI5k<+zWq1zOElN%!%h}G1YfOY+#jX#J>s5?cIM24q2~F+nXJ*%b zJWQ{>JK2cvT3$pjE<5D>+hJWK!w=V2Ub{uZpv<>x<$W@j42d2ON-r{*B|Ot=8KXo{ z#`LL4scW&_AE+!h)Fdm-v3|FYkC2IA7(u$f`U_!T0z0R|QB3>t0#G!{7<2Fh?6cWI$TQE8&;=gEruT=kcHaftY3RxHKb_*gT(vD`tW2qX*P{-)qT|FvUA(O(Po z;de2gq6Cvv56u0g=dp^!QGe1dX{b{%;VDsz_Y@m?Ctk) zkr>G38u`aAB=%-#XK@v$Oe8Z6PY1&ch@9fXS!J=+zURUcT0!QR6Jav)I`#|MxXiH}ns7Am|Kv7rT_s-yAXj;t< zh{rf+mj`lm&z^`dhdECwp7vQB&cQr=_m+;no|Lc%xpMSAn32f!SJ)fVYp-!LaWPR} zL8c&4zBz=$%s%vnS!83_-E_d}d|U^E+8l~{`=|Y0O{IA)&q4M)223)-bLH(PL1Q*2 zNXGoa)nwE=QS%7z9=M}8nz>=>m~}@8NReJ?>$BzI&Ovu8`y*sH>dhr;I+x}#fhV`K z!H6p^n7-;hJ7&@MY-i<2A@feFOM$;yco3P1Dfbnx#*SW$r`k#NA(US4h~u-gaV!bV zz@ufc#D&>!>R}|0V4!wwb!3S0jwEm@M#!=JPR5Nj)1A*EkxU?JroKOZ2bmqB7w?5r z6V^g5B3eiO=QA14DkLoePBJEi4~-Um_p=!RD%%;CUf^+zh0reA8XB6;wcqKNHNA+; 
zfgKX`+8NCpP)$z%Qh(a1AkJ=IpC%YOS|on`yCp|fy^)j)YE<+^bUOj2uV{v=RP!Dy zdH%Z>-ib>577aVC{+szt5E+bW#XGG=Xbg3vPg0?p$A8-pY z;{)Z~nD~1lR%heS6*~NG9X2yW0JnY<2UhJkZjGf{s0*&%8^q}iyC~Rq-p6l1X3J~J zmgz-gXb7pH;PSSzS2N7`T{Qv1dogbjiYcjd9l(rrFAH5DB2P7u_uCP`#?%(-Q4(X> z*y!<}ue)42JUqsB%}P`4J7QE|8Kh)GrX)DP*?A~w@IykQAI?;1U>EQmF5t76w-eR| z6;x_Ws0Y@&7JyIAEtDc;7s0dU9KH^W+rO;m+)d`KTWQ3vNH}%Pc!|%ION;*Sg5ST| z@c|2Hk+IR~y0L@rCd?{FUMJo(RduN>r#DP9sFpg7Mm7DzsR=zNu9_rWzBuy?6|t1J zNmY-a?S&OXS8g4SmQ0ksDyjG{+Zioe>-?(XDG9(nj>z^uK3b(5Q}xGF?P|-=$?ZdJ z<*vLBX=&0SZGLDye7Rup2`}Y>xlS*4xCsl}dDKQ2p8YAQ3N1P-;1AhO&V;VGS3#Ez zn5t?I{@I|keR+y->uW8wn-8l2DZhjy10XYskbBj|XPT!J%uKZEs)OcPAv)740k||P zgmkuI@h85vUpBezHW(CZ15^B-DyAOr0m?jW=R{|0onlq)Mg{sKI8C`BHOHR zVFi!?=s)0n#M&e}e0Qm!ISLWb3JXEcPIB-d zwYOq+WfciLS7TiJdKmY4juu)iB*=&x=l^10|NJ1iD!0bzI?KON-8)70^L#|Cku7qu zPed+Oh0Dvh0*MdYi%>FK@5BaFF#T@@^kbBpy0S(5V>9px)oF1NGabyCr?3$F%warA zKf6idZT}o5>U;ft)E|)=|LVa{X3mW9bRRuL<9(8@WP0LEu+)0e0qNYQZ3|pZRsd}r zH?90|F(HB=UgE!~`BMSDBms}YNylX^T8P*1RF{7?>wkUtB}bs`FmI8F&^7icU)qh= zeB$$(7yk@Otm=C3DnD)g=alo`e;UvU`pQ{(u|1`+@~_Jd5RZDsl%y=Mz(!F!r7-lX zVMf6c!TL0R(hc{!D5PcfN2ug~G&-0-Xu-S8W_t`vc!Vk5tn-*9_`?s4Y+k8~pS=&$ zI9#2SfL=@w6-mHtuzGS={JZ>v@|(#1&!z+r2SAj0=aW`3vH$f*`)>_MqJKSNS67N4 ziET#L8dG-(AU?TmZ(hcf7XRlY`=37$j{T~S=0;;y{O@`K>2IDF7lau8LV0@$|GM}8 zwn)j{r$8AfIS1O`JmBAz5I}_(T|-DRssFKH{I|ycoAL6`@quV^I7c>EoBjXty?3i{^kMywiAUg!kZINA_0iW@!!3VfBT;=G>BRY$JTuOm*)9@DQ@tcWM49s+86o% zL3iar5w)HIf|3LOCu0*AnW1v)Yk}hbq`R`^5VamRD<}RJFY({@@joBrf|&oApA#Dp zBLDL*|L>*ji6UzK1!2m+p5v$4)ea>fkd5E)w*Jx~M!sZxZQT9Ud?SLj@)0S77;%PM zWyo``UM1}?Y)3U|#m$5HR!)OQa-(Q2OZ9+4F~WI+wJ zZ?iui+>B*?l23J64Z?HR_;IYb^EC)p!4L?)ynrBrfELJVOce+`ZyIJl+6g^ukag`5 zeFGsN6hnooqVf7Zh-H8XkxyjTJSj^(0#o3q(*N#XG*4D6kaE`i)z+vS`*N#AU?$b; z+<>&CDmKUhQsSL0V~mLE{SHE!K*;RM5s6$~Z*tfeIIV|JE0^UAPS6VRwrpQ5CQf>+ zW@q^ve7_Xs9S`20fQXM17Z|?(oAGqPi|Y4lG`=q6G5jtrXC;{JBB?`yF*iE=%IE6#}y9n2>C2r)m+XZqF0t8-|Ger0YSB;B~ zx%v)FDJp{qB+Osm{SlN36V`-1&#aCb&>cf1p4hGRd0j5Nyt=*f?xbxi)3H;&^SbL% zaRrfnu~p7Dq4Weg?m6walyxlUt}y*G3;wCOYBV7bD|u8lKfy zwpkN{YSP)T^-=ND292os2dUN5+{gP;7khoNr>0k?d|y%gyQZT*O;0$*d7>9=$hrOx zKwcXT^E3j9QQ3!SXN2SKSAGGUZu643Yeon&lX!NIVmIzo?2 zeov3=>nvITxrKE-X5X(pe}pI9?n*q)bAR9(cJKu2%_0n#t^ju zc+(-Pm9F~2mbbdsd%?pi2(qj;yX!dk?ay$} zA!dkrJR}``8`_k}@39Q02;p_+5w12n+OdvWF4S{^IjgD;!UraVzXreAdTVeid=*~# zYTxL7gKUVZ>-H0`d2wUCcpz)2V>-lKR=FSc|29K+H^(BjWS~0>{oBy%yj;(Xi0RiJt8QB7j}NPliXK0! 
z5CH0-yzNxg&POA0NH8$yAt=B||#8Q$!x`e&5a zowV))?jty9x%Gr$dQ?xlwU=lj7i5UL)r)&o|67f1VX_Y%S3)}iLo(Q!F|;4L|}KX@_EJrO5fAc!m%0+FBGbL94IKr9HnkomFO%y;99gAwjh zK?&$6A5SJuuACB^RVr{NowXsf+i~<~Z09C!*V`HA(>VH}du-0QQNc}+%^Yl$Ys@TG zEal1O%tGw9dk_(0=`ZvhTDE4P6S?>O!{NeLztVWV10UKw7e5J{Z|p4K`JDzK-l~=g zLdHo nIIXJ@}Ok*gD%^Py`pkl$!B@K^nI{DT1Lh8*|(tk`&|juSicyw z7pYH*>o;AGcL>w_O)x7e{h|=9-0v6om&B4IsE)NO%j;l^ z!*%HNN(55m#@o0~P7gC~%L6uUK>|L%$v=J%&3y#D!=A9Q0_D5vEfyN(W#v};?Wr6}BP8!1 znSKXtSNKcru|jWG$$@;^B)L_)J_t;*5@{2U$y26_p#RUE_aGeZ&Qq!VO(2v6LBuhZ ztR4Pb#;l>M&%H<$2=hOhlvCV1&(yhJ0p1;NVNwR|<5BK=h>A<_0_tr=Gd}=64O1WU}<)iN0o5>%WFm$xQB$q8B?Qx&Oi0 z@EIfC07!#;s*4Du)arLT?j#T^+7`JjcdC)scZrWe1%mC@wIfrjh^5PbH%7> zfX#;fo6#+oA-BQj&H}cn7PZS)i>!+jS3tMaUA1f6#m{Ab+31m~PIg!+e?M{FZNYlw z72|%&!C2Z-{<$*vfqWs`D0kq4sa1}fCICGcDQs6cy%~{rl1iw(r>82qUzGG=XIM|Q zH};~6vb@RAL9~W*pQxaAAy;AuBR6hQpKj5P@Pl7rV~HEye9mhWW{X4+AjT zhtn`KG^AX31$QY|uG_fKzAy2$>q~WjsxEs~`}KKcemyPlte>5p7b5OBfiN@jq!C{U&8W}YI2`yalg|2%!%QX=AEA#q~Fc)_oKH7v0`naKHW8R zdMEQeB^nQH@VErSILtN2H0_iPs-vA-Z<{YpDW?iQQ2{ydf!JahK-Fo11%xA{%j{$s zAMIH+Hkt#4xsNefvtA2z6YFZLT5GBxmh&w-jd5c3lnAnrN$Ezt84|g%l1~s<%FLe4 zH3CMgRca-*@S~Pl!?%T7@beEGqJjwEOp!%Y;C?Mf)hHrf8aEI;7zI%9K!Uaj3<77a zX8EV&ate~}Yci=#2~e9w^Tk&m*Me-8xg&#itFxTa*_meb{Ne4^8J*}h76y!*#0_l} zc1@ScO)I*(+?guvTR#=KyV|QeBBMRh>Y4PSf_dM%W;r#8*(GYE`r!1kG3>hv>q+?42C%lX{STelE`Q8mm+Tk1<38 zQ=B7R*bbsheU3S+$Q?z&=QDe45C9Y6AHtpS(bD*WpE3KsQ@byGujVen>o`l%!;0{G z3UG}yqOl37SKoSk=p)hyG^uF+vLSxi5neClRS%KIlV~6Q@oDt=P1ne+$ZcRjP3YlL zsS)R)h*>*ux1w>fbgUo~duIOJe0fw`aBff#Sev0!;(Rkzp7u_6NTKXzY(VQ!U?%R{ zoeZ3RB1poFyr}ntFnqWkj>v}q=GjZ$a!`7=aa1YD{mo$sgpF};tnZ20A?=Ot#7rRK zCBa|Do+keo(K6^cqx6N5lTDf_9BWxs0{+IWB_Pl^0FA6XjhFQW7Rpk~nk@5h4VX4J z?>;&(IYmyX<+(bV#u8hSnnEA-A&-x0wg;Ik82$ib2hw+zngwr=c(57w0nWXW9C&CX zx-JqF6YwPNU&ABLS4T*i=ub}e^%iUH5jrZ@j6hQ9mz4j(WPg7q2pRCPv{%v$7sy-2yiAb>33GakzD`R75W{*vcuGE?@)1AE8L%6gpRv7Lr*% zf`bq9_vj-S8nzthOsO83_sEt0a%ynER^?Dvua7`D7E$DLF6m3OO*^liCmj<%Hn2Zh z32>m}+vA`6gzAiI&kgh1wxF64ZwZiX{6r(EWT(3rwI%&ZW$R>~dYxFZ<*sF7l~zo8 zfL?k5OBZChx zg!;vY<%+cfM9_)z3BdlFKbg*Ib+Zn}aNX-Q=Xz-PBI`lktegklu1%Z(@#W;_fv$(k zs(L%aH!v6Duh?8wM!)e^OnAJ*$|=LJL3?PxVKh!B>RxS`n_uAR5`v8YLH=C!Pb1}C zp88b#ZO*RiE>-O9O^cS4kLfgv1~ikm6u#SwmRzB?XEISIoj-V&FAGI(0ZH8}1M;Mb z*4&wH$zTI$;F2cZ z(Zpma+6mv=KGBGjs~j#Tp?L*AO6tSzYr;B)Emzv}BRA4A$tZ0-OTYIz48N5Jzlx{I zbGz>uWkGMKMEy!VYe5~&=RN1?f9Ku72-tb@Is=aehFH&Vx+2-TyAY?kniUABZlaQt z%9TiCTPKxG^qz072(68txyFzk@?{znxnc^ zzq5~3LJ(X~t-_9*5)zsXqr#hLnyOiEzgz;2YiTvLd@4EFRcE<_9acEOL;le+FPlAE zTSFMGan``G~Ktk;2R0i~K=>@n5<=3}nq5&g z#$^p0Tudbw#+6Fe@K2kj1iqTdimeFLLIr|Aa!;TR_FJEp5XIfdqF6?mA?!0Kp|#a6$ruJA~lw5Hz^E z2LgoPKDfID3GOhsyYn~qo-6NpzxUqruhomin(5x%(^b26?W(7q>ZFi0hB--+sW~u~ z9C**!3|395ENh-AhLP{RjBqXq3w6Gc4frB*pXMhe0O{*tzL?^xN3C|*ziri;qt9@U zuFDEN!R^v31oOy8&Ny%EY7VRxZcLdIH?}ilvTv+t{V0hPF(p=7nd)`81_yRU@@FX? 
zXcbo+gJO)2XEz}LWS(9=nqWN#3J!zg!NUGF9JHBTQq2aM*m>EMwy~7X{o-pTNzu-7 zfKo@CBUK6X9|4;`=PW4Tuc_f4W@o0!7qt&G{hxR)Cy>9fW~L?cF4bLLjNx>M(I+2VswM z7uIT;tKeoCCB7`4-@tqY+<5e*UU00`^DmpL@x;f+)_IK6)TaKdiMv8Vt8j4QQh=v{ z4kNPYM!@a?!ZZ6qZl{QZ23pjjFvi#k&S-U-Z*PuzI2vFo8ds**w*kg?GyxYWk?TO| zhz~-ZW22B!q{EIV@9u4`j%};@TPNhZ&+(O_*9=$M>}I%N29<2x#vZ&AhNbF7$Jw8* z+j=f0h>~D6GZfE8WyzX*v0EH?1i|L)kNLww5B$>uniH_zI%5aMiG<&9rqPF|dSKnM zpfee@QRIuQSt|T0X4^wl6;UeV%4ud(f9o`+h*jCES*`DJ^548C6l^?|PY0mn#ezm?!ED>|j9=X34 zF&+AF1yUTzZz#Ig9Ogn9OhVT__aqs}MxlDivj))o?u87982^ zWd3!!6nbNmDqKS2|oM`iN$Kayz*W5p-^aI zy?CRv629O}N-KaI^9g}|;UND6z}=O;Hr-zp>_c!uD`~v`%)VDSrqbB2tRMgrv5kPB z1wu@2e$0K{hB`moT@VDgf*3@R{NmtnQ_}GJu7C*FD_OegZmH`f?@xs58apgR+w~?X zuN?Z76RnA@WQ~iPG_AW~(Nz>DQeAugUr$!wCS7x9)FyseW1T-8Q~h1S&L4ylJxrLF zu|I-Oc2QABf|LY7^0ro@Nn?lrb~crfnToLyD#cDti=8i9UiyGI4-J@^h)=A!*L=*H z)sG9Y_^F~O0}jMoIxqW&D7EiKHDl%UXxOib7Wm9^e(qjLN;8-1h523iY8V9Lo6XJR z+jtqRDw#*PY&Cv1u#0C+ASj-n{5<&V-W*;d!&gl8HAK`dfnzP`qM$}6^;h5bM_p+g z?FmT%2#1nyLpg>ZacDKtosg%!Hi>Eh-}Xg?zniB?HOQ9o;{>sOjGB&cT;nD-SvU}9 z2Vaz)DqE?=s#W&O6eO9A@3DW||0R*SMmQ@!ZC6JR{~v6Ru;62Icy*Me#4|}LQ4%vN zIuPNZVu*)+0W1(9O{2Pb(qcC(pn6Thy~yq(W(8n(BtoY+iTm^De#ku!EGGDj^+sxi zQ{!-0ndo=SEnDuTcq{v=EJsc&rHO8m!NW6jsfQ_tkYn5((&@B|vyMgcL;$_gu$~P~ zwxL}ZMn)88NC~%N#5w9T%-W7RFQFN<+96cvQ7_9e_}VkM?mp0xm5I9z z=}v^18d0fdK+7ZO1!)W=^zuw1A_}nBkBzfUaRK2|6%geRnjRZFooQjdJ7=GyI7!|C zX+Gto8o@@g3sgVu0iy84_|S?SF_$>liwhMty-d^3?&xBCkmk8QRh8Z#@i_UOt94#l z^}N2^5!F8Gl!N_SG@DgkJ0oAbAoJSaF%eS7Kf6UGuv#N4>k1ku-aaP9dk+ThNG&bY zX^X~vm6Z|`9iwJzE_Z=;+oWFS*LKWPl~k)hy$mR#=5!Z`yX&}ko^ejf4rkwFxDB4+ z*-jfJHQ6k=R_NRPGLK`d>8E{UR4kwxia!U_Bhf@S^mG;VErQo=pm*+aR4KHCgDBvU)S{~1|J!qA^ ztUvubhK2b%R7|{Fr@%q%3VKsqpWCX~fU0FyR5@gbgkGUeT00iY`An{NdK3JK%Ido% zR3oLlL9#|ltwzXbI1J@Nk>(qeu_CxN{7mZgHIR}TQ+-UwXd*my*zF|H{!tfyHifYBM|HXSo~D9F!iK z4#HkXj5KQXcj@I`bXUf$T%ELud!sudK4u@U{yBdQ!e^(COi;2(RmkcndYvPbTo8f_ zl^_`Lw=Ro-IVtp-cyG#(4HfAPMqPZfV9hg+cYl)}XVNG--_(vcYJ@$D=paV*Cvz)& zg18iL@}k=E((2~?`Z$CoIYLSS`6;DhV9RJfb4;p1oxlX&Bm1Ud``IK5Y60;ewg#58**Q(~GXa13b*vNomZ-BJaeU3vaUVC2GNxF}O~OZv1L53_=1+#;IanJ3 zmkochu-=EA&eEPG=^fWtQjUpl_EdwPe$%pGD9dwxT!2+;?*Ph_a@-Uzrb<-RR&7g8 z8+brhM6XjZ$fZGBt3q&M)d-6pzedK&f-Krs3`SejEH*`x$HvMqmx-|(4x{BdLZ`hFPY|+x@#g-pcx^3(bs8n3!gIs?0i%&<4*>KynZ(dIjMaVy&^X%>%=d3 zrx^rU<&t9Of-9R};mVnAa@$Rub%HV2q2a@Jm%vom@}+ZroH((U;?YGqmbDue)PiDa zbJrn`n5si>?oQV*=u;y#IX{2+%;@{N)ZDsD&4d=nf#Ju<4&R6Z(5NlT2F!PakOlQg z#1NL*Zf%v9A;EzlNVN3|O^9~Y_JAlY9~NLN;x%o1`#_?2(_D0KWdRwPys ztT~6BG)kc}ORr2CQ|7X~b1eKu4xN~>(uo8z&Xhc-hwWpXIY}l-w1e#Xm63&bcs-y2 zwbTZ7>sr{T^{wxdG2%5}R6V?lh{{@(@A0(puFasHw*n4))mSs(e-7dRaU19H*kW@j z{jnBJyMYmRG1vEqh~A+FwV4PSvBjXOCyS13U)tE`v zyZaEG+bryAWqJYck;l}&?3}R^8SCr`je1Am_t=I~hQz#dx_aH|$#--<&3 zdQmX@KJWgAd10gvbi3x^7OMc24+F}A**<6avs#thuc(CYiD=aED3;nW1Y!eB2>63P zC(v4OyCh{MhC_rb$!m<>e<iK0JKN&^vC*NfH*OD2(=qfe?Hc{Y7rA_ zI47FP*H@GN`mrN3aw+6>IJgGHZ9lq!`I3~mtNh#!aF8!*ofHH7zW`XsVCruMjBsk= zzhJ6bA77HS^)^nh3EoZ_Y%P|rv$rDj6M`W_!i*4_$wcCBS)G>YE>sLZDRZHqY;$)8 zs8qq&C{ie0l3_GF0i26Ze@=m!ZtR8T0_SrMf;q>v-fxD`XaC>=VBp9*@QL2LTzpvY z3U}ZKSn6n)Fu=8!D`joCT3>$I*Qo6-0rV+Sc#MQoAIb4XgxC;LWb(G3*Z;bhGGkH5 z-R>z~N8vbd>4YVy`ftezHF4Bvfk$kEbj;476TCY#C@ket`>PDzj~sS=0&|{A&pTk# zCh13GhDa5{n7vYQ#Ex(i37W2^FzmBQEt<>D2Y77MNq48Z#e;l<4pJZ0JXjrAA7M*3 z>C=XmBcm*Kx&Oti!9b@*-boEmz(_NVar6wJZej9zM85nKxfjd&q|9#?NzRVqg@9Kswte9uoURo6frb9YLlO|Jd4{zCu7U9-KXJgZn4UkMdK39$6=;uimg34+0YQ^ zVh`Kn?YapKyJBPMCUMfl3jzheZf&V6T-0{!AMWF3G5k7THW5$U4q7o?tRNYDtyd48 z3>nIw6}OwWM^7&skFntEkZD#9In|79Dn|L!mh)FLW{Wu2qQb3s!pt3tzs-e3eYX3~ zyVWcd7TF@Pj8A5~AaL_76vbJ{je`rJ0wpqDGElqAaQiSM(&%ezGK6fJ5Q} 
zlg(H5U@csYy~xT}$t!m0;0pvYdWY@z%Gcs^;6Y3e`GH-!FXN>EOvw#*3rl|vPa-QA zB4n(>S-y`TxM?f029!PQ6#K7nVT_S>Peu6PS&|3Gy6*K1(-LTX5I>yh4FsiF)f#=| zi9`=>8XI~;eaD)65bzqa2)mM+mA zw|u{^qkZjniQTZ^tW{q3svP5XuqGIl+yAwJBJTF@&IBzLrDNNJi*ro@pre5nq~CgZ zeyF1Q$#P2$cih*Fd~Q;msx)!%X@y!FprAO^2JdquPZyNp|dQ&5qZnrTX*m4#>)b9)~q@F37t{;2zKgzHd*x3{BHzl|0D~=}s}y_fJym+>5R|5EakTCTD-3rHgO_Gt~%Qjk=8| z^0&V+a=N~n`SkJ+A{vD}nuHCgMhg(lA#bDRB>dD0gkQA@U{<9ka?yFayE0 zSI;oj?xedhArFgm^|((sa(U^4-GE>TIxDXS);x}}_gz6r({V;V)mEjlf}A2%{chdO zelIw8*B9^dt+I4{(%yy1oAT^yE3X@kPF0fNe0qxuI+SjPEbEWThW@l&V?!tMdX+cn zZgfk24#CV4i^;H1spp^8JzfrwIN|!%Bp$!aSxiuI1YwY+GQlhQpe|Z9GXeEpxKr)N z64Oy%83)Z>KZwO7kcMrJ1PTkcc!Z`Lbki>?4-2MbPPJXbotf{utmoTOiwk^#td8c_ zEknp{#+7;>IoX)}=}K>nb_kapU*~9B#czZ`Dzf5=`Wf@m;-djRn$xkhC$qh~VE0?k zMoHXHm^~B(it*$gmwN0uSO(=Z+}B~s19&pA^n+E{9SBbJezv&&RC`IsdAk++?TRWE z8LQF$?3qKZb&zA1@s)14Zepv3D04#Q<_>=QYkur2DYIvGY!&HOncvaob)~C>414&B z*1uvz`pR9oj9Qse#oCiNw3p5N@9S{;Z_K(ame{WB;Y0SUelA!OA_27jAD!j28y z>lMDCKSC5)D8#NI2Ba`!)0n@iOPbHs+mdA(+h(SyCaR@UI5*0#m2v6!QmflspYGk#?zI2% z;siC|SopXO(2*zXg9#K=B`JR~1A)NTA(}$&;iOQF>7g;ZA-zd#M8g|Hg@Bu+e|9bydCs^MuVc$l4p?a5zh0|2M440SzY@F zCwex0pV&^soV`^p%629hkS##>nV&PXP4(cNl$=1X30_ zGmz-GnnhQTnp06#dBWIY<`S2Ag50d($iZ=A?D&%cbF7sOtsG4t^>oa&hKhGOx zGb>EuD8OR8KGMF&D8~X9K!j~ujkN7g+8wt0CuYr+DHWWjoYxFq;v~J$aAB^V(*JmZ zzm9+zjn0OmCm$O$+;y_gP%YA=2}{0} z9_j+t@GEMJ$fj^MD*E^d^(gsJ$*SY_Hv*%~?_Yg(Xa_bl-!gm<&w%)f%P%s4rsL!O zt+r5;6>aL}wcZ?w&9DxkYLt()`y-Ukx#==%p%F~>g(o+Fhb_KiR7Iob0cip;gyBOuu$qhZ;vv0 z;H`=K8c2+GiK=V2w#`b4u8rwbaIYbQ0ta(;b~vrPr_+wZK__gO9l@{6aj+NJUD}dQ zx7K~AhjRqXS|CeCZ?#n{*$bkZ(wl$Xp3rko1*Acn1oDeqnDxvu?TEdp`rxW1F27hD ziz*(os4T?`2(E4vZmjB)k|HIFFKZY9C6Ds&D30nfxNp+$`FVYdzM>Uu0vvI~dX5XG zd4}0xTt6XJMt5wM#eqf*O=`=X$HtBo4Dq6lTswD#lDZalcfclbX0v4mDdi*A!HnYi zRyXy;kD4;M$U+UtZ}+7tl?jb+RZ0~V09hmxeCZ!lKe1W={MBlb0aL`0eR6ZH6&Bsk zVg+?qBc}G8=qPW?9V;?#r!eCNhkb8NrcH6e>>(qbnsHT$*U|G|W39kW*Lp!kauyXP z;6i*iwY$UH>}$SCQpwk#?Ub?4i-ScEEA<0GWoF$Y& zYVm9zf4ddB@GD==+kK`4iIaju(M&||8TFhjdIyhG;VFtNTBVZZ_KL z0L}hMnBmuBJgT>`<-FPoLYp>s9I^R21sl*Rxq&0@-mem|S;wJ9gzY!AIQ^HTtA{rv z3Y~OpB)ih4W;6feb#3t41AE?UEc2U;gY@f27CY|I_=M9y_KWf zFWpqox_U~z+BqEn@{m+&7J+o-0%1DtD&ythE^3c{i|W#rmmkbt2rGF^|0>)RQ|0W3 zq;)%_$rQk_cEkjrZgt|_oD4}a zg_33+!OXgRV(~R>cG|Q4ZySM%F^|!v1C!P;X?L#l8g*^9;s(2I?mMb_4t+Ieh}4^% zlpG4j1L#_uc!SCZ?vwQ4cB|M|`sT(Hyyg1}w=P{ShoO8H`qyN|>C|Vn=|>Fu$~$-Z zQ@afj{7RFLDD^c&JNjNY^>oZ`hw7JoaC+K{geRC|)Wwc9BZ)%HeB!mc(K0K;eLODV zdx&i^QLmF1%bhM>Zxikyq9D1c5f2l(>3a&0g;#pa2&}e1?C|R`?#C$|c{6*-72roFR{V=?RshFOPjXHyh-flo-Q6 zBU0)|mlbQ+%?krq{nQwV3(tMzv>VLVd53XVBDu>)``oZ6S*d-^t^B;1-+8-XQh+(( zx+}*NhKD~;ae0PO8@X2%U75TC&H4Va38;PEJN#zBAZ%K!LaqLkWl|D!07>f0NGE9Q(arAJxqcs;V4YvWD?e(ev4*-+`872En z%*cYS{q%TciAe9z80+Y`6r$Iv&TCU8qtBbhl;8;ac!c;A^KyYEM(#>z-G>zm4vj)s za**Z|y1_yD(p~kdCH-iYdS@Mw^uUUNt@-Uyw*uZkqQl7>bPj*G0X6t84jO;<*4Hne zEq=v6SL4|GnA^Y{lBabs6Ex!9)n}Y3xqS2UuIjB?(nb23i#iA=HZqYi)VQ;JIqBDs zDbC84#VAj@-2VM1Y3$?pybnEsvprUH;U}}-4J9gy59yVkx9CPl$Gfh4cA8(BFI#Ev zMxsI0Boz~P5OCR+)6*q>XtB=S4VIz}TThrDOXT>zkX71xtduUA&#b+Y8LlUIk`d3n zJ((gw^11T_Eg>W{V?Nx@9c$-4=hL?K9(vV$Y#pG^Mq^-_@=6wd1E?&WoUcObW={Kpp+LE5{ z(Fa5}BN(6cWsNj7M1?x&^Z$$!L?L57)Vn6YCgHQA_#idoD@<)bdX`bHo8CD`&Qa@v zFWYA;L8?Br{;AS*yC7u^>=xiwGQ~eZU|(IO<+Ho~7Sf>KO}$EI6P4%6d|1j5a0{ESs-#jG{ju)nL;iD%sr)q zXYzJYs8LTsVjyRSatfF*g$lGZL>0y?goso9R-qnBObkpeJTgl&GR(__a=9l^Cdn7R zrQZH>0!`@C+iokt7xoqU%SIjR-fnwt$ddw)?k{VRwECfME|4!cFHA5t!_CSKZ?H5 zS>(M~v<4exM!)z?!Y|iD&z%>%9iN&~VQUwcUI%lox%yij&lV1(qPAZT-l+_el{-q@ zsLdU3E-rV}4n)hS6xJQeU(9)VM6w6`U$j-oB-`>Ki@D*>ba+z{$`m!_oBF9W zTN4yA)TzFPInNz-b6ljnU+e`Sjv^CmHvP4tm?`&8CNo`eV>UJe@2 
zC!;qhkn+JlwE)XatFAts<~#e(-L3wAL;HGsW*x7$+nSt;RH-+v@dP&I*8GgoJGrK4 zWxxVpNu0~C0xX9$veoxKE`0OH@!_Ry$7LEnFqf5_*P3bmgs;DJrYGKstb(V&k)fxj zs+YwGB7#4Km1*bZLn;>Sr=1xR8@{cZm%tyl@S}6k|8F2Nc_hkosS7j`kdp4Znq4Opn0JbC!`ok;R%0(-OHjLO$AOePVDfy?oqIDRnUS0 z&68{Ih55b-kGt$5Fye{i- ziD_#8n6jp@`O1q6*1owBoh(L^58MnkGOdNjzuG;@aaKoqy$Nft&QF;4GWv}1YfK%I}=~4y!qA?3wctEXq0hlkP9!m!Th|4 zv`umfs|A3Ao~vy*&ykpTiX=eO.nii.gz +with open(test_csv_path, "r") as csvfile: + reader_obj = csv.reader(csvfile) + orders = list(reader_obj) + +model = MySegmentation() + +row_counter = 0 +for row in orders: + input_image_path = osjoin(input_dir_path, f"{row[0]}_0000.nii.gz") + + if not os.path.exists(input_image_path): + FileNotFoundError(f"Could not find input image at: {input_image_path}") + + #read the input volume + image_np, properties = SimpleITKIO().read_images([input_image_path]) + + print(f"Segmenting image {row_counter:03d}: {row[0]}_0000.nii.gz") + + #segment the volume + pred_labels = model.process_image( + image_np, properties) + + #write the segmentation volume + SimpleITKIO().write_seg(pred_labels, os.path.join(output_dir_path, f"{row[0]}.nii.gz"), properties) + + print("Done.") + + row_counter += 1 diff --git a/docker/template/src/nnUNet_results/Dataset102_TriALS/.gitignore b/docker/template/src/nnUNet_results/Dataset102_TriALS/.gitignore new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/__init__.py b/docker/template/src/nnunetv2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/batch_running/__init__.py b/docker/template/src/nnunetv2/batch_running/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/batch_running/benchmarking/__init__.py b/docker/template/src/nnunetv2/batch_running/benchmarking/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py b/docker/template/src/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py new file mode 100644 index 0000000..ca37206 --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/benchmarking/generate_benchmarking_commands.py @@ -0,0 +1,41 @@ +if __name__ == '__main__': + """ + This code probably only works within the DKFZ infrastructure (using LSF). You will need to adapt it to your scheduler! 
+    """
+    gpu_models = [#'NVIDIAA100_PCIE_40GB', 'NVIDIAGeForceRTX2080Ti', 'NVIDIATITANRTX', 'TeslaV100_SXM2_32GB',
+                  'NVIDIAA100_SXM4_40GB']#, 'TeslaV100_PCIE_32GB']
+    datasets = [2, 3, 4, 5]
+    trainers = ['nnUNetTrainerBenchmark_5epochs', 'nnUNetTrainerBenchmark_5epochs_noDataLoading']
+    plans = ['nnUNetPlans']
+    configs = ['2d', '2d_bs3x', '2d_bs6x', '3d_fullres', '3d_fullres_bs3x', '3d_fullres_bs6x']
+    num_gpus = 1
+
+    benchmark_configurations = {d: configs for d in datasets}
+
+    exclude_hosts = "-R \"select[hname!='e230-dgxa100-1']\""
+    resources = "-R \"tensorcore\""
+    queue = "-q gpu"
+    preamble = "-L /bin/bash \"source ~/load_env_torch210.sh && "
+    train_command = 'nnUNet_compile=False nnUNet_results=/dkfz/cluster/gpu/checkpoints/OE0441/isensee/nnUNet_results_remake_benchmark nnUNetv2_train'
+
+    folds = (0, )
+
+    use_these_modules = {
+        tr: plans for tr in trainers
+    }
+
+    additional_arguments = f' -num_gpus {num_gpus}' # ''
+
+    output_file = "/home/isensee/deleteme.txt"
+    with open(output_file, 'w') as f:
+        for g in gpu_models:
+            gpu_requirements = f"-gpu num={num_gpus}:j_exclusive=yes:gmodel={g}"
+            for tr in use_these_modules.keys():
+                for p in use_these_modules[tr]:
+                    for dataset in benchmark_configurations.keys():
+                        for config in benchmark_configurations[dataset]:
+                            for fl in folds:
+                                command = f'bsub {exclude_hosts} {resources} {queue} {gpu_requirements} {preamble} {train_command} {dataset} {config} {fl} -tr {tr} -p {p}'
+                                if additional_arguments is not None and len(additional_arguments) > 0:
+                                    command += f' {additional_arguments}'
+                                f.write(f'{command}\"\n')
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/batch_running/benchmarking/summarize_benchmark_results.py b/docker/template/src/nnunetv2/batch_running/benchmarking/summarize_benchmark_results.py
new file mode 100644
index 0000000..d966321
--- /dev/null
+++ b/docker/template/src/nnunetv2/batch_running/benchmarking/summarize_benchmark_results.py
@@ -0,0 +1,70 @@
+from batchgenerators.utilities.file_and_folder_operations import join, load_json, isfile
+from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+from nnunetv2.paths import nnUNet_results
+from nnunetv2.utilities.file_path_utilities import get_output_folder
+
+if __name__ == '__main__':
+    trainers = ['nnUNetTrainerBenchmark_5epochs', 'nnUNetTrainerBenchmark_5epochs_noDataLoading']
+    datasets = [2, 3, 4, 5]
+    plans = ['nnUNetPlans']
+    configs = ['2d', '2d_bs3x', '2d_bs6x', '3d_fullres', '3d_fullres_bs3x', '3d_fullres_bs6x']
+    output_file = join(nnUNet_results, 'benchmark_results.csv')
+
+    torch_version = '2.1.0.dev20230330'#"2.0.0"#"2.1.0.dev20230328" #"1.11.0a0+gitbc2c6ed" #
+    cudnn_version = 8700 # 8302 #
+    num_gpus = 1
+
+    unique_gpus = set()
+
+    # collect results in the most janky way possible. Amazing coding skills!
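+    # all_results is nested as all_results[trainer][plans][config][dataset][gpu_name] -> benchmark record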
+ all_results = {} + for tr in trainers: + all_results[tr] = {} + for p in plans: + all_results[tr][p] = {} + for c in configs: + all_results[tr][p][c] = {} + for d in datasets: + dataset_name = maybe_convert_to_dataset_name(d) + output_folder = get_output_folder(dataset_name, tr, p, c, fold=0) + expected_benchmark_file = join(output_folder, 'benchmark_result.json') + all_results[tr][p][c][d] = {} + if isfile(expected_benchmark_file): + # filter results for what we want + results = [i for i in load_json(expected_benchmark_file).values() + if i['num_gpus'] == num_gpus and i['cudnn_version'] == cudnn_version and + i['torch_version'] == torch_version] + for r in results: + all_results[tr][p][c][d][r['gpu_name']] = r + unique_gpus.add(r['gpu_name']) + + # haha. Fuck this. Collect GPUs in the code above. + # unique_gpus = np.unique([i["gpu_name"] for tr in trainers for p in plans for c in configs for d in datasets for i in all_results[tr][p][c][d]]) + + unique_gpus = list(unique_gpus) + unique_gpus.sort() + + with open(output_file, 'w') as f: + f.write('Dataset,Trainer,Plans,Config') + for g in unique_gpus: + f.write(f",{g}") + f.write("\n") + for d in datasets: + for tr in trainers: + for p in plans: + for c in configs: + gpu_results = [] + for g in unique_gpus: + if g in all_results[tr][p][c][d].keys(): + gpu_results.append(round(all_results[tr][p][c][d][g]["fastest_epoch"], ndigits=2)) + else: + gpu_results.append("MISSING") + # skip if all are missing + if all([i == 'MISSING' for i in gpu_results]): + continue + f.write(f"{d},{tr},{p},{c}") + for g in gpu_results: + f.write(f",{g}") + f.write("\n") + f.write("\n") + diff --git a/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon.py b/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon.py new file mode 100644 index 0000000..b670661 --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon.py @@ -0,0 +1,114 @@ +from typing import Tuple + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.evaluation.evaluate_predictions import load_summary_json +from nnunetv2.paths import nnUNet_results +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name, convert_dataset_name_to_id +from nnunetv2.utilities.file_path_utilities import get_output_folder + + +def collect_results(trainers: dict, datasets: List, output_file: str, + configurations=("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres"), + folds=tuple(np.arange(5))): + results_dirs = (nnUNet_results,) + datasets_names = [maybe_convert_to_dataset_name(i) for i in datasets] + with open(output_file, 'w') as f: + for i, d in zip(datasets, datasets_names): + for c in configurations: + for module in trainers.keys(): + for plans in trainers[module]: + for r in results_dirs: + expected_output_folder = get_output_folder(d, module, plans, c) + if isdir(expected_output_folder): + results_folds = [] + f.write(f"{d},{c},{module},{plans},{r}") + for fl in folds: + expected_output_folder_fold = get_output_folder(d, module, plans, c, fl) + expected_summary_file = join(expected_output_folder_fold, "validation", + "summary.json") + if not isfile(expected_summary_file): + print('expected output file not found:', expected_summary_file) + f.write(",") + results_folds.append(np.nan) + else: + foreground_mean = load_summary_json(expected_summary_file)['foreground_mean'][ + 'Dice'] + results_folds.append(foreground_mean) + 
f.write(f",{foreground_mean:02.4f}") + f.write(f",{np.nanmean(results_folds):02.4f}\n") + + +def summarize(input_file, output_file, folds: Tuple[int, ...], configs: Tuple[str, ...], datasets, trainers): + txt = np.loadtxt(input_file, dtype=str, delimiter=',') + num_folds = txt.shape[1] - 6 + valid_configs = {} + for d in datasets: + if isinstance(d, int): + d = maybe_convert_to_dataset_name(d) + configs_in_txt = np.unique(txt[:, 1][txt[:, 0] == d]) + valid_configs[d] = [i for i in configs_in_txt if i in configs] + assert max(folds) < num_folds + + with open(output_file, 'w') as f: + f.write("name") + for d in valid_configs.keys(): + for c in valid_configs[d]: + f.write(",%d_%s" % (convert_dataset_name_to_id(d), c[:4])) + f.write(',mean\n') + valid_entries = txt[:, 4] == nnUNet_results + for t in trainers.keys(): + trainer_locs = valid_entries & (txt[:, 2] == t) + for pl in trainers[t]: + f.write(f"{t}__{pl}") + trainer_plan_locs = trainer_locs & (txt[:, 3] == pl) + r = [] + for d in valid_configs.keys(): + trainer_plan_d_locs = trainer_plan_locs & (txt[:, 0] == d) + for v in valid_configs[d]: + trainer_plan_d_config_locs = trainer_plan_d_locs & (txt[:, 1] == v) + if np.any(trainer_plan_d_config_locs): + # we cannot have more than one row + assert np.sum(trainer_plan_d_config_locs) == 1 + + # now check that we have all folds + selected_row = txt[np.argwhere(trainer_plan_d_config_locs)[0,0]] + + fold_results = selected_row[[i + 5 for i in folds]] + + if '' in fold_results: + print('missing fold in', t, pl, d, v) + f.write(",nan") + r.append(np.nan) + else: + mean_dice = np.mean([float(i) for i in fold_results]) + f.write(f",{mean_dice:02.4f}") + r.append(mean_dice) + else: + print('missing:', t, pl, d, v) + f.write(",nan") + r.append(np.nan) + f.write(f",{np.mean(r):02.4f}\n") + + +if __name__ == '__main__': + use_these_trainers = { + 'nnUNetTrainer': ('nnUNetPlans',), + 'nnUNetTrainerDiceCELoss_noSmooth': ('nnUNetPlans',), + 'nnUNetTrainer_DASegOrd0': ('nnUNetPlans',), + } + all_results_file= join(nnUNet_results, 'customDecResults.csv') + datasets = [2, 3, 4, 17, 20, 24, 27, 38, 55, 64, 82] + collect_results(use_these_trainers, datasets, all_results_file) + + folds = (0, 1, 2, 3, 4) + configs = ("3d_fullres", "3d_lowres") + output_file = join(nnUNet_results, 'customDecResults_summary5fold.csv') + summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) + + folds = (0, ) + configs = ("3d_fullres", "3d_lowres") + output_file = join(nnUNet_results, 'customDecResults_summaryfold0.csv') + summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) + diff --git a/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py b/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py new file mode 100644 index 0000000..2795d3d --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/collect_results_custom_Decathlon_2d.py @@ -0,0 +1,18 @@ +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.batch_running.collect_results_custom_Decathlon import collect_results, summarize +from nnunetv2.paths import nnUNet_results + +if __name__ == '__main__': + use_these_trainers = { + 'nnUNetTrainer': ('nnUNetPlans', ), + } + all_results_file = join(nnUNet_results, 'hrnet_results.csv') + datasets = [2, 3, 4, 17, 20, 24, 27, 38, 55, 64, 82] + collect_results(use_these_trainers, datasets, all_results_file) + + folds = (0, ) + configs = ('2d', ) + output_file = join(nnUNet_results, 
'hrnet_results_summary_fold0.csv') + summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) + diff --git a/docker/template/src/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py b/docker/template/src/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py new file mode 100644 index 0000000..0a75fbd --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/generate_lsf_runs_customDecathlon.py @@ -0,0 +1,86 @@ +from copy import deepcopy +import numpy as np + + +def merge(dict1, dict2): + keys = np.unique(list(dict1.keys()) + list(dict2.keys())) + keys = np.unique(keys) + res = {} + for k in keys: + all_configs = [] + if dict1.get(k) is not None: + all_configs += list(dict1[k]) + if dict2.get(k) is not None: + all_configs += list(dict2[k]) + if len(all_configs) > 0: + res[k] = tuple(np.unique(all_configs)) + return res + + +if __name__ == "__main__": + # after the Nature Methods paper we switch our evaluation to a different (more stable/high quality) set of + # datasets for evaluation and future development + configurations_all = { + 2: ("3d_fullres", "2d"), + 3: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + 4: ("2d", "3d_fullres"), + 17: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + 20: ("2d", "3d_fullres"), + 24: ("2d", "3d_fullres"), + 27: ("2d", "3d_fullres"), + 38: ("2d", "3d_fullres"), + 55: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + 64: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + 82: ("2d", "3d_fullres"), + # 83: ("2d", "3d_fullres"), + } + + configurations_3d_fr_only = { + i: ("3d_fullres", ) for i in configurations_all if "3d_fullres" in configurations_all[i] + } + + configurations_3d_c_only = { + i: ("3d_cascade_fullres", ) for i in configurations_all if "3d_cascade_fullres" in configurations_all[i] + } + + configurations_3d_lr_only = { + i: ("3d_lowres", ) for i in configurations_all if "3d_lowres" in configurations_all[i] + } + + configurations_2d_only = { + i: ("2d", ) for i in configurations_all if "2d" in configurations_all[i] + } + + num_gpus = 1 + exclude_hosts = "-R \"select[hname!='e230-dgx2-2']\" -R \"select[hname!='e230-dgx2-1']\" -R \"select[hname!='e230-dgx1-1']\" -R \"select[hname!='e230-dgxa100-1']\" -R \"select[hname!='e230-dgxa100-2']\" -R \"select[hname!='e230-dgxa100-3']\" -R \"select[hname!='e230-dgxa100-4']\"" + resources = "-R \"tensorcore\"" + gpu_requirements = f"-gpu num={num_gpus}:j_exclusive=yes:gmem=33G" + queue = "-q gpu-lowprio" + preamble = "-L /bin/bash \"source ~/load_env_cluster4.sh && " + train_command = 'nnUNet_results=/dkfz/cluster/gpu/checkpoints/OE0441/isensee/nnUNet_results_remake_release nnUNetv2_train' + + folds = (0, ) + # use_this = configurations_2d_only + use_this = merge(configurations_3d_fr_only, configurations_3d_lr_only) + # use_this = merge(use_this, configurations_3d_c_only) + + use_these_modules = { + 'nnUNetTrainer': ('nnUNetPlans',), + 'nnUNetTrainerDiceCELoss_noSmooth': ('nnUNetPlans',), + # 'nnUNetTrainer_DASegOrd0': ('nnUNetPlans',), + } + + additional_arguments = f'--disable_checkpointing -num_gpus {num_gpus}' # '' + + output_file = "/home/isensee/deleteme.txt" + with open(output_file, 'w') as f: + for tr in use_these_modules.keys(): + for p in use_these_modules[tr]: + for dataset in use_this.keys(): + for config in use_this[dataset]: + for fl in folds: + command = f'bsub {exclude_hosts} {resources} {queue} {gpu_requirements} {preamble} {train_command} {dataset} {config} {fl} -tr {tr} -p {p}' + if additional_arguments 
is not None and len(additional_arguments) > 0: + command += f' {additional_arguments}' + f.write(f'{command}\"\n') + diff --git a/docker/template/src/nnunetv2/batch_running/release_trainings/__init__.py b/docker/template/src/nnunetv2/batch_running/release_trainings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py b/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py b/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py new file mode 100644 index 0000000..828c396 --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/collect_results.py @@ -0,0 +1,113 @@ +from typing import Tuple + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.evaluation.evaluate_predictions import load_summary_json +from nnunetv2.paths import nnUNet_results +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name, convert_dataset_name_to_id +from nnunetv2.utilities.file_path_utilities import get_output_folder + + +def collect_results(trainers: dict, datasets: List, output_file: str, + configurations=("2d", "3d_fullres", "3d_lowres", "3d_cascade_fullres"), + folds=tuple(np.arange(5))): + results_dirs = (nnUNet_results,) + datasets_names = [maybe_convert_to_dataset_name(i) for i in datasets] + with open(output_file, 'w') as f: + for i, d in zip(datasets, datasets_names): + for c in configurations: + for module in trainers.keys(): + for plans in trainers[module]: + for r in results_dirs: + expected_output_folder = get_output_folder(d, module, plans, c) + if isdir(expected_output_folder): + results_folds = [] + f.write(f"{d},{c},{module},{plans},{r}") + for fl in folds: + expected_output_folder_fold = get_output_folder(d, module, plans, c, fl) + expected_summary_file = join(expected_output_folder_fold, "validation", + "summary.json") + if not isfile(expected_summary_file): + print('expected output file not found:', expected_summary_file) + f.write(",") + results_folds.append(np.nan) + else: + foreground_mean = load_summary_json(expected_summary_file)['foreground_mean'][ + 'Dice'] + results_folds.append(foreground_mean) + f.write(f",{foreground_mean:02.4f}") + f.write(f",{np.nanmean(results_folds):02.4f}\n") + + +def summarize(input_file, output_file, folds: Tuple[int, ...], configs: Tuple[str, ...], datasets, trainers): + txt = np.loadtxt(input_file, dtype=str, delimiter=',') + num_folds = txt.shape[1] - 6 + valid_configs = {} + for d in datasets: + if isinstance(d, int): + d = maybe_convert_to_dataset_name(d) + configs_in_txt = np.unique(txt[:, 1][txt[:, 0] == d]) + valid_configs[d] = [i for i in configs_in_txt if i in configs] + assert max(folds) < num_folds + + with open(output_file, 'w') as f: + f.write("name") + for d in valid_configs.keys(): + for c in valid_configs[d]: + f.write(",%d_%s" % (convert_dataset_name_to_id(d), c[:4])) + f.write(',mean\n') + valid_entries = txt[:, 4] == nnUNet_results + for t in trainers.keys(): + trainer_locs = valid_entries & (txt[:, 2] == t) + for pl in trainers[t]: + f.write(f"{t}__{pl}") + trainer_plan_locs = trainer_locs & (txt[:, 3] == pl) + r = [] + for d in valid_configs.keys(): + trainer_plan_d_locs = trainer_plan_locs & (txt[:, 0] == d) 
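+                # for each configuration, locate this dataset's row in the csv and average the requested folds (nan when folds are missing)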
+ for v in valid_configs[d]: + trainer_plan_d_config_locs = trainer_plan_d_locs & (txt[:, 1] == v) + if np.any(trainer_plan_d_config_locs): + # we cannot have more than one row + assert np.sum(trainer_plan_d_config_locs) == 1 + + # now check that we have all folds + selected_row = txt[np.argwhere(trainer_plan_d_config_locs)[0,0]] + + fold_results = selected_row[[i + 5 for i in folds]] + + if '' in fold_results: + print('missing fold in', t, pl, d, v) + f.write(",nan") + r.append(np.nan) + else: + mean_dice = np.mean([float(i) for i in fold_results]) + f.write(f",{mean_dice:02.4f}") + r.append(mean_dice) + else: + print('missing:', t, pl, d, v) + f.write(",nan") + r.append(np.nan) + f.write(f",{np.mean(r):02.4f}\n") + + +if __name__ == '__main__': + use_these_trainers = { + 'nnUNetTrainer': ('nnUNetPlans',), + 'nnUNetTrainer_v1loss': ('nnUNetPlans',), + } + all_results_file = join(nnUNet_results, 'customDecResults.csv') + datasets = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 17, 20, 24, 27, 35, 38, 48, 55, 64, 82] + collect_results(use_these_trainers, datasets, all_results_file) + + folds = (0, 1, 2, 3, 4) + configs = ("3d_fullres", "3d_lowres") + output_file = join(nnUNet_results, 'customDecResults_summary5fold.csv') + summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) + + folds = (0, ) + configs = ("3d_fullres", "3d_lowres") + output_file = join(nnUNet_results, 'customDecResults_summaryfold0.csv') + summarize(all_results_file, output_file, folds, configs, datasets, use_these_trainers) + diff --git a/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py b/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py new file mode 100644 index 0000000..7c5934f --- /dev/null +++ b/docker/template/src/nnunetv2/batch_running/release_trainings/nnunetv2_v1/generate_lsf_commands.py @@ -0,0 +1,93 @@ +from copy import deepcopy +import numpy as np + + +def merge(dict1, dict2): + keys = np.unique(list(dict1.keys()) + list(dict2.keys())) + keys = np.unique(keys) + res = {} + for k in keys: + all_configs = [] + if dict1.get(k) is not None: + all_configs += list(dict1[k]) + if dict2.get(k) is not None: + all_configs += list(dict2[k]) + if len(all_configs) > 0: + res[k] = tuple(np.unique(all_configs)) + return res + + +if __name__ == "__main__": + # after the Nature Methods paper we switch our evaluation to a different (more stable/high quality) set of + # datasets for evaluation and future development + configurations_all = { + # 1: ("3d_fullres", "2d"), + 2: ("3d_fullres", "2d"), + # 3: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 4: ("2d", "3d_fullres"), + 5: ("2d", "3d_fullres"), + # 6: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 7: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 8: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 9: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 10: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 17: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + 20: ("2d", "3d_fullres"), + 24: ("2d", "3d_fullres"), + 27: ("2d", "3d_fullres"), + 35: ("2d", "3d_fullres"), + 38: ("2d", "3d_fullres"), + # 55: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 64: ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + # 82: ("2d", "3d_fullres"), + # 83: ("2d", "3d_fullres"), + } + + configurations_3d_fr_only = { + i: ("3d_fullres", ) for i in configurations_all if 
"3d_fullres" in configurations_all[i] + } + + configurations_3d_c_only = { + i: ("3d_cascade_fullres", ) for i in configurations_all if "3d_cascade_fullres" in configurations_all[i] + } + + configurations_3d_lr_only = { + i: ("3d_lowres", ) for i in configurations_all if "3d_lowres" in configurations_all[i] + } + + configurations_2d_only = { + i: ("2d", ) for i in configurations_all if "2d" in configurations_all[i] + } + + num_gpus = 1 + exclude_hosts = "-R \"select[hname!='e230-dgx2-2']\" -R \"select[hname!='e230-dgx2-1']\"" + resources = "-R \"tensorcore\"" + gpu_requirements = f"-gpu num={num_gpus}:j_exclusive=yes:gmem=1G" + queue = "-q gpu-lowprio" + preamble = "-L /bin/bash \"source ~/load_env_cluster4.sh && " + train_command = 'nnUNet_keep_files_open=True nnUNet_results=/dkfz/cluster/gpu/data/OE0441/isensee/nnUNet_results_remake_release_normfix nnUNetv2_train' + + folds = (0, 1, 2, 3, 4) + # use_this = configurations_2d_only + # use_this = merge(configurations_3d_fr_only, configurations_3d_lr_only) + # use_this = merge(use_this, configurations_3d_c_only) + use_this = configurations_all + + use_these_modules = { + 'nnUNetTrainer': ('nnUNetPlans',), + } + + additional_arguments = f'--disable_checkpointing -num_gpus {num_gpus}' # '' + + output_file = "/home/isensee/deleteme.txt" + with open(output_file, 'w') as f: + for tr in use_these_modules.keys(): + for p in use_these_modules[tr]: + for dataset in use_this.keys(): + for config in use_this[dataset]: + for fl in folds: + command = f'bsub {exclude_hosts} {resources} {queue} {gpu_requirements} {preamble} {train_command} {dataset} {config} {fl} -tr {tr} -p {p}' + if additional_arguments is not None and len(additional_arguments) > 0: + command += f' {additional_arguments}' + f.write(f'{command}\"\n') + diff --git a/docker/template/src/nnunetv2/configuration.py b/docker/template/src/nnunetv2/configuration.py new file mode 100644 index 0000000..cdc8cb6 --- /dev/null +++ b/docker/template/src/nnunetv2/configuration.py @@ -0,0 +1,10 @@ +import os + +from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA + +default_num_processes = 8 if 'nnUNet_def_n_proc' not in os.environ else int(os.environ['nnUNet_def_n_proc']) + +ANISO_THRESHOLD = 3 # determines when a sample is considered anisotropic (3 means that the spacing in the low +# resolution axis must be 3x as large as the next largest spacing) + +default_n_proc_DA = get_allowed_n_proc_DA() diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset027_ACDC.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset027_ACDC.py new file mode 100644 index 0000000..569ff6f --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset027_ACDC.py @@ -0,0 +1,87 @@ +import os +import shutil +from pathlib import Path + +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw + + +def make_out_dirs(dataset_id: int, task_name="ACDC"): + dataset_name = f"Dataset{dataset_id:03d}_{task_name}" + + out_dir = Path(nnUNet_raw.replace('"', "")) / dataset_name + out_train_dir = out_dir / "imagesTr" + out_labels_dir = out_dir / "labelsTr" + out_test_dir = out_dir / "imagesTs" + + os.makedirs(out_dir, exist_ok=True) + os.makedirs(out_train_dir, exist_ok=True) + os.makedirs(out_labels_dir, exist_ok=True) + os.makedirs(out_test_dir, exist_ok=True) + + return out_dir, out_train_dir, out_labels_dir, out_test_dir + + +def copy_files(src_data_folder: Path, train_dir: Path, labels_dir: Path, test_dir: Path): + """Copy 
files from the ACDC dataset to the nnUNet dataset folder. Returns the number of training cases.""" + patients_train = sorted([f for f in (src_data_folder / "training").iterdir() if f.is_dir()]) + patients_test = sorted([f for f in (src_data_folder / "testing").iterdir() if f.is_dir()]) + + num_training_cases = 0 + # Copy training files and corresponding labels. + for patient_dir in patients_train: + for file in patient_dir.iterdir(): + if file.suffix == ".gz" and "_gt" not in file.name and "_4d" not in file.name: + # The stem is 'patient.nii', and the suffix is '.gz'. + # We split the stem and append _0000 to the patient part. + shutil.copy(file, train_dir / f"{file.stem.split('.')[0]}_0000.nii.gz") + num_training_cases += 1 + elif file.suffix == ".gz" and "_gt" in file.name: + shutil.copy(file, labels_dir / file.name.replace("_gt", "")) + + # Copy test files. + for patient_dir in patients_test: + for file in patient_dir.iterdir(): + if file.suffix == ".gz" and "_gt" not in file.name and "_4d" not in file.name: + shutil.copy(file, test_dir / f"{file.stem.split('.')[0]}_0000.nii.gz") + + return num_training_cases + + +def convert_acdc(src_data_folder: str, dataset_id=27): + out_dir, train_dir, labels_dir, test_dir = make_out_dirs(dataset_id=dataset_id) + num_training_cases = copy_files(Path(src_data_folder), train_dir, labels_dir, test_dir) + + generate_dataset_json( + str(out_dir), + channel_names={ + 0: "cineMRI", + }, + labels={ + "background": 0, + "RV": 1, + "MLV": 2, + "LVC": 3, + }, + file_ending=".nii.gz", + num_training_cases=num_training_cases, + ) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument( + "-i", + "--input_folder", + type=str, + help="The downloaded ACDC dataset dir. Should contain extracted 'training' and 'testing' folders.", + ) + parser.add_argument( + "-d", "--dataset_id", required=False, type=int, default=27, help="nnU-Net Dataset ID, default: 27" + ) + args = parser.parse_args() + print("Converting...") + convert_acdc(args.input_folder, args.dataset_id) + print("Done!") diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset073_Fluo_C3DH_A549_SIM.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset073_Fluo_C3DH_A549_SIM.py new file mode 100644 index 0000000..eca22d0 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset073_Fluo_C3DH_A549_SIM.py @@ -0,0 +1,85 @@ +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +import tifffile +from batchgenerators.utilities.file_and_folder_operations import * +import shutil + + +if __name__ == '__main__': + """ + This is going to be my test dataset for working with tif as input and output images + + All we do here is copy the files and rename them. Not file conversions take place + """ + dataset_name = 'Dataset073_Fluo_C3DH_A549_SIM' + + imagestr = join(nnUNet_raw, dataset_name, 'imagesTr') + imagests = join(nnUNet_raw, dataset_name, 'imagesTs') + labelstr = join(nnUNet_raw, dataset_name, 'labelsTr') + maybe_mkdir_p(imagestr) + maybe_mkdir_p(imagests) + maybe_mkdir_p(labelstr) + + # we extract the downloaded train and test datasets to two separate folders and name them Fluo-C3DH-A549-SIM_train + # and Fluo-C3DH-A549-SIM_test + train_source = '/home/fabian/Downloads/Fluo-C3DH-A549-SIM_train' + test_source = '/home/fabian/Downloads/Fluo-C3DH-A549-SIM_test' + + # with the old nnU-Net we had to convert all the files to nifti. 
This is no longer required. We can just copy the + # tif files + + # tif is broken when it comes to spacing. No standards. Grr. So when we use tif nnU-Net expects a separate file + # that specifies the spacing. This file needs to exist for EVERY training/test case to allow for different spacings + # between files. Important! The spacing must align with the axes. + # Here when we do print(tifffile.imread('IMAGE').shape) we get (29, 300, 350). The low resolution axis is the first. + # The spacing on the website is griven in the wrong axis order. Great. + spacing = (1, 0.126, 0.126) + + # train set + for seq in ['01', '02']: + images_dir = join(train_source, seq) + seg_dir = join(train_source, seq + '_GT', 'SEG') + # if we were to be super clean we would go by IDs but here we just trust the files are sorted the correct way. + # Simpler filenames in the cell tracking challenge would be soooo nice. + images = subfiles(images_dir, suffix='.tif', sort=True, join=False) + segs = subfiles(seg_dir, suffix='.tif', sort=True, join=False) + for i, (im, se) in enumerate(zip(images, segs)): + target_name = f'{seq}_image_{i:03d}' + # we still need the '_0000' suffix for images! Otherwise we would not be able to support multiple input + # channels distributed over separate files + shutil.copy(join(images_dir, im), join(imagestr, target_name + '_0000.tif')) + # spacing file! + save_json({'spacing': spacing}, join(imagestr, target_name + '.json')) + shutil.copy(join(seg_dir, se), join(labelstr, target_name + '.tif')) + # spacing file! + save_json({'spacing': spacing}, join(labelstr, target_name + '.json')) + + # test set, same a strain just without the segmentations + for seq in ['01', '02']: + images_dir = join(test_source, seq) + images = subfiles(images_dir, suffix='.tif', sort=True, join=False) + for i, im in enumerate(images): + target_name = f'{seq}_image_{i:03d}' + shutil.copy(join(images_dir, im), join(imagests, target_name + '_0000.tif')) + # spacing file! + save_json({'spacing': spacing}, join(imagests, target_name + '.json')) + + # now we generate the dataset json + generate_dataset_json( + join(nnUNet_raw, dataset_name), + {0: 'fluorescence_microscopy'}, + {'background': 0, 'cell': 1}, + 60, + '.tif' + ) + + # custom split to ensure we are stratifying properly. 
This dataset only has 2 folds + caseids = [i[:-4] for i in subfiles(labelstr, suffix='.tif', join=False)] + splits = [] + splits.append( + {'train': [i for i in caseids if i.startswith('01_')], 'val': [i for i in caseids if i.startswith('02_')]} + ) + splits.append( + {'train': [i for i in caseids if i.startswith('02_')], 'val': [i for i in caseids if i.startswith('01_')]} + ) + save_json(splits, join(nnUNet_preprocessed, dataset_name, 'splits_final.json')) \ No newline at end of file diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset114_MNMs.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset114_MNMs.py new file mode 100644 index 0000000..20eecd6 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset114_MNMs.py @@ -0,0 +1,198 @@ +import csv +import os +import random +from pathlib import Path + +import nibabel as nib +from batchgenerators.utilities.file_and_folder_operations import load_json, save_json + +from nnunetv2.dataset_conversion.Dataset027_ACDC import make_out_dirs +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_preprocessed + + +def read_csv(csv_file: str): + patient_info = {} + + with open(csv_file) as csvfile: + reader = csv.reader(csvfile) + headers = next(reader) + patient_index = headers.index("External code") + ed_index = headers.index("ED") + es_index = headers.index("ES") + vendor_index = headers.index("Vendor") + + for row in reader: + patient_info[row[patient_index]] = { + "ed": int(row[ed_index]), + "es": int(row[es_index]), + "vendor": row[vendor_index], + } + + return patient_info + + +# ------------------------------------------------------------------------------ +# Conversion to nnUNet format +# ------------------------------------------------------------------------------ +def convert_mnms(src_data_folder: Path, csv_file_name: str, dataset_id: int): + out_dir, out_train_dir, out_labels_dir, out_test_dir = make_out_dirs(dataset_id, task_name="MNMs") + patients_train = [f for f in (src_data_folder / "Training" / "Labeled").iterdir() if f.is_dir()] + patients_test = [f for f in (src_data_folder / "Testing").iterdir() if f.is_dir()] + + patient_info = read_csv(str(src_data_folder / csv_file_name)) + + save_cardiac_phases(patients_train, patient_info, out_train_dir, out_labels_dir) + save_cardiac_phases(patients_test, patient_info, out_test_dir) + + # There are non-orthonormal direction cosines in the test and validation data. + # Not sure if the data should be fixed, or we should skip the problematic data. 
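+    # Only the labeled training data and the testing data are converted; the Validation folder is skipped.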
+ # patients_val = [f for f in (src_data_folder / "Validation").iterdir() if f.is_dir()] + # save_cardiac_phases(patients_val, patient_info, out_train_dir, out_labels_dir) + + generate_dataset_json( + str(out_dir), + channel_names={ + 0: "cineMRI", + }, + labels={"background": 0, "LVBP": 1, "LVM": 2, "RV": 3}, + file_ending=".nii.gz", + num_training_cases=len(patients_train) * 2, # 2 since we have ED and ES for each patient + ) + + +def save_cardiac_phases( + patients: list[Path], patient_info: dict[str, dict[str, int]], out_dir: Path, labels_dir: Path = None +): + for patient in patients: + print(f"Processing patient: {patient.name}") + + image = nib.load(patient / f"{patient.name}_sa.nii.gz") + ed_frame = patient_info[patient.name]["ed"] + es_frame = patient_info[patient.name]["es"] + + save_extracted_nifti_slice(image, ed_frame=ed_frame, es_frame=es_frame, out_dir=out_dir, patient=patient) + + if labels_dir: + label = nib.load(patient / f"{patient.name}_sa_gt.nii.gz") + save_extracted_nifti_slice(label, ed_frame=ed_frame, es_frame=es_frame, out_dir=labels_dir, patient=patient) + + +def save_extracted_nifti_slice(image, ed_frame: int, es_frame: int, out_dir: Path, patient: Path): + # Save only extracted diastole and systole slices from the 4D H x W x D x time volume. + image_ed = nib.Nifti1Image(image.dataobj[..., ed_frame], image.affine) + image_es = nib.Nifti1Image(image.dataobj[..., es_frame], image.affine) + + # Labels do not have modality identifiers. Labels always end with 'gt'. + suffix = ".nii.gz" if image.get_filename().endswith("_gt.nii.gz") else "_0000.nii.gz" + + nib.save(image_ed, str(out_dir / f"{patient.name}_frame{ed_frame:02d}{suffix}")) + nib.save(image_es, str(out_dir / f"{patient.name}_frame{es_frame:02d}{suffix}")) + + +# ------------------------------------------------------------------------------ +# Create custom splits +# ------------------------------------------------------------------------------ +def create_custom_splits(src_data_folder: Path, csv_file: str, dataset_id: int, num_val_patients: int = 25): + existing_splits = os.path.join(nnUNet_preprocessed, f"Dataset{dataset_id}_MNMs", "splits_final.json") + splits = load_json(existing_splits) + + patients_train = [f.name for f in (src_data_folder / "Training" / "Labeled").iterdir() if f.is_dir()] + # Filter out any patients not in the training set + patient_info = { + patient: data + for patient, data in read_csv(str(src_data_folder / csv_file)).items() + if patient in patients_train + } + + # Get train and validation patients for both vendors + patients_a = [patient for patient, patient_data in patient_info.items() if patient_data["vendor"] == "A"] + patients_b = [patient for patient, patient_data in patient_info.items() if patient_data["vendor"] == "B"] + train_a, val_a = get_vendor_split(patients_a, num_val_patients) + train_b, val_b = get_vendor_split(patients_b, num_val_patients) + + # Build filenames from corresponding patient frames + train_a = [f"{patient}_frame{patient_info[patient][frame]:02d}" for patient in train_a for frame in ["es", "ed"]] + train_b = [f"{patient}_frame{patient_info[patient][frame]:02d}" for patient in train_b for frame in ["es", "ed"]] + train_a_mix_1, train_a_mix_2 = train_a[: len(train_a) // 2], train_a[len(train_a) // 2 :] + train_b_mix_1, train_b_mix_2 = train_b[: len(train_b) // 2], train_b[len(train_b) // 2 :] + val_a = [f"{patient}_frame{patient_info[patient][frame]:02d}" for patient in val_a for frame in ["es", "ed"]] + val_b = 
[f"{patient}_frame{patient_info[patient][frame]:02d}" for patient in val_b for frame in ["es", "ed"]] + + for train_set in [train_a, train_b, train_a_mix_1 + train_b_mix_1, train_a_mix_2 + train_b_mix_2]: + # For each train set, we evaluate on A, B and (A + B) respectively + # See table 3 from the original paper for more details. + splits.append({"train": train_set, "val": val_a}) + splits.append({"train": train_set, "val": val_b}) + splits.append({"train": train_set, "val": val_a + val_b}) + + save_json(splits, existing_splits) + + +def get_vendor_split(patients: list[str], num_val_patients: int): + random.shuffle(patients) + total_patients = len(patients) + num_training_patients = total_patients - num_val_patients + return patients[:num_training_patients], patients[num_training_patients:] + + +if __name__ == "__main__": + import argparse + + class RawTextArgumentDefaultsHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter): + pass + + parser = argparse.ArgumentParser(add_help=False, formatter_class=RawTextArgumentDefaultsHelpFormatter) + parser.add_argument( + "-h", + "--help", + action="help", + default=argparse.SUPPRESS, + help="MNMs conversion utility helper. This script can be used to convert MNMs data into the expected nnUNet " + "format. It can also be used to create additional custom splits, for explicitly training on combinations " + "of vendors A and B (see `--custom-splits`).\n" + "If you wish to generate the custom splits, run the following pipeline:\n\n" + "(1) Run `Dataset114_MNMs -i \n" + "(2) Run `nnUNetv2_plan_and_preprocess -d 114 --verify_dataset_integrity`\n" + "(3) Start training, but stop after initial splits are created: `nnUNetv2_train 114 2d 0`\n" + "(4) Re-run `Dataset114_MNMs`, with `-s True`.\n" + "(5) Re-run training.\n", + ) + parser.add_argument( + "-i", + "--input_folder", + type=str, + default="./data/M&Ms/OpenDataset/", + help="The downloaded MNMs dataset dir. Should contain a csv file, as well as Training, Validation and Testing " + "folders.", + ) + parser.add_argument( + "-c", + "--csv_file_name", + type=str, + default="211230_M&Ms_Dataset_information_diagnosis_opendataset.csv", + help="The csv file containing the dataset information.", + ), + parser.add_argument("-d", "--dataset_id", type=int, default=114, help="nnUNet Dataset ID.") + parser.add_argument( + "-s", + "--custom_splits", + type=bool, + default=False, + help="Whether to append custom splits for training and testing on different vendors. If True, will create " + "splits for training on patients from vendors A, B or a mix of A and B. Splits are tested on a hold-out " + "validation sets of patients from A, B or A and B combined. 
See section 2.4 and table 3 from " + "https://arxiv.org/abs/2011.07592 for more info.", + ) + + args = parser.parse_args() + args.input_folder = Path(args.input_folder) + + if args.custom_splits: + print("Appending custom splits...") + create_custom_splits(args.input_folder, args.csv_file_name, args.dataset_id) + else: + print("Converting...") + convert_mnms(args.input_folder, args.csv_file_name, args.dataset_id) + + print("Done!") diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py new file mode 100644 index 0000000..e307e14 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset115_EMIDEC.py @@ -0,0 +1,61 @@ +import shutil +from pathlib import Path + +from nnunetv2.dataset_conversion.Dataset027_ACDC import make_out_dirs +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json + + +def copy_files(src_data_dir: Path, src_test_dir: Path, train_dir: Path, labels_dir: Path, test_dir: Path): + """Copy files from the EMIDEC dataset to the nnUNet dataset folder. Returns the number of training cases.""" + patients_train = sorted([f for f in src_data_dir.iterdir() if f.is_dir()]) + patients_test = sorted([f for f in src_test_dir.iterdir() if f.is_dir()]) + + # Copy training files and corresponding labels. + for patient in patients_train: + train_file = patient / "Images" / f"{patient.name}.nii.gz" + label_file = patient / "Contours" / f"{patient.name}.nii.gz" + shutil.copy(train_file, train_dir / f"{train_file.stem.split('.')[0]}_0000.nii.gz") + shutil.copy(label_file, labels_dir) + + # Copy test files. + for patient in patients_test: + test_file = patient / "Images" / f"{patient.name}.nii.gz" + shutil.copy(test_file, test_dir / f"{test_file.stem.split('.')[0]}_0000.nii.gz") + + return len(patients_train) + + +def convert_emidec(src_data_dir: str, src_test_dir: str, dataset_id=27): + out_dir, train_dir, labels_dir, test_dir = make_out_dirs(dataset_id=dataset_id, task_name="EMIDEC") + num_training_cases = copy_files(Path(src_data_dir), Path(src_test_dir), train_dir, labels_dir, test_dir) + + generate_dataset_json( + str(out_dir), + channel_names={ + 0: "cineMRI", + }, + labels={ + "background": 0, + "cavity": 1, + "normal_myocardium": 2, + "myocardial_infarction": 3, + "no_reflow": 4, + }, + file_ending=".nii.gz", + num_training_cases=num_training_cases, + ) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument("-i", "--input_dir", type=str, help="The EMIDEC dataset directory.") + parser.add_argument("-t", "--test_dir", type=str, help="The EMIDEC test set directory.") + parser.add_argument( + "-d", "--dataset_id", required=False, type=int, default=115, help="nnU-Net Dataset ID, default: 115" + ) + args = parser.parse_args() + print("Converting...") + convert_emidec(args.input_dir, args.test_dir, args.dataset_id) + print("Done!") diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py new file mode 100644 index 0000000..90dcc6c --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset120_RoadSegmentation.py @@ -0,0 +1,87 @@ +import multiprocessing +import shutil +from multiprocessing import Pool + +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from 
nnunetv2.paths import nnUNet_raw +from skimage import io +from acvl_utils.morphology.morphology_helper import generic_filter_components +from scipy.ndimage import binary_fill_holes + + +def load_and_convert_case(input_image: str, input_seg: str, output_image: str, output_seg: str, + min_component_size: int = 50): + seg = io.imread(input_seg) + seg[seg == 255] = 1 + image = io.imread(input_image) + image = image.sum(2) + mask = image == (3 * 255) + # the dataset has large white areas in which road segmentations can exist but no image information is available. + # Remove the road label in these areas + mask = generic_filter_components(mask, filter_fn=lambda ids, sizes: [i for j, i in enumerate(ids) if + sizes[j] > min_component_size]) + mask = binary_fill_holes(mask) + seg[mask] = 0 + io.imsave(output_seg, seg, check_contrast=False) + shutil.copy(input_image, output_image) + + +if __name__ == "__main__": + # extracted archive from https://www.kaggle.com/datasets/insaff/massachusetts-roads-dataset?resource=download + source = '/media/fabian/data/raw_datasets/Massachussetts_road_seg/road_segmentation_ideal' + + dataset_name = 'Dataset120_RoadSegmentation' + + imagestr = join(nnUNet_raw, dataset_name, 'imagesTr') + imagests = join(nnUNet_raw, dataset_name, 'imagesTs') + labelstr = join(nnUNet_raw, dataset_name, 'labelsTr') + labelsts = join(nnUNet_raw, dataset_name, 'labelsTs') + maybe_mkdir_p(imagestr) + maybe_mkdir_p(imagests) + maybe_mkdir_p(labelstr) + maybe_mkdir_p(labelsts) + + train_source = join(source, 'training') + test_source = join(source, 'testing') + + with multiprocessing.get_context("spawn").Pool(8) as p: + + # not all training images have a segmentation + valid_ids = subfiles(join(train_source, 'output'), join=False, suffix='png') + num_train = len(valid_ids) + r = [] + for v in valid_ids: + r.append( + p.starmap_async( + load_and_convert_case, + (( + join(train_source, 'input', v), + join(train_source, 'output', v), + join(imagestr, v[:-4] + '_0000.png'), + join(labelstr, v), + 50 + ),) + ) + ) + + # test set + valid_ids = subfiles(join(test_source, 'output'), join=False, suffix='png') + for v in valid_ids: + r.append( + p.starmap_async( + load_and_convert_case, + (( + join(test_source, 'input', v), + join(test_source, 'output', v), + join(imagests, v[:-4] + '_0000.png'), + join(labelsts, v), + 50 + ),) + ) + ) + _ = [i.get() for i in r] + + generate_dataset_json(join(nnUNet_raw, dataset_name), {0: 'R', 1: 'G', 2: 'B'}, {'background': 0, 'road': 1}, + num_train, '.png', dataset_name=dataset_name) diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset137_BraTS21.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset137_BraTS21.py new file mode 100644 index 0000000..b4817d2 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset137_BraTS21.py @@ -0,0 +1,98 @@ +import multiprocessing +import shutil +from multiprocessing import Pool + +import SimpleITK as sitk +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw + + +def copy_BraTS_segmentation_and_convert_labels_to_nnUNet(in_file: str, out_file: str) -> None: + # use this for segmentation only!!! + # nnUNet wants the labels to be continuous.
BraTS is 0, 1, 2, 4 -> we make that into 0, 1, 2, 3 + img = sitk.ReadImage(in_file) + img_npy = sitk.GetArrayFromImage(img) + + uniques = np.unique(img_npy) + for u in uniques: + if u not in [0, 1, 2, 4]: + raise RuntimeError('unexpected label') + + seg_new = np.zeros_like(img_npy) + seg_new[img_npy == 4] = 3 + seg_new[img_npy == 2] = 1 + seg_new[img_npy == 1] = 2 + img_corr = sitk.GetImageFromArray(seg_new) + img_corr.CopyInformation(img) + sitk.WriteImage(img_corr, out_file) + + +def convert_labels_back_to_BraTS(seg: np.ndarray): + new_seg = np.zeros_like(seg) + new_seg[seg == 1] = 2 + new_seg[seg == 3] = 4 + new_seg[seg == 2] = 1 + return new_seg + + +def load_convert_labels_back_to_BraTS(filename, input_folder, output_folder): + a = sitk.ReadImage(join(input_folder, filename)) + b = sitk.GetArrayFromImage(a) + c = convert_labels_back_to_BraTS(b) + d = sitk.GetImageFromArray(c) + d.CopyInformation(a) + sitk.WriteImage(d, join(output_folder, filename)) + + +def convert_folder_with_preds_back_to_BraTS_labeling_convention(input_folder: str, output_folder: str, num_processes: int = 12): + """ + reads all prediction files (nifti) in the input folder, converts the labels back to BraTS convention and saves the result in output_folder + """ + maybe_mkdir_p(output_folder) + nii = subfiles(input_folder, suffix='.nii.gz', join=False) + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + p.starmap(load_convert_labels_back_to_BraTS, zip(nii, [input_folder] * len(nii), [output_folder] * len(nii))) + + +if __name__ == '__main__': + brats_data_dir = '/home/isensee/drives/E132-Rohdaten/BraTS_2021/training' + + task_id = 137 + task_name = "BraTS2021" + + foldername = "Dataset%03.0d_%s" % (task_id, task_name) + + # setting up nnU-Net folders + out_base = join(nnUNet_raw, foldername) + imagestr = join(out_base, "imagesTr") + labelstr = join(out_base, "labelsTr") + maybe_mkdir_p(imagestr) + maybe_mkdir_p(labelstr) + + case_ids = subdirs(brats_data_dir, prefix='BraTS', join=False) + + for c in case_ids: + shutil.copy(join(brats_data_dir, c, c + "_t1.nii.gz"), join(imagestr, c + '_0000.nii.gz')) + shutil.copy(join(brats_data_dir, c, c + "_t1ce.nii.gz"), join(imagestr, c + '_0001.nii.gz')) + shutil.copy(join(brats_data_dir, c, c + "_t2.nii.gz"), join(imagestr, c + '_0002.nii.gz')) + shutil.copy(join(brats_data_dir, c, c + "_flair.nii.gz"), join(imagestr, c + '_0003.nii.gz')) + + copy_BraTS_segmentation_and_convert_labels_to_nnUNet(join(brats_data_dir, c, c + "_seg.nii.gz"), + join(labelstr, c + '.nii.gz')) + + generate_dataset_json(out_base, + channel_names={0: 'T1', 1: 'T1ce', 2: 'T2', 3: 'Flair'}, + labels={ + 'background': 0, + 'whole tumor': (1, 2, 3), + 'tumor core': (2, 3), + 'enhancing tumor': (3, ) + }, + num_training_cases=len(case_ids), + file_ending='.nii.gz', + regions_class_order=(1, 2, 3), + license='see https://www.synapse.org/#!Synapse:syn25829067/wiki/610863', + reference='see https://www.synapse.org/#!Synapse:syn25829067/wiki/610863', + dataset_release='1.0') diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py new file mode 100644 index 0000000..1f33cd7 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset218_Amos2022_task1.py @@ -0,0 +1,70 @@ +from batchgenerators.utilities.file_and_folder_operations import * +import shutil +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw + + +def
convert_amos_task1(amos_base_dir: str, nnunet_dataset_id: int = 218): + """ + AMOS doesn't say anything about how the validation set is supposed to be used. So we just incorporate that into + the train set. Having a 5-fold cross-validation is superior to a single train:val split + """ + task_name = "AMOS2022_postChallenge_task1" + + foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name) + + # setting up nnU-Net folders + out_base = join(nnUNet_raw, foldername) + imagestr = join(out_base, "imagesTr") + imagests = join(out_base, "imagesTs") + labelstr = join(out_base, "labelsTr") + maybe_mkdir_p(imagestr) + maybe_mkdir_p(imagests) + maybe_mkdir_p(labelstr) + + dataset_json_source = load_json(join(amos_base_dir, 'dataset.json')) + + training_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['training']] + tr_ctr = 0 + for tr in training_identifiers: + if int(tr.split("_")[-1]) <= 410: # these are the CT images + tr_ctr += 1 + shutil.copy(join(amos_base_dir, 'imagesTr', tr + '.nii.gz'), join(imagestr, f'{tr}_0000.nii.gz')) + shutil.copy(join(amos_base_dir, 'labelsTr', tr + '.nii.gz'), join(labelstr, f'{tr}.nii.gz')) + + test_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['test']] + for ts in test_identifiers: + if int(ts.split("_")[-1]) <= 500: # these are the CT images + shutil.copy(join(amos_base_dir, 'imagesTs', ts + '.nii.gz'), join(imagests, f'{ts}_0000.nii.gz')) + + val_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['validation']] + for vl in val_identifiers: + if int(vl.split("_")[-1]) <= 409: # these are the CT images + tr_ctr += 1 + shutil.copy(join(amos_base_dir, 'imagesVa', vl + '.nii.gz'), join(imagestr, f'{vl}_0000.nii.gz')) + shutil.copy(join(amos_base_dir, 'labelsVa', vl + '.nii.gz'), join(labelstr, f'{vl}.nii.gz')) + + generate_dataset_json(out_base, {0: "CT"}, labels={v: int(k) for k,v in dataset_json_source['labels'].items()}, + num_training_cases=tr_ctr, file_ending='.nii.gz', + dataset_name=task_name, reference='https://amos22.grand-challenge.org/', + release='https://zenodo.org/record/7262581', + overwrite_image_reader_writer='NibabelIOWithReorient', + description="This is the dataset as released AFTER the challenge event. It has the " + "validation set gt in it! We just use the validation images as additional " + "training cases because AMOS doesn't specify how they should be used. nnU-Net's" + " 5-fold CV is better than some random train:val split.") + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('input_folder', type=str, + help="The downloaded and extracted AMOS2022 (https://amos22.grand-challenge.org/) data. " + "Use this link: https://zenodo.org/record/7262581." 
+ "You need to specify the folder with the imagesTr, imagesVal, labelsTr etc subfolders here!") + parser.add_argument('-d', required=False, type=int, default=218, help='nnU-Net Dataset ID, default: 218') + args = parser.parse_args() + amos_base = args.input_folder + convert_amos_task1(amos_base, args.d) + + diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py new file mode 100644 index 0000000..9a5e2c6 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset219_Amos2022_task2.py @@ -0,0 +1,65 @@ +from batchgenerators.utilities.file_and_folder_operations import * +import shutil +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw + + +def convert_amos_task2(amos_base_dir: str, nnunet_dataset_id: int = 219): + """ + AMOS doesn't say anything about how the validation set is supposed to be used. So we just incorporate that into + the train set. Having a 5-fold cross-validation is superior to a single train:val split + """ + task_name = "AMOS2022_postChallenge_task2" + + foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name) + + # setting up nnU-Net folders + out_base = join(nnUNet_raw, foldername) + imagestr = join(out_base, "imagesTr") + imagests = join(out_base, "imagesTs") + labelstr = join(out_base, "labelsTr") + maybe_mkdir_p(imagestr) + maybe_mkdir_p(imagests) + maybe_mkdir_p(labelstr) + + dataset_json_source = load_json(join(amos_base_dir, 'dataset.json')) + + training_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['training']] + for tr in training_identifiers: + shutil.copy(join(amos_base_dir, 'imagesTr', tr + '.nii.gz'), join(imagestr, f'{tr}_0000.nii.gz')) + shutil.copy(join(amos_base_dir, 'labelsTr', tr + '.nii.gz'), join(labelstr, f'{tr}.nii.gz')) + + test_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['test']] + for ts in test_identifiers: + shutil.copy(join(amos_base_dir, 'imagesTs', ts + '.nii.gz'), join(imagests, f'{ts}_0000.nii.gz')) + + val_identifiers = [i['image'].split('/')[-1][:-7] for i in dataset_json_source['validation']] + for vl in val_identifiers: + shutil.copy(join(amos_base_dir, 'imagesVa', vl + '.nii.gz'), join(imagestr, f'{vl}_0000.nii.gz')) + shutil.copy(join(amos_base_dir, 'labelsVa', vl + '.nii.gz'), join(labelstr, f'{vl}.nii.gz')) + + generate_dataset_json(out_base, {0: "either_CT_or_MR"}, labels={v: int(k) for k,v in dataset_json_source['labels'].items()}, + num_training_cases=len(training_identifiers) + len(val_identifiers), file_ending='.nii.gz', + dataset_name=task_name, reference='https://amos22.grand-challenge.org/', + release='https://zenodo.org/record/7262581', + overwrite_image_reader_writer='NibabelIOWithReorient', + description="This is the dataset as released AFTER the challenge event. It has the " + "validation set gt in it! We just use the validation images as additional " + "training cases because AMOS doesn't specify how they should be used. nnU-Net's" + " 5-fold CV is better than some random train:val split.") + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('input_folder', type=str, + help="The downloaded and extracted AMOS2022 (https://amos22.grand-challenge.org/) data. " + "Use this link: https://zenodo.org/record/7262581." 
+ "You need to specify the folder with the imagesTr, imagesVal, labelsTr etc subfolders here!") + parser.add_argument('-d', required=False, type=int, default=219, help='nnU-Net Dataset ID, default: 219') + args = parser.parse_args() + amos_base = args.input_folder + convert_amos_task2(amos_base, args.d) + + # /home/isensee/Downloads/amos22/amos22/ + diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py new file mode 100644 index 0000000..20a794c --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset220_KiTS2023.py @@ -0,0 +1,50 @@ +from batchgenerators.utilities.file_and_folder_operations import * +import shutil +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw + + +def convert_kits2023(kits_base_dir: str, nnunet_dataset_id: int = 220): + task_name = "KiTS2023" + + foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name) + + # setting up nnU-Net folders + out_base = join(nnUNet_raw, foldername) + imagestr = join(out_base, "imagesTr") + labelstr = join(out_base, "labelsTr") + maybe_mkdir_p(imagestr) + maybe_mkdir_p(labelstr) + + cases = subdirs(kits_base_dir, prefix='case_', join=False) + for tr in cases: + shutil.copy(join(kits_base_dir, tr, 'imaging.nii.gz'), join(imagestr, f'{tr}_0000.nii.gz')) + shutil.copy(join(kits_base_dir, tr, 'segmentation.nii.gz'), join(labelstr, f'{tr}.nii.gz')) + + generate_dataset_json(out_base, {0: "CT"}, + labels={ + "background": 0, + "kidney": (1, 2, 3), + "masses": (2, 3), + "tumor": 2 + }, + regions_class_order=(1, 3, 2), + num_training_cases=len(cases), file_ending='.nii.gz', + dataset_name=task_name, reference='none', + release='prerelease', + overwrite_image_reader_writer='NibabelIOWithReorient', + description="KiTS2023") + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('input_folder', type=str, + help="The downloaded and extracted KiTS2023 dataset (must have case_XXXXX subfolders)") + parser.add_argument('-d', required=False, type=int, default=220, help='nnU-Net Dataset ID, default: 220') + args = parser.parse_args() + amos_base = args.input_folder + convert_kits2023(amos_base, args.d) + + # /media/isensee/raw_data/raw_datasets/kits23/dataset + diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py new file mode 100644 index 0000000..56ef16e --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset221_AutoPETII_2023.py @@ -0,0 +1,70 @@ +from batchgenerators.utilities.file_and_folder_operations import * +import shutil +from nnunetv2.dataset_conversion.generate_dataset_json import generate_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed + + +def convert_autopet(autopet_base_dir:str = '/media/isensee/My Book1/AutoPET/nifti/FDG-PET-CT-Lesions', + nnunet_dataset_id: int = 221): + task_name = "AutoPETII_2023" + + foldername = "Dataset%03.0d_%s" % (nnunet_dataset_id, task_name) + + # setting up nnU-Net folders + out_base = join(nnUNet_raw, foldername) + imagestr = join(out_base, "imagesTr") + labelstr = join(out_base, "labelsTr") + maybe_mkdir_p(imagestr) + maybe_mkdir_p(labelstr) + + patients = subdirs(autopet_base_dir, prefix='PETCT', join=False) + n = 0 + identifiers = [] + for pat in patients: + patient_acquisitions = 
subdirs(join(autopet_base_dir, pat), join=False) + for pa in patient_acquisitions: + n += 1 + identifier = f"{pat}_{pa}" + identifiers.append(identifier) + if not isfile(join(imagestr, f'{identifier}_0000.nii.gz')): + shutil.copy(join(autopet_base_dir, pat, pa, 'CTres.nii.gz'), join(imagestr, f'{identifier}_0000.nii.gz')) + if not isfile(join(imagestr, f'{identifier}_0001.nii.gz')): + shutil.copy(join(autopet_base_dir, pat, pa, 'SUV.nii.gz'), join(imagestr, f'{identifier}_0001.nii.gz')) + if not isfile(join(imagestr, f'{identifier}.nii.gz')): + shutil.copy(join(autopet_base_dir, pat, pa, 'SEG.nii.gz'), join(labelstr, f'{identifier}.nii.gz')) + + generate_dataset_json(out_base, {0: "CT", 1:"CT"}, + labels={ + "background": 0, + "tumor": 1 + }, + num_training_cases=n, file_ending='.nii.gz', + dataset_name=task_name, reference='https://autopet-ii.grand-challenge.org/', + release='release', + # overwrite_image_reader_writer='NibabelIOWithReorient', + description=task_name) + + # manual split + splits = [] + for fold in range(5): + val_patients = patients[fold :: 5] + splits.append( + { + 'train': [i for i in identifiers if not any([i.startswith(v) for v in val_patients])], + 'val': [i for i in identifiers if any([i.startswith(v) for v in val_patients])], + } + ) + pp_out_dir = join(nnUNet_preprocessed, foldername) + maybe_mkdir_p(pp_out_dir) + save_json(splits, join(pp_out_dir, 'splits_final.json'), sort_keys=False) + + +if __name__ == '__main__': + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('input_folder', type=str, + help="The downloaded and extracted autopet dataset (must have PETCT_XXX subfolders)") + parser.add_argument('-d', required=False, type=int, default=221, help='nnU-Net Dataset ID, default: 221') + args = parser.parse_args() + amos_base = args.input_folder + convert_autopet(amos_base, args.d) diff --git a/docker/template/src/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py b/docker/template/src/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py new file mode 100644 index 0000000..80b295d --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/Dataset988_dummyDataset4.py @@ -0,0 +1,32 @@ +import os + +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.paths import nnUNet_raw +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + +if __name__ == '__main__': + # creates a dummy dataset where there are no files in imagestr and labelstr + source_dataset = 'Dataset004_Hippocampus' + + target_dataset = 'Dataset987_dummyDataset4' + target_dataset_dir = join(nnUNet_raw, target_dataset) + maybe_mkdir_p(target_dataset_dir) + + dataset = get_filenames_of_train_images_and_targets(join(nnUNet_raw, source_dataset)) + + # the returned dataset will have absolute paths. We should use relative paths so that you can freely copy + # datasets around between systems. As long as the source dataset is there it will continue working even if + # nnUNet_raw is in different locations + + # paths must be relative to target_dataset_dir!!! 
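+ # A hypothetical example of what this produces (assuming nnUNet_raw='/data/nnUNet_raw'):
+ # os.path.relpath('/data/nnUNet_raw/Dataset004_Hippocampus/labelsTr/hippocampus_001.nii.gz',
+ #                 '/data/nnUNet_raw/Dataset987_dummyDataset4')
+ # returns '../Dataset004_Hippocampus/labelsTr/hippocampus_001.nii.gz', which resolves
+ # correctly no matter where nnUNet_raw lives.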
+ for k in dataset.keys(): + dataset[k]['label'] = os.path.relpath(dataset[k]['label'], target_dataset_dir) + dataset[k]['images'] = [os.path.relpath(i, target_dataset_dir) for i in dataset[k]['images']] + + # load old dataset.json + dataset_json = load_json(join(nnUNet_raw, source_dataset, 'dataset.json')) + dataset_json['dataset'] = dataset + + # save + save_json(dataset_json, join(target_dataset_dir, 'dataset.json'), sort_keys=False) diff --git a/docker/template/src/nnunetv2/dataset_conversion/__init__.py b/docker/template/src/nnunetv2/dataset_conversion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/dataset_conversion/convert_MSD_dataset.py b/docker/template/src/nnunetv2/dataset_conversion/convert_MSD_dataset.py new file mode 100644 index 0000000..40dddc1 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/convert_MSD_dataset.py @@ -0,0 +1,133 @@ +import argparse +import multiprocessing +import shutil +from multiprocessing import Pool +from typing import Optional +import SimpleITK as sitk +from batchgenerators.utilities.file_and_folder_operations import * +from nnunetv2.paths import nnUNet_raw +from nnunetv2.utilities.dataset_name_id_conversion import find_candidate_datasets +from nnunetv2.configuration import default_num_processes +import numpy as np + + +def split_4d_nifti(filename, output_folder): + img_itk = sitk.ReadImage(filename) + dim = img_itk.GetDimension() + file_base = os.path.basename(filename) + if dim == 3: + shutil.copy(filename, join(output_folder, file_base[:-7] + "_0000.nii.gz")) + return + elif dim != 4: + raise RuntimeError("Unexpected dimensionality: %d of file %s, cannot split" % (dim, filename)) + else: + img_npy = sitk.GetArrayFromImage(img_itk) + spacing = img_itk.GetSpacing() + origin = img_itk.GetOrigin() + direction = np.array(img_itk.GetDirection()).reshape(4,4) + # now modify these to remove the fourth dimension + spacing = tuple(list(spacing[:-1])) + origin = tuple(list(origin[:-1])) + direction = tuple(direction[:-1, :-1].reshape(-1)) + for i, t in enumerate(range(img_npy.shape[0])): + img = img_npy[t] + img_itk_new = sitk.GetImageFromArray(img) + img_itk_new.SetSpacing(spacing) + img_itk_new.SetOrigin(origin) + img_itk_new.SetDirection(direction) + sitk.WriteImage(img_itk_new, join(output_folder, file_base[:-7] + "_%04.0d.nii.gz" % i)) + + +def convert_msd_dataset(source_folder: str, overwrite_target_id: Optional[int] = None, + num_processes: int = default_num_processes) -> None: + if source_folder.endswith('/') or source_folder.endswith('\\'): + source_folder = source_folder[:-1] + + labelsTr = join(source_folder, 'labelsTr') + imagesTs = join(source_folder, 'imagesTs') + imagesTr = join(source_folder, 'imagesTr') + assert isdir(labelsTr), f"labelsTr subfolder missing in source folder" + assert isdir(imagesTs), f"imagesTs subfolder missing in source folder" + assert isdir(imagesTr), f"imagesTr subfolder missing in source folder" + dataset_json = join(source_folder, 'dataset.json') + assert isfile(dataset_json), f"dataset.json missing in source_folder" + + # infer source dataset id and name + task, dataset_name = os.path.basename(source_folder).split('_') + task_id = int(task[4:]) + + # check if target dataset id is taken + target_id = task_id if overwrite_target_id is None else overwrite_target_id + existing_datasets = find_candidate_datasets(target_id) + assert len(existing_datasets) == 0, f"Target dataset id {target_id} is already taken, please consider changing " \ + f"it using 
overwrite_target_id. Conflicting dataset: {existing_datasets} (check nnUNet_results, nnUNet_preprocessed and nnUNet_raw!)" + + target_dataset_name = f"Dataset{target_id:03d}_{dataset_name}" + target_folder = join(nnUNet_raw, target_dataset_name) + target_imagesTr = join(target_folder, 'imagesTr') + target_imagesTs = join(target_folder, 'imagesTs') + target_labelsTr = join(target_folder, 'labelsTr') + maybe_mkdir_p(target_imagesTr) + maybe_mkdir_p(target_imagesTs) + maybe_mkdir_p(target_labelsTr) + + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + results = [] + + # convert 4d train images + source_images = [i for i in subfiles(imagesTr, suffix='.nii.gz', join=False) if + not i.startswith('.') and not i.startswith('_')] + source_images = [join(imagesTr, i) for i in source_images] + + results.append( + p.starmap_async( + split_4d_nifti, zip(source_images, [target_imagesTr] * len(source_images)) + ) + ) + + # convert 4d test images + source_images = [i for i in subfiles(imagesTs, suffix='.nii.gz', join=False) if + not i.startswith('.') and not i.startswith('_')] + source_images = [join(imagesTs, i) for i in source_images] + + results.append( + p.starmap_async( + split_4d_nifti, zip(source_images, [target_imagesTs] * len(source_images)) + ) + ) + + # copy segmentations + source_images = [i for i in subfiles(labelsTr, suffix='.nii.gz', join=False) if + not i.startswith('.') and not i.startswith('_')] + for s in source_images: + shutil.copy(join(labelsTr, s), join(target_labelsTr, s)) + + [i.get() for i in results] + + dataset_json = load_json(dataset_json) + dataset_json['labels'] = {j: int(i) for i, j in dataset_json['labels'].items()} + dataset_json['file_ending'] = ".nii.gz" + dataset_json["channel_names"] = dataset_json["modality"] + del dataset_json["modality"] + del dataset_json["training"] + del dataset_json["test"] + save_json(dataset_json, join(nnUNet_raw, target_dataset_name, 'dataset.json'), sort_keys=False) + + +def entry_point(): + parser = argparse.ArgumentParser() + parser.add_argument('-i', type=str, required=True, + help='Downloaded and extracted MSD dataset folder. CANNOT be nnUNetv1 dataset! Example: ' + '/home/fabian/Downloads/Task05_Prostate') + parser.add_argument('-overwrite_id', type=int, required=False, default=None, + help='Overwrite the dataset id. If not set we use the id of the MSD task (inferred from ' + 'folder name). Only use this if you already have an equivalently numbered dataset!') + parser.add_argument('-np', type=int, required=False, default=default_num_processes, + help=f'Number of processes used. 
Default: {default_num_processes}') + args = parser.parse_args() + convert_msd_dataset(args.i, args.overwrite_id, args.np) + + +if __name__ == '__main__': + entry_point() + # convert_msd_dataset('/home/fabian/Downloads/Task05_Prostate', overwrite_target_id=201) diff --git a/docker/template/src/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py b/docker/template/src/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py new file mode 100644 index 0000000..fb77533 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/convert_raw_dataset_from_old_nnunet_format.py @@ -0,0 +1,53 @@ +import shutil +from copy import deepcopy + +from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p, isdir, load_json, save_json +from nnunetv2.paths import nnUNet_raw + + +def convert(source_folder, target_dataset_name): + """ + remember that old tasks were called TaskXXX_YYY and new ones are called DatasetXXX_YYY + source_folder + """ + if isdir(join(nnUNet_raw, target_dataset_name)): + raise RuntimeError(f'Target dataset name {target_dataset_name} already exists. Aborting... ' + f'(we might break something). If you are sure you want to proceed, please manually ' + f'delete {join(nnUNet_raw, target_dataset_name)}') + maybe_mkdir_p(join(nnUNet_raw, target_dataset_name)) + shutil.copytree(join(source_folder, 'imagesTr'), join(nnUNet_raw, target_dataset_name, 'imagesTr')) + shutil.copytree(join(source_folder, 'labelsTr'), join(nnUNet_raw, target_dataset_name, 'labelsTr')) + if isdir(join(source_folder, 'imagesTs')): + shutil.copytree(join(source_folder, 'imagesTs'), join(nnUNet_raw, target_dataset_name, 'imagesTs')) + if isdir(join(source_folder, 'labelsTs')): + shutil.copytree(join(source_folder, 'labelsTs'), join(nnUNet_raw, target_dataset_name, 'labelsTs')) + if isdir(join(source_folder, 'imagesVal')): + shutil.copytree(join(source_folder, 'imagesVal'), join(nnUNet_raw, target_dataset_name, 'imagesVal')) + if isdir(join(source_folder, 'labelsVal')): + shutil.copytree(join(source_folder, 'labelsVal'), join(nnUNet_raw, target_dataset_name, 'labelsVal')) + shutil.copy(join(source_folder, 'dataset.json'), join(nnUNet_raw, target_dataset_name)) + + dataset_json = load_json(join(nnUNet_raw, target_dataset_name, 'dataset.json')) + del dataset_json['tensorImageSize'] + del dataset_json['numTest'] + del dataset_json['training'] + del dataset_json['test'] + dataset_json['channel_names'] = deepcopy(dataset_json['modality']) + del dataset_json['modality'] + + dataset_json['labels'] = {j: int(i) for i, j in dataset_json['labels'].items()} + dataset_json['file_ending'] = ".nii.gz" + save_json(dataset_json, join(nnUNet_raw, target_dataset_name, 'dataset.json'), sort_keys=False) + + +def convert_entry_point(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument("input_folder", type=str, + help='Raw old nnUNet dataset. This must be the folder with imagesTr,labelsTr etc subfolders! ' + 'Please provide the PATH to the old Task, not just the task name. nnU-Net V2 does not ' + 'know where v1 tasks are.') + parser.add_argument("output_dataset_name", type=str, + help='New dataset NAME (not path!). 
Must follow the DatasetXXX_NAME convention!') + args = parser.parse_args() + convert(args.input_folder, args.output_dataset_name) diff --git a/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py new file mode 100644 index 0000000..e68c6a6 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py @@ -0,0 +1,74 @@ +import SimpleITK as sitk +import shutil + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json, nifti_files + +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.paths import nnUNet_raw +from nnunetv2.utilities.label_handling.label_handling import LabelManager + + +def sparsify_segmentation(seg: np.ndarray, label_manager: LabelManager, percent_of_slices: float) -> np.ndarray: + assert label_manager.has_ignore_label, "This preprocessor only works with datasets that have an ignore label!" + seg_new = np.ones_like(seg) * label_manager.ignore_label + x, y, z = seg.shape + # x + num_slices = max(1, round(x * percent_of_slices)) + selected_slices = np.random.choice(x, num_slices, replace=False) + seg_new[selected_slices] = seg[selected_slices] + # y + num_slices = max(1, round(y * percent_of_slices)) + selected_slices = np.random.choice(y, num_slices, replace=False) + seg_new[:, selected_slices] = seg[:, selected_slices] + # z + num_slices = max(1, round(z * percent_of_slices)) + selected_slices = np.random.choice(z, num_slices, replace=False) + seg_new[:, :, selected_slices] = seg[:, :, selected_slices] + return seg_new + + +if __name__ == '__main__': + dataset_name = 'IntegrationTest_Hippocampus_regions_ignore' + dataset_id = 996 + dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" + + try: + existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) + if existing_dataset_name != dataset_name: + raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. 
If " + f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " + f"nnUNet_results!") + except RuntimeError: + pass + + if isdir(join(nnUNet_raw, dataset_name)): + shutil.rmtree(join(nnUNet_raw, dataset_name)) + + source_dataset = maybe_convert_to_dataset_name(4) + shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) + + # additionally optimize entire hippocampus region, remove Posterior + dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) + dj['labels'] = { + 'background': 0, + 'hippocampus': (1, 2), + 'anterior': 1, + 'ignore': 3 + } + dj['regions_class_order'] = (2, 1) + save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False) + + # now add ignore label to segmentation images + np.random.seed(1234) + lm = LabelManager(label_dict=dj['labels'], regions_class_order=dj.get('regions_class_order')) + + segs = nifti_files(join(nnUNet_raw, dataset_name, 'labelsTr')) + for s in segs: + seg_itk = sitk.ReadImage(s) + seg_npy = sitk.GetArrayFromImage(seg_itk) + seg_npy = sparsify_segmentation(seg_npy, lm, 0.1 / 3) + seg_itk_new = sitk.GetImageFromArray(seg_npy) + seg_itk_new.CopyInformation(seg_itk) + sitk.WriteImage(seg_itk_new, s) + diff --git a/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py new file mode 100644 index 0000000..b40c534 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py @@ -0,0 +1,37 @@ +import shutil + +from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json + +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.paths import nnUNet_raw + +if __name__ == '__main__': + dataset_name = 'IntegrationTest_Hippocampus_regions' + dataset_id = 997 + dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" + + try: + existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) + if existing_dataset_name != dataset_name: + raise FileExistsError( + f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. 
If " + f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " + f"nnUNet_results!") + except RuntimeError: + pass + + if isdir(join(nnUNet_raw, dataset_name)): + shutil.rmtree(join(nnUNet_raw, dataset_name)) + + source_dataset = maybe_convert_to_dataset_name(4) + shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) + + # additionally optimize entire hippocampus region, remove Posterior + dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) + dj['labels'] = { + 'background': 0, + 'hippocampus': (1, 2), + 'anterior': 1 + } + dj['regions_class_order'] = (2, 1) + save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False) diff --git a/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py new file mode 100644 index 0000000..1781a27 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py @@ -0,0 +1,33 @@ +import shutil + +from batchgenerators.utilities.file_and_folder_operations import isdir, join, load_json, save_json + +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.paths import nnUNet_raw + + +if __name__ == '__main__': + dataset_name = 'IntegrationTest_Hippocampus_ignore' + dataset_id = 998 + dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" + + try: + existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) + if existing_dataset_name != dataset_name: + raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. If " + f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " + f"nnUNet_results!") + except RuntimeError: + pass + + if isdir(join(nnUNet_raw, dataset_name)): + shutil.rmtree(join(nnUNet_raw, dataset_name)) + + source_dataset = maybe_convert_to_dataset_name(4) + shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) + + # set class 2 to ignore label + dj = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) + dj['labels']['ignore'] = 2 + del dj['labels']['Posterior'] + save_json(dj, join(nnUNet_raw, dataset_name, 'dataset.json'), sort_keys=False) diff --git a/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py new file mode 100644 index 0000000..33075da --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py @@ -0,0 +1,27 @@ +import shutil + +from batchgenerators.utilities.file_and_folder_operations import isdir, join + +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.paths import nnUNet_raw + + +if __name__ == '__main__': + dataset_name = 'IntegrationTest_Hippocampus' + dataset_id = 999 + dataset_name = f"Dataset{dataset_id:03d}_{dataset_name}" + + try: + existing_dataset_name = maybe_convert_to_dataset_name(dataset_id) + if existing_dataset_name != dataset_name: + raise FileExistsError(f"A different dataset with id {dataset_id} already exists :-(: {existing_dataset_name}. 
If " + f"you intent to delete it, remember to also remove it in nnUNet_preprocessed and " + f"nnUNet_results!") + except RuntimeError: + pass + + if isdir(join(nnUNet_raw, dataset_name)): + shutil.rmtree(join(nnUNet_raw, dataset_name)) + + source_dataset = maybe_convert_to_dataset_name(4) + shutil.copytree(join(nnUNet_raw, source_dataset), join(nnUNet_raw, dataset_name)) diff --git a/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py b/docker/template/src/nnunetv2/dataset_conversion/datasets_for_integration_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/dataset_conversion/generate_dataset_json.py b/docker/template/src/nnunetv2/dataset_conversion/generate_dataset_json.py new file mode 100644 index 0000000..429fa05 --- /dev/null +++ b/docker/template/src/nnunetv2/dataset_conversion/generate_dataset_json.py @@ -0,0 +1,103 @@ +from typing import Tuple + +from batchgenerators.utilities.file_and_folder_operations import save_json, join + + +def generate_dataset_json(output_folder: str, + channel_names: dict, + labels: dict, + num_training_cases: int, + file_ending: str, + regions_class_order: Tuple[int, ...] = None, + dataset_name: str = None, reference: str = None, release: str = None, license: str = None, + description: str = None, + overwrite_image_reader_writer: str = None, **kwargs): + """ + Generates a dataset.json file in the output folder + + channel_names: + Channel names must map the index to the name of the channel, example: + { + 0: 'T1', + 1: 'CT' + } + Note that the channel names may influence the normalization scheme!! Learn more in the documentation. + + labels: + This will tell nnU-Net what labels to expect. Important: This will also determine whether you use region-based training or not. + Example regular labels: + { + 'background': 0, + 'left atrium': 1, + 'some other label': 2 + } + Example region-based training: + { + 'background': 0, + 'whole tumor': (1, 2, 3), + 'tumor core': (2, 3), + 'enhancing tumor': 3 + } + + Remember that nnU-Net expects consecutive values for labels! nnU-Net also expects 0 to be background! + + num_training_cases: is used to double check all cases are there! + + file_ending: needed for finding the files correctly. IMPORTANT! File endings must match between images and + segmentations! + + dataset_name, reference, release, license, description: self-explanatory and not used by nnU-Net. Just for + completeness and as a reminder that these would be great! + + overwrite_image_reader_writer: If you need a special IO class for your dataset you can derive it from + BaseReaderWriter, place it into nnunet.imageio and reference it here by name + + kwargs: whatever you put here will be placed in the dataset.json as well + + """ + has_regions: bool = any([isinstance(i, (tuple, list)) and len(i) > 1 for i in labels.values()]) + if has_regions: + assert regions_class_order is not None, f"You have defined regions but regions_class_order is not set. " \ + f"You need that." + # channel names need strings as keys + keys = list(channel_names.keys()) + for k in keys: + if not isinstance(k, str): + channel_names[str(k)] = channel_names[k] + del channel_names[k] + + # labels need ints as values + for l in labels.keys(): + value = labels[l] + if isinstance(value, (tuple, list)): + value = tuple([int(i) for i in value]) + labels[l] = value + else: + labels[l] = int(labels[l]) + + dataset_json = { + 'channel_names': channel_names, # previously this was called 'modality'. 
I didn't like this so this is + # channel_names now. Live with it. + 'labels': labels, + 'numTraining': num_training_cases, + 'file_ending': file_ending, + } + + if dataset_name is not None: + dataset_json['name'] = dataset_name + if reference is not None: + dataset_json['reference'] = reference + if release is not None: + dataset_json['release'] = release + if license is not None: + dataset_json['licence'] = license + if description is not None: + dataset_json['description'] = description + if overwrite_image_reader_writer is not None: + dataset_json['overwrite_image_reader_writer'] = overwrite_image_reader_writer + if regions_class_order is not None: + dataset_json['regions_class_order'] = regions_class_order + + dataset_json.update(kwargs) + + save_json(dataset_json, join(output_folder, 'dataset.json'), sort_keys=False) diff --git a/docker/template/src/nnunetv2/ensembling/__init__.py b/docker/template/src/nnunetv2/ensembling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/ensembling/ensemble.py b/docker/template/src/nnunetv2/ensembling/ensemble.py new file mode 100644 index 0000000..d4a9be4 --- /dev/null +++ b/docker/template/src/nnunetv2/ensembling/ensemble.py @@ -0,0 +1,206 @@ +import argparse +import multiprocessing +import shutil +from copy import deepcopy +from multiprocessing import Pool +from typing import List, Union, Tuple + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import load_json, join, subfiles, \ + maybe_mkdir_p, isdir, save_pickle, load_pickle, isfile +from nnunetv2.configuration import default_num_processes +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.utilities.label_handling.label_handling import LabelManager +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + +def average_probabilities(list_of_files: List[str]) -> np.ndarray: + assert len(list_of_files), 'At least one file must be given in list_of_files' + avg = None + for f in list_of_files: + if avg is None: + avg = np.load(f)['probabilities'] + # maybe increase precision to prevent rounding errors + if avg.dtype != np.float32: + avg = avg.astype(np.float32) + else: + avg += np.load(f)['probabilities'] + avg /= len(list_of_files) + return avg + + +def merge_files(list_of_files, + output_filename_truncated: str, + output_file_ending: str, + image_reader_writer: BaseReaderWriter, + label_manager: LabelManager, + save_probabilities: bool = False): + # load the pkl file associated with the first file in list_of_files + properties = load_pickle(list_of_files[0][:-4] + '.pkl') + # load and average predictions + probabilities = average_probabilities(list_of_files) + segmentation = label_manager.convert_logits_to_segmentation(probabilities) + image_reader_writer.write_seg(segmentation, output_filename_truncated + output_file_ending, properties) + if save_probabilities: + np.savez_compressed(output_filename_truncated + '.npz', probabilities=probabilities) + save_pickle(probabilities, output_filename_truncated + '.pkl') + + +def ensemble_folders(list_of_input_folders: List[str], + output_folder: str, + save_merged_probabilities: bool = False, + num_processes: int = default_num_processes, + dataset_json_file_or_dict: str = None, + plans_json_file_or_dict: str = None): + """we need too much shit for this function. Problem is that we now have to support region-based training plus + multiple input/output formats so there isn't really a way around this. 
+ + If plans and dataset json are not specified, we assume each of the folders has a corresponding plans.json + and/or dataset.json in it. These are usually copied into those folders by nnU-Net during prediction. + We just pick the dataset.json and plans.json from the first of the folders and we DON'T check whether the 5 + folders contain the same plans etc! This can be a feature if results from different datasets are to be merged (only + works if label dict in dataset.json is the same between these datasets!!!)""" + if dataset_json_file_or_dict is not None: + if isinstance(dataset_json_file_or_dict, str): + dataset_json = load_json(dataset_json_file_or_dict) + else: + dataset_json = dataset_json_file_or_dict + else: + dataset_json = load_json(join(list_of_input_folders[0], 'dataset.json')) + + if plans_json_file_or_dict is not None: + if isinstance(plans_json_file_or_dict, str): + plans = load_json(plans_json_file_or_dict) + else: + plans = plans_json_file_or_dict + else: + plans = load_json(join(list_of_input_folders[0], 'plans.json')) + + plans_manager = PlansManager(plans) + + # now collect the files in each of the folders and enforce that all files are present in all folders + files_per_folder = [set(subfiles(i, suffix='.npz', join=False)) for i in list_of_input_folders] + # first build a set with all files + s = deepcopy(files_per_folder[0]) + for f in files_per_folder[1:]: + s.update(f) + for f in files_per_folder: + assert len(s.difference(f)) == 0, "Not all folders contain the same files for ensembling. Please only " \ + "provide folders that contain the predictions" + lists_of_lists_of_files = [[join(fl, fi) for fl in list_of_input_folders] for fi in s] + output_files_truncated = [join(output_folder, fi[:-4]) for fi in s] + + image_reader_writer = plans_manager.image_reader_writer_class() + label_manager = plans_manager.get_label_manager(dataset_json) + + maybe_mkdir_p(output_folder) + shutil.copy(join(list_of_input_folders[0], 'dataset.json'), output_folder) + + with multiprocessing.get_context("spawn").Pool(num_processes) as pool: + num_preds = len(s) + _ = pool.starmap( + merge_files, + zip( + lists_of_lists_of_files, + output_files_truncated, + [dataset_json['file_ending']] * num_preds, + [image_reader_writer] * num_preds, + [label_manager] * num_preds, + [save_merged_probabilities] * num_preds + ) + ) + + +def entry_point_ensemble_folders(): + parser = argparse.ArgumentParser() + parser.add_argument('-i', nargs='+', type=str, required=True, + help='list of input folders') + parser.add_argument('-o', type=str, required=True, help='output folder') + parser.add_argument('-np', type=int, required=False, default=default_num_processes, + help=f"Number of processes used for ensembling. 
Default: {default_num_processes}") + parser.add_argument('--save_npz', action='store_true', required=False, help='Set this flag to store output ' + 'probabilities in separate .npz files') + + args = parser.parse_args() + ensemble_folders(args.i, args.o, args.save_npz, args.np) + + +def ensemble_crossvalidations(list_of_trained_model_folders: List[str], + output_folder: str, + folds: Union[Tuple[int, ...], List[int]] = (0, 1, 2, 3, 4), + num_processes: int = default_num_processes, + overwrite: bool = True) -> None: + """ + Feature: different configurations can now have different splits + """ + dataset_json = load_json(join(list_of_trained_model_folders[0], 'dataset.json')) + plans_manager = PlansManager(join(list_of_trained_model_folders[0], 'plans.json')) + + # first collect all unique filenames + files_per_folder = {} + unique_filenames = set() + for tr in list_of_trained_model_folders: + files_per_folder[tr] = {} + for f in folds: + if not isdir(join(tr, f'fold_{f}', 'validation')): + raise RuntimeError(f'Expected model output directory does not exist. You must train all requested ' + f'folds of the specified model.\nModel: {tr}\nFold: {f}') + files_here = subfiles(join(tr, f'fold_{f}', 'validation'), suffix='.npz', join=False) + if len(files_here) == 0: + raise RuntimeError(f"No .npz files found in folder {join(tr, f'fold_{f}', 'validation')}. Rerun your " + f"validation with the --npz flag. Use nnUNetv2_train [...] --val --npz.") + files_per_folder[tr][f] = subfiles(join(tr, f'fold_{f}', 'validation'), suffix='.npz', join=False) + unique_filenames.update(files_per_folder[tr][f]) + + # verify that all trained_model_folders have all predictions + ok = True + for tr, fi in files_per_folder.items(): + all_files_here = set() + for f in folds: + all_files_here.update(fi[f]) + diff = unique_filenames.difference(all_files_here) + if len(diff) > 0: + ok = False + print(f'model {tr} does not seem to contain all predictions. Missing: {diff}') + if not ok: + raise RuntimeError('There were missing files, see print statements above this one') + + # now we need to collect where these files are + file_mapping = [] + for tr in list_of_trained_model_folders: + file_mapping.append({}) + for f in folds: + for fi in files_per_folder[tr][f]: + # check for duplicates + assert fi not in file_mapping[-1].keys(), f"Duplicate detected. Case {fi} is present in more than " \ + f"one fold of model {tr}." 
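+                # file_mapping holds one dict per trained model folder, mapping each case's
+                # .npz filename to the fold whose validation set contains it; cross-validation
+                # folds are disjoint, hence the duplicate check above.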
+ file_mapping[-1][fi] = join(tr, f'fold_{f}', 'validation', fi) + + lists_of_lists_of_files = [[fm[i] for fm in file_mapping] for i in unique_filenames] + output_files_truncated = [join(output_folder, fi[:-4]) for fi in unique_filenames] + + image_reader_writer = plans_manager.image_reader_writer_class() + maybe_mkdir_p(output_folder) + label_manager = plans_manager.get_label_manager(dataset_json) + + if not overwrite: + tmp = [isfile(i + dataset_json['file_ending']) for i in output_files_truncated] + lists_of_lists_of_files = [lists_of_lists_of_files[i] for i in range(len(tmp)) if not tmp[i]] + output_files_truncated = [output_files_truncated[i] for i in range(len(tmp)) if not tmp[i]] + + with multiprocessing.get_context("spawn").Pool(num_processes) as pool: + num_preds = len(lists_of_lists_of_files) + _ = pool.starmap( + merge_files, + zip( + lists_of_lists_of_files, + output_files_truncated, + [dataset_json['file_ending']] * num_preds, + [image_reader_writer] * num_preds, + [label_manager] * num_preds, + [False] * num_preds + ) + ) + + shutil.copy(join(list_of_trained_model_folders[0], 'plans.json'), join(output_folder, 'plans.json')) + shutil.copy(join(list_of_trained_model_folders[0], 'dataset.json'), join(output_folder, 'dataset.json')) diff --git a/docker/template/src/nnunetv2/evaluation/__init__.py b/docker/template/src/nnunetv2/evaluation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/evaluation/accumulate_cv_results.py b/docker/template/src/nnunetv2/evaluation/accumulate_cv_results.py new file mode 100644 index 0000000..f1a79f0 --- /dev/null +++ b/docker/template/src/nnunetv2/evaluation/accumulate_cv_results.py @@ -0,0 +1,58 @@ +import shutil +from typing import Union, List, Tuple + +from batchgenerators.utilities.file_and_folder_operations import load_json, join, isdir, maybe_mkdir_p, subfiles, isfile + +from nnunetv2.configuration import default_num_processes +from nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + +def accumulate_cv_results(trained_model_folder, + merged_output_folder: str, + folds: Union[List[int], Tuple[int, ...]], + num_processes: int = default_num_processes, + overwrite: bool = True): + """ + There are a lot of things that can get fucked up, so the simplest way to deal with potential problems is to + collect the cv results into a separate folder and then evaluate them again. No messing with summary_json files! + """ + + if overwrite and isdir(merged_output_folder): + shutil.rmtree(merged_output_folder) + maybe_mkdir_p(merged_output_folder) + + dataset_json = load_json(join(trained_model_folder, 'dataset.json')) + plans_manager = PlansManager(join(trained_model_folder, 'plans.json')) + rw = plans_manager.image_reader_writer_class() + shutil.copy(join(trained_model_folder, 'dataset.json'), join(merged_output_folder, 'dataset.json')) + shutil.copy(join(trained_model_folder, 'plans.json'), join(merged_output_folder, 'plans.json')) + + did_we_copy_something = False + for f in folds: + expected_validation_folder = join(trained_model_folder, f'fold_{f}', 'validation') + if not isdir(expected_validation_folder): + raise RuntimeError(f"fold {f} of model {trained_model_folder} is missing. 
Please train it!") + predicted_files = subfiles(expected_validation_folder, suffix=dataset_json['file_ending'], join=False) + for pf in predicted_files: + if overwrite and isfile(join(merged_output_folder, pf)): + raise RuntimeError(f'More than one of your folds has a prediction for case {pf}') + if overwrite or not isfile(join(merged_output_folder, pf)): + shutil.copy(join(expected_validation_folder, pf), join(merged_output_folder, pf)) + did_we_copy_something = True + + if did_we_copy_something or not isfile(join(merged_output_folder, 'summary.json')): + label_manager = plans_manager.get_label_manager(dataset_json) + gt_folder = join(nnUNet_raw, plans_manager.dataset_name, 'labelsTr') + if not isdir(gt_folder): + gt_folder = join(nnUNet_preprocessed, plans_manager.dataset_name, 'gt_segmentations') + compute_metrics_on_folder(gt_folder, + merged_output_folder, + join(merged_output_folder, 'summary.json'), + rw, + dataset_json['file_ending'], + label_manager.foreground_regions if label_manager.has_regions else + label_manager.foreground_labels, + label_manager.ignore_label, + num_processes) diff --git a/docker/template/src/nnunetv2/evaluation/evaluate_predictions.py b/docker/template/src/nnunetv2/evaluation/evaluate_predictions.py new file mode 100644 index 0000000..a2c342a --- /dev/null +++ b/docker/template/src/nnunetv2/evaluation/evaluate_predictions.py @@ -0,0 +1,263 @@ +import multiprocessing +import os +from copy import deepcopy +from typing import Tuple, List, Union + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import subfiles, join, save_json, load_json, \ + isfile +from nnunetv2.configuration import default_num_processes +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json, \ + determine_reader_writer_from_file_ending +from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO +# the Evaluator class of the previous nnU-Net was great and all but man was it overengineered. 
Keep it simple +from nnunetv2.utilities.json_export import recursive_fix_for_json_export +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + +def label_or_region_to_key(label_or_region: Union[int, Tuple[int]]): + return str(label_or_region) + + +def key_to_label_or_region(key: str): + try: + return int(key) + except ValueError: + key = key.replace('(', '') + key = key.replace(')', '') + split = key.split(',') + return tuple([int(i) for i in split if len(i) > 0]) + + +def save_summary_json(results: dict, output_file: str): + """ + stupid json does not support tuples as keys (why does it have to be so shitty) so we need to convert that shit + ourselves + """ + results_converted = deepcopy(results) + # convert keys in mean metrics + results_converted['mean'] = {label_or_region_to_key(k): results['mean'][k] for k in results['mean'].keys()} + # convert metric_per_case + for i in range(len(results_converted["metric_per_case"])): + results_converted["metric_per_case"][i]['metrics'] = \ + {label_or_region_to_key(k): results["metric_per_case"][i]['metrics'][k] + for k in results["metric_per_case"][i]['metrics'].keys()} + # sort_keys=True will make foreground_mean the first entry and thus easy to spot + save_json(results_converted, output_file, sort_keys=True) + + +def load_summary_json(filename: str): + results = load_json(filename) + # convert keys in mean metrics + results['mean'] = {key_to_label_or_region(k): results['mean'][k] for k in results['mean'].keys()} + # convert metric_per_case + for i in range(len(results["metric_per_case"])): + results["metric_per_case"][i]['metrics'] = \ + {key_to_label_or_region(k): results["metric_per_case"][i]['metrics'][k] + for k in results["metric_per_case"][i]['metrics'].keys()} + return results + + +def labels_to_list_of_regions(labels: List[int]): + return [(i,) for i in labels] + + +def region_or_label_to_mask(segmentation: np.ndarray, region_or_label: Union[int, Tuple[int, ...]]) -> np.ndarray: + if np.isscalar(region_or_label): + return segmentation == region_or_label + else: + mask = np.zeros_like(segmentation, dtype=bool) + for r in region_or_label: + mask[segmentation == r] = True + return mask + + +def compute_tp_fp_fn_tn(mask_ref: np.ndarray, mask_pred: np.ndarray, ignore_mask: np.ndarray = None): + if ignore_mask is None: + use_mask = np.ones_like(mask_ref, dtype=bool) + else: + use_mask = ~ignore_mask + tp = np.sum((mask_ref & mask_pred) & use_mask) + fp = np.sum(((~mask_ref) & mask_pred) & use_mask) + fn = np.sum((mask_ref & (~mask_pred)) & use_mask) + tn = np.sum(((~mask_ref) & (~mask_pred)) & use_mask) + return tp, fp, fn, tn + + +def compute_metrics(reference_file: str, prediction_file: str, image_reader_writer: BaseReaderWriter, + labels_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]], + ignore_label: int = None) -> dict: + # load images + seg_ref, seg_ref_dict = image_reader_writer.read_seg(reference_file) + seg_pred, seg_pred_dict = image_reader_writer.read_seg(prediction_file) + # spacing = seg_ref_dict['spacing'] + + ignore_mask = seg_ref == ignore_label if ignore_label is not None else None + + results = {} + results['reference_file'] = reference_file + results['prediction_file'] = prediction_file + results['metrics'] = {} + for r in labels_or_regions: + results['metrics'][r] = {} + mask_ref = region_or_label_to_mask(seg_ref, r) + mask_pred = region_or_label_to_mask(seg_pred, r) + tp, fp, fn, tn = compute_tp_fp_fn_tn(mask_ref, mask_pred, ignore_mask) + if tp + fp + fn == 0: + 
results['metrics'][r]['Dice'] = np.nan + results['metrics'][r]['IoU'] = np.nan + else: + results['metrics'][r]['Dice'] = 2 * tp / (2 * tp + fp + fn) + results['metrics'][r]['IoU'] = tp / (tp + fp + fn) + results['metrics'][r]['FP'] = fp + results['metrics'][r]['TP'] = tp + results['metrics'][r]['FN'] = fn + results['metrics'][r]['TN'] = tn + results['metrics'][r]['n_pred'] = fp + tp + results['metrics'][r]['n_ref'] = fn + tp + return results + + +def compute_metrics_on_folder(folder_ref: str, folder_pred: str, output_file: str, + image_reader_writer: BaseReaderWriter, + file_ending: str, + regions_or_labels: Union[List[int], List[Union[int, Tuple[int, ...]]]], + ignore_label: int = None, + num_processes: int = default_num_processes, + chill: bool = True) -> dict: + """ + output_file must end with .json; can be None + """ + if output_file is not None: + assert output_file.endswith('.json'), 'output_file should end with .json' + files_pred = subfiles(folder_pred, suffix=file_ending, join=False) + files_ref = subfiles(folder_ref, suffix=file_ending, join=False) + if not chill: + present = [isfile(join(folder_pred, i)) for i in files_ref] + assert all(present), "Not all files in folder_pred exist in folder_ref" + files_ref = [join(folder_ref, i) for i in files_pred] + files_pred = [join(folder_pred, i) for i in files_pred] + with multiprocessing.get_context("spawn").Pool(num_processes) as pool: + # for i in list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred), [ignore_label] * len(files_pred))): + # compute_metrics(*i) + results = pool.starmap( + compute_metrics, + list(zip(files_ref, files_pred, [image_reader_writer] * len(files_pred), [regions_or_labels] * len(files_pred), + [ignore_label] * len(files_pred))) + ) + + # mean metric per class + metric_list = list(results[0]['metrics'][regions_or_labels[0]].keys()) + means = {} + for r in regions_or_labels: + means[r] = {} + for m in metric_list: + means[r][m] = np.nanmean([i['metrics'][r][m] for i in results]) + + # foreground mean + foreground_mean = {} + for m in metric_list: + values = [] + for k in means.keys(): + if k == 0 or k == '0': + continue + values.append(means[k][m]) + foreground_mean[m] = np.mean(values) + + [recursive_fix_for_json_export(i) for i in results] + recursive_fix_for_json_export(means) + recursive_fix_for_json_export(foreground_mean) + result = {'metric_per_case': results, 'mean': means, 'foreground_mean': foreground_mean} + if output_file is not None: + save_summary_json(result, output_file) + return result + # print('DONE') + + +def compute_metrics_on_folder2(folder_ref: str, folder_pred: str, dataset_json_file: str, plans_file: str, + output_file: str = None, + num_processes: int = default_num_processes, + chill: bool = False): + dataset_json = load_json(dataset_json_file) + # get file ending + file_ending = dataset_json['file_ending'] + + # get reader writer class + example_file = subfiles(folder_ref, suffix=file_ending, join=True)[0] + rw = determine_reader_writer_from_dataset_json(dataset_json, example_file)() + + # maybe auto set output file + if output_file is None: + output_file = join(folder_pred, 'summary.json') + + lm = PlansManager(plans_file).get_label_manager(dataset_json) + compute_metrics_on_folder(folder_ref, folder_pred, output_file, rw, file_ending, + lm.foreground_regions if lm.has_regions else lm.foreground_labels, lm.ignore_label, + num_processes, chill=chill) + + +def compute_metrics_on_folder_simple(folder_ref: str, folder_pred: str, 
labels: Union[Tuple[int, ...], List[int]], + output_file: str = None, + num_processes: int = default_num_processes, + ignore_label: int = None, + chill: bool = False): + example_file = subfiles(folder_ref, join=True)[0] + file_ending = os.path.splitext(example_file)[-1] + rw = determine_reader_writer_from_file_ending(file_ending, example_file, allow_nonmatching_filename=True, + verbose=False)() + # maybe auto set output file + if output_file is None: + output_file = join(folder_pred, 'summary.json') + compute_metrics_on_folder(folder_ref, folder_pred, output_file, rw, file_ending, + labels, ignore_label=ignore_label, num_processes=num_processes, chill=chill) + + +def evaluate_folder_entry_point(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('gt_folder', type=str, help='folder with gt segmentations') + parser.add_argument('pred_folder', type=str, help='folder with predicted segmentations') + parser.add_argument('-djfile', type=str, required=True, + help='dataset.json file') + parser.add_argument('-pfile', type=str, required=True, + help='plans.json file') + parser.add_argument('-o', type=str, required=False, default=None, + help='Output file. Optional. Default: pred_folder/summary.json') + parser.add_argument('-np', type=int, required=False, default=default_num_processes, + help=f'number of processes used. Optional. Default: {default_num_processes}') + parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt') + args = parser.parse_args() + compute_metrics_on_folder2(args.gt_folder, args.pred_folder, args.djfile, args.pfile, args.o, args.np, chill=args.chill) + + +def evaluate_simple_entry_point(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('gt_folder', type=str, help='folder with gt segmentations') + parser.add_argument('pred_folder', type=str, help='folder with predicted segmentations') + parser.add_argument('-l', type=int, nargs='+', required=True, + help='list of labels') + parser.add_argument('-il', type=int, required=False, default=None, + help='ignore label') + parser.add_argument('-o', type=str, required=False, default=None, + help='Output file. Optional. Default: pred_folder/summary.json') + parser.add_argument('-np', type=int, required=False, default=default_num_processes, + help=f'number of processes used. Optional. 
Default: {default_num_processes}') + parser.add_argument('--chill', action='store_true', help='dont crash if folder_pred does not have all files that are present in folder_gt') + + args = parser.parse_args() + compute_metrics_on_folder_simple(args.gt_folder, args.pred_folder, args.l, args.o, args.np, args.il, chill=args.chill) + + +if __name__ == '__main__': + folder_ref = '/media/fabian/data/nnUNet_raw/Dataset004_Hippocampus/labelsTr' + folder_pred = '/home/fabian/results/nnUNet_remake/Dataset004_Hippocampus/nnUNetModule__nnUNetPlans__3d_fullres/fold_0/validation' + output_file = '/home/fabian/results/nnUNet_remake/Dataset004_Hippocampus/nnUNetModule__nnUNetPlans__3d_fullres/fold_0/validation/summary.json' + image_reader_writer = SimpleITKIO() + file_ending = '.nii.gz' + regions = labels_to_list_of_regions([1, 2]) + ignore_label = None + num_processes = 12 + compute_metrics_on_folder(folder_ref, folder_pred, output_file, image_reader_writer, file_ending, regions, ignore_label, + num_processes) diff --git a/docker/template/src/nnunetv2/evaluation/find_best_configuration.py b/docker/template/src/nnunetv2/evaluation/find_best_configuration.py new file mode 100644 index 0000000..7e9f774 --- /dev/null +++ b/docker/template/src/nnunetv2/evaluation/find_best_configuration.py @@ -0,0 +1,333 @@ +import argparse +import os.path +from copy import deepcopy +from typing import Union, List, Tuple + +from batchgenerators.utilities.file_and_folder_operations import load_json, join, isdir, save_json + +from nnunetv2.configuration import default_num_processes +from nnunetv2.ensembling.ensemble import ensemble_crossvalidations +from nnunetv2.evaluation.accumulate_cv_results import accumulate_cv_results +from nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder, load_summary_json +from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw, nnUNet_results +from nnunetv2.postprocessing.remove_connected_components import determine_postprocessing +from nnunetv2.utilities.file_path_utilities import maybe_convert_to_dataset_name, get_output_folder, \ + convert_identifier_to_trainer_plans_config, get_ensemble_name, folds_tuple_to_string +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + +default_trained_models = tuple([ + {'plans': 'nnUNetPlans', 'configuration': '2d', 'trainer': 'nnUNetTrainer'}, + {'plans': 'nnUNetPlans', 'configuration': '3d_fullres', 'trainer': 'nnUNetTrainer'}, + {'plans': 'nnUNetPlans', 'configuration': '3d_lowres', 'trainer': 'nnUNetTrainer'}, + {'plans': 'nnUNetPlans', 'configuration': '3d_cascade_fullres', 'trainer': 'nnUNetTrainer'}, +]) + + +def filter_available_models(model_dict: Union[List[dict], Tuple[dict, ...]], dataset_name_or_id: Union[str, int]): + valid = [] + for trained_model in model_dict: + plans_manager = PlansManager(join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id), + trained_model['plans'] + '.json')) + # check if configuration exists + # 3d_cascade_fullres and 3d_lowres do not exist for each dataset so we allow them to be absent IF they are not + # specified in the plans file + if trained_model['configuration'] not in plans_manager.available_configurations: + print(f"Configuration {trained_model['configuration']} not found in plans {trained_model['plans']}.\n" + f"Inferred plans file: {join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id), trained_model['plans'] + '.json')}.") + continue + + # check if trained model output folder exists. This is a requirement. No mercy here. 
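+        # get_output_folder resolves to the standard nnU-Net results layout, e.g. (illustrative path):
+        # nnUNet_results/Dataset042_Example/nnUNetTrainer__nnUNetPlans__3d_fullres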
+ expected_output_folder = get_output_folder(dataset_name_or_id, trained_model['trainer'], trained_model['plans'], + trained_model['configuration'], fold=None) + if not isdir(expected_output_folder): + raise RuntimeError(f"Trained model {trained_model} does not have an output folder. " + f"Expected: {expected_output_folder}. Please run the training for this model! (don't forget " + f"the --npz flag if you want to ensemble multiple configurations)") + + valid.append(trained_model) + return valid + + +def generate_inference_command(dataset_name_or_id: Union[int, str], configuration_name: str, + plans_identifier: str = 'nnUNetPlans', trainer_name: str = 'nnUNetTrainer', + folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4), + folder_with_segs_from_prev_stage: str = None, + input_folder: str = 'INPUT_FOLDER', + output_folder: str = 'OUTPUT_FOLDER', + save_npz: bool = False): + fold_str = '' + for f in folds: + fold_str += f' {f}' + + predict_command = '' + trained_model_folder = get_output_folder(dataset_name_or_id, trainer_name, plans_identifier, configuration_name, fold=None) + plans_manager = PlansManager(join(trained_model_folder, 'plans.json')) + configuration_manager = plans_manager.get_configuration(configuration_name) + if 'previous_stage' in plans_manager.available_configurations: + prev_stage = configuration_manager.previous_stage_name + predict_command += generate_inference_command(dataset_name_or_id, prev_stage, plans_identifier, trainer_name, + folds, None, output_folder='OUTPUT_FOLDER_PREV_STAGE') + '\n' + folder_with_segs_from_prev_stage = 'OUTPUT_FOLDER_PREV_STAGE' + + predict_command = f'nnUNetv2_predict -d {dataset_name_or_id} -i {input_folder} -o {output_folder} -f {fold_str} ' \ + f'-tr {trainer_name} -c {configuration_name} -p {plans_identifier}' + if folder_with_segs_from_prev_stage is not None: + predict_command += f' -prev_stage_predictions {folder_with_segs_from_prev_stage}' + if save_npz: + predict_command += ' --save_probabilities' + return predict_command + + +def find_best_configuration(dataset_name_or_id, + allowed_trained_models: Union[List[dict], Tuple[dict, ...]] = default_trained_models, + allow_ensembling: bool = True, + num_processes: int = default_num_processes, + overwrite: bool = True, + folds: Union[List[int], Tuple[int, ...]] = (0, 1, 2, 3, 4), + strict: bool = False): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + all_results = {} + + allowed_trained_models = filter_available_models(deepcopy(allowed_trained_models), dataset_name_or_id) + + for m in allowed_trained_models: + output_folder = get_output_folder(dataset_name_or_id, m['trainer'], m['plans'], m['configuration'], fold=None) + if not isdir(output_folder) and strict: + raise RuntimeError(f'{dataset_name}: The output folder of plans {m["plans"]} configuration ' + f'{m["configuration"]} is missing. Please train the model (all requested folds!) 
first!') + identifier = os.path.basename(output_folder) + merged_output_folder = join(output_folder, f'crossval_results_folds_{folds_tuple_to_string(folds)}') + accumulate_cv_results(output_folder, merged_output_folder, folds, num_processes, overwrite) + all_results[identifier] = { + 'source': merged_output_folder, + 'result': load_summary_json(join(merged_output_folder, 'summary.json'))['foreground_mean']['Dice'] + } + + if allow_ensembling: + for i in range(len(allowed_trained_models)): + for j in range(i + 1, len(allowed_trained_models)): + m1, m2 = allowed_trained_models[i], allowed_trained_models[j] + + output_folder_1 = get_output_folder(dataset_name_or_id, m1['trainer'], m1['plans'], m1['configuration'], fold=None) + output_folder_2 = get_output_folder(dataset_name_or_id, m2['trainer'], m2['plans'], m2['configuration'], fold=None) + identifier = get_ensemble_name(output_folder_1, output_folder_2, folds) + + output_folder_ensemble = join(nnUNet_results, dataset_name, 'ensembles', identifier) + + ensemble_crossvalidations([output_folder_1, output_folder_2], output_folder_ensemble, folds, + num_processes, overwrite=overwrite) + + # evaluate ensembled predictions + plans_manager = PlansManager(join(output_folder_1, 'plans.json')) + dataset_json = load_json(join(output_folder_1, 'dataset.json')) + label_manager = plans_manager.get_label_manager(dataset_json) + rw = plans_manager.image_reader_writer_class() + + compute_metrics_on_folder(join(nnUNet_preprocessed, dataset_name, 'gt_segmentations'), + output_folder_ensemble, + join(output_folder_ensemble, 'summary.json'), + rw, + dataset_json['file_ending'], + label_manager.foreground_regions if label_manager.has_regions else + label_manager.foreground_labels, + label_manager.ignore_label, + num_processes) + all_results[identifier] = \ + { + 'source': output_folder_ensemble, + 'result': load_summary_json(join(output_folder_ensemble, 'summary.json'))['foreground_mean']['Dice'] + } + + # pick best and report inference command + best_score = max([i['result'] for i in all_results.values()]) + best_keys = [k for k in all_results.keys() if all_results[k]['result'] == best_score] # may never happen but theoretically + # there can be a tie. 
Let's pick the first model in this case because it's going to be the simpler one (ensembles
+    # come after single configs)
+    best_key = best_keys[0]
+
+    print()
+    print('***All results:***')
+    for k, v in all_results.items():
+        print(f'{k}: {v["result"]}')
+    print(f'\n*Best*: {best_key}: {all_results[best_key]["result"]}')
+    print()
+
+    print('***Determining postprocessing for best model/ensemble***')
+    determine_postprocessing(all_results[best_key]['source'], join(nnUNet_preprocessed, dataset_name, 'gt_segmentations'),
+                             plans_file_or_dict=join(all_results[best_key]['source'], 'plans.json'),
+                             dataset_json_file_or_dict=join(all_results[best_key]['source'], 'dataset.json'),
+                             num_processes=num_processes, keep_postprocessed_files=True)
+
+    # in addition to just reading the console output (how it was previously) we should return the information
+    # needed to run the full inference via API
+    return_dict = {
+        'folds': folds,
+        'dataset_name_or_id': dataset_name_or_id,
+        'considered_models': allowed_trained_models,
+        'ensembling_allowed': allow_ensembling,
+        'all_results': {i: j['result'] for i, j in all_results.items()},
+        'best_model_or_ensemble': {
+            'result_on_crossval_pre_pp': all_results[best_key]["result"],
+            'result_on_crossval_post_pp': load_json(join(all_results[best_key]['source'], 'postprocessed', 'summary.json'))['foreground_mean']['Dice'],
+            'postprocessing_file': join(all_results[best_key]['source'], 'postprocessing.pkl'),
+            'some_plans_file': join(all_results[best_key]['source'], 'plans.json'),
+            # just needed for label handling, can
+            # come from any of the ensemble members (if any)
+            'selected_model_or_models': []
+        }
+    }
+    # convert best key to inference command:
+    if best_key.startswith('ensemble___'):
+        prefix, m1, m2, folds_string = best_key.split('___')
+        tr1, pl1, c1 = convert_identifier_to_trainer_plans_config(m1)
+        tr2, pl2, c2 = convert_identifier_to_trainer_plans_config(m2)
+        return_dict['best_model_or_ensemble']['selected_model_or_models'].append(
+            {
+                'configuration': c1,
+                'trainer': tr1,
+                'plans_identifier': pl1,
+            })
+        return_dict['best_model_or_ensemble']['selected_model_or_models'].append(
+            {
+                'configuration': c2,
+                'trainer': tr2,
+                'plans_identifier': pl2,
+            })
+    else:
+        tr, pl, c = convert_identifier_to_trainer_plans_config(best_key)
+        return_dict['best_model_or_ensemble']['selected_model_or_models'].append(
+            {
+                'configuration': c,
+                'trainer': tr,
+                'plans_identifier': pl,
+            })
+
+    save_json(return_dict, join(nnUNet_results, dataset_name, 'inference_information.json'))  # save this so that we don't have to run this
+    # every time someone wants to be reminded of the inference commands. They can just load this and give it to
+    # print_inference_instructions
+
+    # print it
+    print_inference_instructions(return_dict, instructions_file=join(nnUNet_results, dataset_name, 'inference_instructions.txt'))
+    return return_dict
+
+
+def print_inference_instructions(inference_info_dict: dict, instructions_file: str = None):
+    def _print_and_maybe_write_to_file(string):
+        print(string)
+        if f_handle is not None:
+            f_handle.write(f'{string}\n')
+
+    f_handle = open(instructions_file, 'w') if instructions_file is not None else None
+    print()
+    _print_and_maybe_write_to_file('***Run inference like this:***\n')
+    output_folders = []
+
+    dataset_name_or_id = inference_info_dict['dataset_name_or_id']
+    if len(inference_info_dict['best_model_or_ensemble']['selected_model_or_models']) > 1:
+        is_ensemble = True
+        _print_and_maybe_write_to_file('An ensemble won! What a surprise! Run the following commands to run predictions with the ensemble members:\n')
+    else:
+        is_ensemble = False
+
+    for j, i in enumerate(inference_info_dict['best_model_or_ensemble']['selected_model_or_models']):
+        tr, c, pl = i['trainer'], i['configuration'], i['plans_identifier']
+        if is_ensemble:
+            output_folder_name = f"OUTPUT_FOLDER_MODEL_{j+1}"
+        else:
+            output_folder_name = f"OUTPUT_FOLDER"
+        output_folders.append(output_folder_name)
+
+        _print_and_maybe_write_to_file(generate_inference_command(dataset_name_or_id, c, pl, tr, inference_info_dict['folds'],
+                                                                  save_npz=is_ensemble, output_folder=output_folder_name))
+
+    if is_ensemble:
+        output_folder_str = output_folders[0]
+        for o in output_folders[1:]:
+            output_folder_str += f' {o}'
+        output_ensemble = f"OUTPUT_FOLDER"
+        _print_and_maybe_write_to_file('\nThen run ensembling with:\n')
+        _print_and_maybe_write_to_file(f"nnUNetv2_ensemble -i {output_folder_str} -o {output_ensemble} -np {default_num_processes}")
+
+    _print_and_maybe_write_to_file("\n***Once inference is completed, run postprocessing like this:***\n")
+    _print_and_maybe_write_to_file(f"nnUNetv2_apply_postprocessing -i OUTPUT_FOLDER -o OUTPUT_FOLDER_PP "
+                                   f"-pp_pkl_file {inference_info_dict['best_model_or_ensemble']['postprocessing_file']} -np {default_num_processes} "
+                                   f"-plans_json {inference_info_dict['best_model_or_ensemble']['some_plans_file']}")
+
+
+def dumb_trainer_config_plans_to_trained_models_dict(trainers: List[str], configs: List[str], plans: List[str]):
+    """
+    function is called dumb because it's dumb
+    """
+    ret = []
+    for t in trainers:
+        for c in configs:
+            for p in plans:
+                ret.append(
+                    {'plans': p, 'configuration': c, 'trainer': t}
+                )
+    return tuple(ret)
+
+
+def find_best_configuration_entry_point():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('dataset_name_or_id', type=str, help='Dataset Name or id')
+    parser.add_argument('-p', nargs='+', required=False, default=['nnUNetPlans'],
+                        help='List of plan identifiers. Default: nnUNetPlans')
+    parser.add_argument('-c', nargs='+', required=False, default=['2d', '3d_fullres', '3d_lowres', '3d_cascade_fullres'],
+                        help="List of configurations. Default: ['2d', '3d_fullres', '3d_lowres', '3d_cascade_fullres']")
+    parser.add_argument('-tr', nargs='+', required=False, default=['nnUNetTrainer'],
+                        help='List of trainers. Default: nnUNetTrainer')
+    parser.add_argument('-np', required=False, default=default_num_processes, type=int,
+                        help='Number of processes to use for ensembling, postprocessing etc')
+    parser.add_argument('-f', nargs='+', type=int, default=(0, 1, 2, 3, 4),
+                        help='Folds to use. Default: 0 1 2 3 4')
+    parser.add_argument('--disable_ensembling', action='store_true', required=False,
+                        help='Set this flag to disable ensembling')
+    parser.add_argument('--no_overwrite', action='store_true',
+                        help='If set we will not overwrite already ensembled files etc. May speed up consecutive '
+                             'runs of this command (why would you want to do that?)
at the risk of not updating ' + 'outdated results.') + args = parser.parse_args() + + model_dict = dumb_trainer_config_plans_to_trained_models_dict(args.tr, args.c, args.p) + dataset_name = maybe_convert_to_dataset_name(args.dataset_name_or_id) + + find_best_configuration(dataset_name, model_dict, allow_ensembling=not args.disable_ensembling, + num_processes=args.np, overwrite=not args.no_overwrite, folds=args.f, + strict=False) + + +def accumulate_crossval_results_entry_point(): + parser = argparse.ArgumentParser('Copies all predicted segmentations from the individual folds into one joint ' + 'folder and evaluates them') + parser.add_argument('dataset_name_or_id', type=str, help='Dataset Name or id') + parser.add_argument('-c', type=str, required=True, + default='3d_fullres', + help="Configuration") + parser.add_argument('-o', type=str, required=False, default=None, + help="Output folder. If not specified, the output folder will be located in the trained " \ + "model directory (named crossval_results_folds_XXX).") + parser.add_argument('-f', nargs='+', type=int, default=(0, 1, 2, 3, 4), + help='Folds to use. Default: 0 1 2 3 4') + parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', + help='Plan identifier in which to search for the specified configuration. Default: nnUNetPlans') + parser.add_argument('-tr', type=str, required=False, default='nnUNetTrainer', + help='Trainer class. Default: nnUNetTrainer') + args = parser.parse_args() + trained_model_folder = get_output_folder(args.dataset_name_or_id, args.tr, args.p, args.c) + + if args.o is None: + merged_output_folder = join(trained_model_folder, f'crossval_results_folds_{folds_tuple_to_string(args.f)}') + else: + merged_output_folder = args.o + + accumulate_cv_results(trained_model_folder, merged_output_folder, args.f) + + +if __name__ == '__main__': + find_best_configuration(4, + default_trained_models, + True, + 8, + False, + (0, 1, 2, 3, 4)) diff --git a/docker/template/src/nnunetv2/experiment_planning/__init__.py b/docker/template/src/nnunetv2/experiment_planning/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py b/docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py b/docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py new file mode 100644 index 0000000..a4bec96 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/dataset_fingerprint/fingerprint_extractor.py @@ -0,0 +1,199 @@ +import multiprocessing +import os +from time import sleep +from typing import List, Type, Union + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import load_json, join, save_json, isfile, maybe_mkdir_p +from tqdm import tqdm + +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.preprocessing.cropping.cropping import crop_to_nonzero +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + + +class DatasetFingerprintExtractor(object): + def __init__(self, dataset_name_or_id: Union[str, 
int], num_processes: int = 8, verbose: bool = False): + """ + extracts the dataset fingerprint used for experiment planning. The dataset fingerprint will be saved as a + json file in the input_folder + + Philosophy here is to do only what we really need. Don't store stuff that we can easily read from somewhere + else. Don't compute stuff we don't need (except for intensity_statistics_per_channel) + """ + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + self.verbose = verbose + + self.dataset_name = dataset_name + self.input_folder = join(nnUNet_raw, dataset_name) + self.num_processes = num_processes + self.dataset_json = load_json(join(self.input_folder, 'dataset.json')) + self.dataset = get_filenames_of_train_images_and_targets(self.input_folder, self.dataset_json) + + # We don't want to use all foreground voxels because that can accumulate a lot of data (out of memory). It is + # also not critically important to get all pixels as long as there are enough. Let's use 10e7 voxels in total + # (for the entire dataset) + self.num_foreground_voxels_for_intensitystats = 10e7 + + @staticmethod + def collect_foreground_intensities(segmentation: np.ndarray, images: np.ndarray, seed: int = 1234, + num_samples: int = 10000): + """ + images=image with multiple channels = shape (c, x, y(, z)) + """ + assert images.ndim == 4 + assert segmentation.ndim == 4 + + assert not np.any(np.isnan(segmentation)), "Segmentation contains NaN values. grrrr.... :-(" + assert not np.any(np.isnan(images)), "Images contains NaN values. grrrr.... :-(" + + rs = np.random.RandomState(seed) + + intensities_per_channel = [] + # we don't use the intensity_statistics_per_channel at all, it's just something that might be nice to have + intensity_statistics_per_channel = [] + + # segmentation is 4d: 1,x,y,z. We need to remove the empty dimension for the following code to work + foreground_mask = segmentation[0] > 0 + + for i in range(len(images)): + foreground_pixels = images[i][foreground_mask] + num_fg = len(foreground_pixels) + # sample with replacement so that we don't get issues with cases that have less than num_samples + # foreground_pixels. We could also just sample less in those cases but that would than cause these + # training cases to be underrepresented + intensities_per_channel.append( + rs.choice(foreground_pixels, num_samples, replace=True) if num_fg > 0 else []) + intensity_statistics_per_channel.append({ + 'mean': np.mean(foreground_pixels) if num_fg > 0 else np.nan, + 'median': np.median(foreground_pixels) if num_fg > 0 else np.nan, + 'min': np.min(foreground_pixels) if num_fg > 0 else np.nan, + 'max': np.max(foreground_pixels) if num_fg > 0 else np.nan, + 'percentile_99_5': np.percentile(foreground_pixels, 99.5) if num_fg > 0 else np.nan, + 'percentile_00_5': np.percentile(foreground_pixels, 0.5) if num_fg > 0 else np.nan, + + }) + + return intensities_per_channel, intensity_statistics_per_channel + + @staticmethod + def analyze_case(image_files: List[str], segmentation_file: str, reader_writer_class: Type[BaseReaderWriter], + num_samples: int = 10000): + rw = reader_writer_class() + images, properties_images = rw.read_images(image_files) + segmentation, properties_seg = rw.read_seg(segmentation_file) + + # we no longer crop and save the cropped images before this is run. Instead we run the cropping on the fly. + # Downside is that we need to do this twice (once here and once during preprocessing). Upside is that we don't + # need to save the cropped data anymore. 
Given that cropping is not too expensive it makes sense to do it this + # way. This is only possible because we are now using our new input/output interface. + data_cropped, seg_cropped, bbox = crop_to_nonzero(images, segmentation) + + foreground_intensities_per_channel, foreground_intensity_stats_per_channel = \ + DatasetFingerprintExtractor.collect_foreground_intensities(seg_cropped, data_cropped, + num_samples=num_samples) + + spacing = properties_images['spacing'] + + shape_before_crop = images.shape[1:] + shape_after_crop = data_cropped.shape[1:] + relative_size_after_cropping = np.prod(shape_after_crop) / np.prod(shape_before_crop) + return shape_after_crop, spacing, foreground_intensities_per_channel, foreground_intensity_stats_per_channel, \ + relative_size_after_cropping + + def run(self, overwrite_existing: bool = False) -> dict: + # we do not save the properties file in self.input_folder because that folder might be read-only. We can only + # reliably write in nnUNet_preprocessed and nnUNet_results, so nnUNet_preprocessed it is + preprocessed_output_folder = join(nnUNet_preprocessed, self.dataset_name) + maybe_mkdir_p(preprocessed_output_folder) + properties_file = join(preprocessed_output_folder, 'dataset_fingerprint.json') + + if not isfile(properties_file) or overwrite_existing: + reader_writer_class = determine_reader_writer_from_dataset_json(self.dataset_json, + # yikes. Rip the following line + self.dataset[self.dataset.keys().__iter__().__next__()]['images'][0]) + + # determine how many foreground voxels we need to sample per training case + num_foreground_samples_per_case = int(self.num_foreground_voxels_for_intensitystats // + len(self.dataset)) + + r = [] + with multiprocessing.get_context("spawn").Pool(self.num_processes) as p: + for k in self.dataset.keys(): + r.append(p.starmap_async(DatasetFingerprintExtractor.analyze_case, + ((self.dataset[k]['images'], self.dataset[k]['label'], reader_writer_class, + num_foreground_samples_per_case),))) + remaining = list(range(len(self.dataset))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(self.dataset), disable=self.verbose) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. 
In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) + + # results = ptqdm(DatasetFingerprintExtractor.analyze_case, + # (training_images_per_case, training_labels_per_case), + # processes=self.num_processes, zipped=True, reader_writer_class=reader_writer_class, + # num_samples=num_foreground_samples_per_case, disable=self.verbose) + results = [i.get()[0] for i in r] + + shapes_after_crop = [r[0] for r in results] + spacings = [r[1] for r in results] + foreground_intensities_per_channel = [np.concatenate([r[2][i] for r in results]) for i in + range(len(results[0][2]))] + # we drop this so that the json file is somewhat human readable + # foreground_intensity_stats_by_case_and_modality = [r[3] for r in results] + median_relative_size_after_cropping = np.median([r[4] for r in results], 0) + + num_channels = len(self.dataset_json['channel_names'].keys() + if 'channel_names' in self.dataset_json.keys() + else self.dataset_json['modality'].keys()) + intensity_statistics_per_channel = {} + for i in range(num_channels): + intensity_statistics_per_channel[i] = { + 'mean': float(np.mean(foreground_intensities_per_channel[i])), + 'median': float(np.median(foreground_intensities_per_channel[i])), + 'std': float(np.std(foreground_intensities_per_channel[i])), + 'min': float(np.min(foreground_intensities_per_channel[i])), + 'max': float(np.max(foreground_intensities_per_channel[i])), + 'percentile_99_5': float(np.percentile(foreground_intensities_per_channel[i], 99.5)), + 'percentile_00_5': float(np.percentile(foreground_intensities_per_channel[i], 0.5)), + } + + fingerprint = { + "spacings": spacings, + "shapes_after_crop": shapes_after_crop, + 'foreground_intensity_properties_per_channel': intensity_statistics_per_channel, + "median_relative_size_after_cropping": median_relative_size_after_cropping + } + + try: + save_json(fingerprint, properties_file) + except Exception as e: + if isfile(properties_file): + os.remove(properties_file) + raise e + else: + fingerprint = load_json(properties_file) + return fingerprint + + +if __name__ == '__main__': + dfe = DatasetFingerprintExtractor(2, 8) + dfe.run(overwrite_existing=False) diff --git a/docker/template/src/nnunetv2/experiment_planning/experiment_planners/__init__.py b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py new file mode 100644 index 0000000..1055170 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/default_experiment_planner.py @@ -0,0 +1,542 @@ +import shutil +from copy import deepcopy +from functools import lru_cache +from typing import List, Union, Tuple, Type + +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import load_json, join, save_json, isfile, maybe_mkdir_p +from dynamic_network_architectures.architectures.unet import PlainConvUNet, ResidualEncoderUNet +from dynamic_network_architectures.building_blocks.helper import convert_dim_to_conv_op, get_matching_instancenorm + +from nnunetv2.configuration import ANISO_THRESHOLD +from nnunetv2.experiment_planning.experiment_planners.network_topology import get_pool_and_conv_props 
+from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.preprocessing.normalization.map_channel_name_to_normalization import get_normalization_scheme +from nnunetv2.preprocessing.resampling.default_resampling import resample_data_or_seg_to_shape, compute_new_shape +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.json_export import recursive_fix_for_json_export +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + + +class ExperimentPlanner(object): + def __init__(self, dataset_name_or_id: Union[str, int], + gpu_memory_target_in_gb: float = 8, + preprocessor_name: str = 'DefaultPreprocessor', plans_name: str = 'nnUNetPlans', + overwrite_target_spacing: Union[List[float], Tuple[float, ...]] = None, + suppress_transpose: bool = False): + """ + overwrite_target_spacing only affects 3d_fullres! (but by extension 3d_lowres which starts with fullres may + also be affected + """ + + self.dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + self.suppress_transpose = suppress_transpose + self.raw_dataset_folder = join(nnUNet_raw, self.dataset_name) + preprocessed_folder = join(nnUNet_preprocessed, self.dataset_name) + self.dataset_json = load_json(join(self.raw_dataset_folder, 'dataset.json')) + self.dataset = get_filenames_of_train_images_and_targets(self.raw_dataset_folder, self.dataset_json) + + # load dataset fingerprint + if not isfile(join(preprocessed_folder, 'dataset_fingerprint.json')): + raise RuntimeError('Fingerprint missing for this dataset. Please run nnUNet_extract_dataset_fingerprint') + + self.dataset_fingerprint = load_json(join(preprocessed_folder, 'dataset_fingerprint.json')) + + self.anisotropy_threshold = ANISO_THRESHOLD + + self.UNet_base_num_features = 32 + self.UNet_class = PlainConvUNet + # the following two numbers are really arbitrary and were set to reproduce nnU-Net v1's configurations as + # much as possible + self.UNet_reference_val_3d = 560000000 # 455600128 550000000 + self.UNet_reference_val_2d = 85000000 # 83252480 + self.UNet_reference_com_nfeatures = 32 + self.UNet_reference_val_corresp_GB = 8 + self.UNet_reference_val_corresp_bs_2d = 12 + self.UNet_reference_val_corresp_bs_3d = 2 + self.UNet_vram_target_GB = gpu_memory_target_in_gb + self.UNet_featuremap_min_edge_length = 4 + self.UNet_blocks_per_stage_encoder = (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) + self.UNet_blocks_per_stage_decoder = (2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2) + self.UNet_min_batch_size = 2 + self.UNet_max_features_2d = 512 + self.UNet_max_features_3d = 320 + + self.lowres_creation_threshold = 0.25 # if the patch size of fullres is less than 25% of the voxels in the + # median shape then we need a lowres config as well + + self.preprocessor_name = preprocessor_name + self.plans_identifier = plans_name + self.overwrite_target_spacing = overwrite_target_spacing + assert overwrite_target_spacing is None or len(overwrite_target_spacing), 'if overwrite_target_spacing is ' \ + 'used then three floats must be ' \ + 'given (as list or tuple)' + assert overwrite_target_spacing is None or all([isinstance(i, float) for i in overwrite_target_spacing]), \ + 'if overwrite_target_spacing is used then three floats must be given (as list or tuple)' + + self.plans = None + + def determine_reader_writer(self): + example_image = 
self.dataset[self.dataset.keys().__iter__().__next__()]['images'][0] + return determine_reader_writer_from_dataset_json(self.dataset_json, example_image) + + @staticmethod + @lru_cache(maxsize=None) + def static_estimate_VRAM_usage(patch_size: Tuple[int], + n_stages: int, + strides: Union[int, List[int], Tuple[int, ...]], + UNet_class: Union[Type[PlainConvUNet], Type[ResidualEncoderUNet]], + num_input_channels: int, + features_per_stage: Tuple[int], + blocks_per_stage_encoder: Union[int, Tuple[int]], + blocks_per_stage_decoder: Union[int, Tuple[int]], + num_labels: int): + """ + Works for PlainConvUNet, ResidualEncoderUNet + """ + dim = len(patch_size) + conv_op = convert_dim_to_conv_op(dim) + norm_op = get_matching_instancenorm(conv_op) + net = UNet_class(num_input_channels, n_stages, + features_per_stage, + conv_op, + 3, + strides, + blocks_per_stage_encoder, + num_labels, + blocks_per_stage_decoder, + norm_op=norm_op) + return net.compute_conv_feature_map_size(patch_size) + + def determine_resampling(self, *args, **kwargs): + """ + returns what functions to use for resampling data and seg, respectively. Also returns kwargs + resampling function must be callable(data, current_spacing, new_spacing, **kwargs) + + determine_resampling is called within get_plans_for_configuration to allow for different functions for each + configuration + """ + resampling_data = resample_data_or_seg_to_shape + resampling_data_kwargs = { + "is_seg": False, + "order": 3, + "order_z": 0, + "force_separate_z": None, + } + resampling_seg = resample_data_or_seg_to_shape + resampling_seg_kwargs = { + "is_seg": True, + "order": 1, + "order_z": 0, + "force_separate_z": None, + } + return resampling_data, resampling_data_kwargs, resampling_seg, resampling_seg_kwargs + + def determine_segmentation_softmax_export_fn(self, *args, **kwargs): + """ + function must be callable(data, new_shape, current_spacing, new_spacing, **kwargs). The new_shape should be + used as target. current_spacing and new_spacing are merely there in case we want to use it somehow + + determine_segmentation_softmax_export_fn is called within get_plans_for_configuration to allow for different + functions for each configuration + + """ + resampling_fn = resample_data_or_seg_to_shape + resampling_fn_kwargs = { + "is_seg": False, + "order": 1, + "order_z": 0, + "force_separate_z": None, + } + return resampling_fn, resampling_fn_kwargs + + def determine_fullres_target_spacing(self) -> np.ndarray: + """ + per default we use the 50th percentile=median for the target spacing. Higher spacing results in smaller data + and thus faster and easier training. Smaller spacing results in larger data and thus longer and harder training + + For some datasets the median is not a good choice. Those are the datasets where the spacing is very anisotropic + (for example ACDC with (10, 1.5, 1.5)). These datasets still have examples with a spacing of 5 or 6 mm in the low + resolution axis. Choosing the median here will result in bad interpolation artifacts that can substantially + impact performance (due to the low number of slices). 
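+
+        Illustrative example (made-up numbers): with typical spacings around (5.0, 0.8, 0.8),
+        the first axis can trigger the anisotropy handling below, in which case its target
+        spacing is taken from the 10th percentile of that axis' spacings rather than the median.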
+ """ + if self.overwrite_target_spacing is not None: + return np.array(self.overwrite_target_spacing) + + spacings = self.dataset_fingerprint['spacings'] + sizes = self.dataset_fingerprint['shapes_after_crop'] + + target = np.percentile(np.vstack(spacings), 50, 0) + + # todo sizes_after_resampling = [compute_new_shape(j, i, target) for i, j in zip(spacings, sizes)] + + target_size = np.percentile(np.vstack(sizes), 50, 0) + # we need to identify datasets for which a different target spacing could be beneficial. These datasets have + # the following properties: + # - one axis which much lower resolution than the others + # - the lowres axis has much less voxels than the others + # - (the size in mm of the lowres axis is also reduced) + worst_spacing_axis = np.argmax(target) + other_axes = [i for i in range(len(target)) if i != worst_spacing_axis] + other_spacings = [target[i] for i in other_axes] + other_sizes = [target_size[i] for i in other_axes] + + has_aniso_spacing = target[worst_spacing_axis] > (self.anisotropy_threshold * max(other_spacings)) + has_aniso_voxels = target_size[worst_spacing_axis] * self.anisotropy_threshold < min(other_sizes) + + if has_aniso_spacing and has_aniso_voxels: + spacings_of_that_axis = np.vstack(spacings)[:, worst_spacing_axis] + target_spacing_of_that_axis = np.percentile(spacings_of_that_axis, 10) + # don't let the spacing of that axis get higher than the other axes + if target_spacing_of_that_axis < max(other_spacings): + target_spacing_of_that_axis = max(max(other_spacings), target_spacing_of_that_axis) + 1e-5 + target[worst_spacing_axis] = target_spacing_of_that_axis + return target + + def determine_normalization_scheme_and_whether_mask_is_used_for_norm(self) -> Tuple[List[str], List[bool]]: + if 'channel_names' not in self.dataset_json.keys(): + print('WARNING: "modalities" should be renamed to "channel_names" in dataset.json. This will be ' + 'enforced soon!') + modalities = self.dataset_json['channel_names'] if 'channel_names' in self.dataset_json.keys() else \ + self.dataset_json['modality'] + normalization_schemes = [get_normalization_scheme(m) for m in modalities.values()] + if self.dataset_fingerprint['median_relative_size_after_cropping'] < (3 / 4.): + use_nonzero_mask_for_norm = [i.leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true for i in + normalization_schemes] + else: + use_nonzero_mask_for_norm = [False] * len(normalization_schemes) + assert all([i in (True, False) for i in use_nonzero_mask_for_norm]), 'use_nonzero_mask_for_norm must be ' \ + 'True or False and cannot be None' + normalization_schemes = [i.__name__ for i in normalization_schemes] + return normalization_schemes, use_nonzero_mask_for_norm + + def determine_transpose(self): + if self.suppress_transpose: + return [0, 1, 2], [0, 1, 2] + + # todo we should use shapes for that as well. 
Not quite sure how yet + target_spacing = self.determine_fullres_target_spacing() + + max_spacing_axis = np.argmax(target_spacing) + remaining_axes = [i for i in list(range(3)) if i != max_spacing_axis] + transpose_forward = [max_spacing_axis] + remaining_axes + transpose_backward = [np.argwhere(np.array(transpose_forward) == i)[0][0] for i in range(3)] + return transpose_forward, transpose_backward + + def get_plans_for_configuration(self, + spacing: Union[np.ndarray, Tuple[float, ...], List[float]], + median_shape: Union[np.ndarray, Tuple[int, ...], List[int]], + data_identifier: str, + approximate_n_voxels_dataset: float) -> dict: + assert all([i > 0 for i in spacing]), f"Spacing must be > 0! Spacing: {spacing}" + # print(spacing, median_shape, approximate_n_voxels_dataset) + # find an initial patch size + # we first use the spacing to get an aspect ratio + tmp = 1 / np.array(spacing) + + # we then upscale it so that it initially is certainly larger than what we need (rescale to have the same + # volume as a patch of size 256 ** 3) + # this may need to be adapted when using absurdly large GPU memory targets. Increasing this now would not be + # ideal because large initial patch sizes increase computation time because more iterations in the while loop + # further down may be required. + if len(spacing) == 3: + initial_patch_size = [round(i) for i in tmp * (256 ** 3 / np.prod(tmp)) ** (1 / 3)] + elif len(spacing) == 2: + initial_patch_size = [round(i) for i in tmp * (2048 ** 2 / np.prod(tmp)) ** (1 / 2)] + else: + raise RuntimeError() + + # clip initial patch size to median_shape. It makes little sense to have it be larger than that. Note that + # this is different from how nnU-Net v1 does it! + # todo patch size can still get too large because we pad the patch size to a multiple of 2**n + initial_patch_size = np.array([min(i, j) for i, j in zip(initial_patch_size, median_shape[:len(spacing)])]) + + # use that to get the network topology. Note that this changes the patch_size depending on the number of + # pooling operations (must be divisible by 2**num_pool in each axis) + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(spacing, initial_patch_size, + self.UNet_featuremap_min_edge_length, + 999999) + + # now estimate vram consumption + num_stages = len(pool_op_kernel_sizes) + estimate = self.static_estimate_VRAM_usage(tuple(patch_size), + num_stages, + tuple([tuple(i) for i in pool_op_kernel_sizes]), + self.UNet_class, + len(self.dataset_json['channel_names'].keys() + if 'channel_names' in self.dataset_json.keys() + else self.dataset_json['modality'].keys()), + tuple([min(self.UNet_max_features_2d if len(patch_size) == 2 else + self.UNet_max_features_3d, + self.UNet_reference_com_nfeatures * 2 ** i) for + i in range(len(pool_op_kernel_sizes))]), + self.UNet_blocks_per_stage_encoder[:num_stages], + self.UNet_blocks_per_stage_decoder[:num_stages - 1], + len(self.dataset_json['labels'].keys())) + + # how large is the reference for us here (batch size etc)? + # adapt for our vram target + reference = (self.UNet_reference_val_2d if len(spacing) == 2 else self.UNet_reference_val_3d) * \ + (self.UNet_vram_target_GB / self.UNet_reference_val_corresp_GB) + + while estimate > reference: + # print(patch_size) + # patch size seems to be too large, so we need to reduce it. 
Reduce the axis that currently violates the + # aspect ratio the most (that is the largest relative to median shape) + axis_to_be_reduced = np.argsort(patch_size / median_shape[:len(spacing)])[-1] + + # we cannot simply reduce that axis by shape_must_be_divisible_by[axis_to_be_reduced] because this + # may cause us to skip some valid sizes, for example shape_must_be_divisible_by is 64 for a shape of 256. + # If we subtracted that we would end up with 192, skipping 224 which is also a valid patch size + # (224 / 2**5 = 7; 7 < 2 * self.UNet_featuremap_min_edge_length(4) so it's valid). So we need to first + # subtract shape_must_be_divisible_by, then recompute it and then subtract the + # recomputed shape_must_be_divisible_by. Annoying. + tmp = deepcopy(patch_size) + tmp[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced] + _, _, _, _, shape_must_be_divisible_by = \ + get_pool_and_conv_props(spacing, tmp, + self.UNet_featuremap_min_edge_length, + 999999) + patch_size[axis_to_be_reduced] -= shape_must_be_divisible_by[axis_to_be_reduced] + + # now recompute topology + network_num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, \ + shape_must_be_divisible_by = get_pool_and_conv_props(spacing, patch_size, + self.UNet_featuremap_min_edge_length, + 999999) + + num_stages = len(pool_op_kernel_sizes) + estimate = self.static_estimate_VRAM_usage(tuple(patch_size), + num_stages, + tuple([tuple(i) for i in pool_op_kernel_sizes]), + self.UNet_class, + len(self.dataset_json['channel_names'].keys() + if 'channel_names' in self.dataset_json.keys() + else self.dataset_json['modality'].keys()), + tuple([min(self.UNet_max_features_2d if len(patch_size) == 2 else + self.UNet_max_features_3d, + self.UNet_reference_com_nfeatures * 2 ** i) for + i in range(len(pool_op_kernel_sizes))]), + self.UNet_blocks_per_stage_encoder[:num_stages], + self.UNet_blocks_per_stage_decoder[:num_stages - 1], + len(self.dataset_json['labels'].keys())) + + # alright now let's determine the batch size. This will give self.UNet_min_batch_size if the while loop was + # executed. If not, additional vram headroom is used to increase batch size + ref_bs = self.UNet_reference_val_corresp_bs_2d if len(spacing) == 2 else self.UNet_reference_val_corresp_bs_3d + batch_size = round((reference / estimate) * ref_bs) + + # we need to cap the batch size to cover at most 5% of the entire dataset. Overfitting precaution. 
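+ # (Worked example, numbers invented for illustration: with ~1.6e9 voxels in the dataset and a
+ # (128, 128, 128) patch, bs_corresponding_to_5_percent = round(1.6e9 * 0.05 / 128 ** 3) = 38.)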
We cannot + # go smaller than self.UNet_min_batch_size though + bs_corresponding_to_5_percent = round( + approximate_n_voxels_dataset * 0.05 / np.prod(patch_size, dtype=np.float64)) + batch_size = max(min(batch_size, bs_corresponding_to_5_percent), self.UNet_min_batch_size) + + resampling_data, resampling_data_kwargs, resampling_seg, resampling_seg_kwargs = self.determine_resampling() + resampling_softmax, resampling_softmax_kwargs = self.determine_segmentation_softmax_export_fn() + + normalization_schemes, mask_is_used_for_norm = \ + self.determine_normalization_scheme_and_whether_mask_is_used_for_norm() + num_stages = len(pool_op_kernel_sizes) + plan = { + 'data_identifier': data_identifier, + 'preprocessor_name': self.preprocessor_name, + 'batch_size': batch_size, + 'patch_size': patch_size, + 'median_image_size_in_voxels': median_shape, + 'spacing': spacing, + 'normalization_schemes': normalization_schemes, + 'use_mask_for_norm': mask_is_used_for_norm, + 'UNet_class_name': self.UNet_class.__name__, + 'UNet_base_num_features': self.UNet_base_num_features, + 'n_conv_per_stage_encoder': self.UNet_blocks_per_stage_encoder[:num_stages], + 'n_conv_per_stage_decoder': self.UNet_blocks_per_stage_decoder[:num_stages - 1], + 'num_pool_per_axis': network_num_pool_per_axis, + 'pool_op_kernel_sizes': pool_op_kernel_sizes, + 'conv_kernel_sizes': conv_kernel_sizes, + 'unet_max_num_features': self.UNet_max_features_3d if len(spacing) == 3 else self.UNet_max_features_2d, + 'resampling_fn_data': resampling_data.__name__, + 'resampling_fn_seg': resampling_seg.__name__, + 'resampling_fn_data_kwargs': resampling_data_kwargs, + 'resampling_fn_seg_kwargs': resampling_seg_kwargs, + 'resampling_fn_probabilities': resampling_softmax.__name__, + 'resampling_fn_probabilities_kwargs': resampling_softmax_kwargs, + } + return plan + + def plan_experiment(self): + """ + MOVE EVERYTHING INTO THE PLANS. MAXIMUM FLEXIBILITY + + Ideally I would like to move transpose_forward/backward into the configurations so that this can also be done + differently for each configuration but this would cause problems with identifying the correct axes for 2d. There + surely is a way around that but eh. I'm feeling lazy and featuritis must also not be pushed to the extremes. + + So for now if you want a different transpose_forward/backward you need to create a new planner. Also not too + hard. 
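+ Note (a summary of the code below, no new behavior): 3d_lowres is only created when the fullres
+ patch covers less than lowres_creation_threshold of the median image; in that case the target
+ spacing is coarsened iteratively (factor 1.03 per step) until the patch is large enough.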
+ """ + + # first get transpose + transpose_forward, transpose_backward = self.determine_transpose() + + # get fullres spacing and transpose it + fullres_spacing = self.determine_fullres_target_spacing() + fullres_spacing_transposed = fullres_spacing[transpose_forward] + + # get transposed new median shape (what we would have after resampling) + new_shapes = [compute_new_shape(j, i, fullres_spacing) for i, j in + zip(self.dataset_fingerprint['spacings'], self.dataset_fingerprint['shapes_after_crop'])] + new_median_shape = np.median(new_shapes, 0) + new_median_shape_transposed = new_median_shape[transpose_forward] + + approximate_n_voxels_dataset = float(np.prod(new_median_shape_transposed, dtype=np.float64) * + self.dataset_json['numTraining']) + # only run 3d if this is a 3d dataset + if new_median_shape_transposed[0] != 1: + plan_3d_fullres = self.get_plans_for_configuration(fullres_spacing_transposed, + new_median_shape_transposed, + self.generate_data_identifier('3d_fullres'), + approximate_n_voxels_dataset) + # maybe add 3d_lowres as well + patch_size_fullres = plan_3d_fullres['patch_size'] + median_num_voxels = np.prod(new_median_shape_transposed, dtype=np.float64) + num_voxels_in_patch = np.prod(patch_size_fullres, dtype=np.float64) + + plan_3d_lowres = None + lowres_spacing = deepcopy(plan_3d_fullres['spacing']) + + spacing_increase_factor = 1.03 # used to be 1.01 but that is slow with new GPU memory estimation! + + while num_voxels_in_patch / median_num_voxels < self.lowres_creation_threshold: + # we incrementally increase the target spacing. We start with the anisotropic axis/axes until it/they + # is/are similar (factor 2) to the other ax(i/e)s. + max_spacing = max(lowres_spacing) + if np.any((max_spacing / lowres_spacing) > 2): + lowres_spacing[(max_spacing / lowres_spacing) > 2] *= spacing_increase_factor + else: + lowres_spacing *= spacing_increase_factor + median_num_voxels = np.prod(plan_3d_fullres['spacing'] / lowres_spacing * new_median_shape_transposed, + dtype=np.float64) + # print(lowres_spacing) + plan_3d_lowres = self.get_plans_for_configuration(lowres_spacing, + [round(i) for i in plan_3d_fullres['spacing'] / + lowres_spacing * new_median_shape_transposed], + self.generate_data_identifier('3d_lowres'), + float(np.prod(median_num_voxels) * + self.dataset_json['numTraining'])) + num_voxels_in_patch = np.prod(plan_3d_lowres['patch_size'], dtype=np.int64) + print(f'Attempting to find 3d_lowres config. ' + f'\nCurrent spacing: {lowres_spacing}. ' + f'\nCurrent patch size: {plan_3d_lowres["patch_size"]}. ' + f'\nCurrent median shape: {plan_3d_fullres["spacing"] / lowres_spacing * new_median_shape_transposed}') + if plan_3d_lowres is not None: + plan_3d_lowres['batch_dice'] = False + plan_3d_fullres['batch_dice'] = True + else: + plan_3d_fullres['batch_dice'] = False + else: + plan_3d_fullres = None + plan_3d_lowres = None + + # 2D configuration + plan_2d = self.get_plans_for_configuration(fullres_spacing_transposed[1:], + new_median_shape_transposed[1:], + self.generate_data_identifier('2d'), approximate_n_voxels_dataset) + plan_2d['batch_dice'] = True + + print('2D U-Net configuration:') + print(plan_2d) + print() + + # median spacing and shape, just for reference when printing the plans + median_spacing = np.median(self.dataset_fingerprint['spacings'], 0)[transpose_forward] + median_shape = np.median(self.dataset_fingerprint['shapes_after_crop'], 0)[transpose_forward] + + # instead of writing all that into the plans we just copy the original file. 
More files, but less crowded
+ # per file.
+ shutil.copy(join(self.raw_dataset_folder, 'dataset.json'),
+ join(nnUNet_preprocessed, self.dataset_name, 'dataset.json'))
+
+ # json cannot serialize numpy scalar types ("Object of type int64 is not JSON serializable"), so cast explicitly
+ plans = {
+ 'dataset_name': self.dataset_name,
+ 'plans_name': self.plans_identifier,
+ 'original_median_spacing_after_transp': [float(i) for i in median_spacing],
+ 'original_median_shape_after_transp': [int(round(i)) for i in median_shape],
+ 'image_reader_writer': self.determine_reader_writer().__name__,
+ 'transpose_forward': [int(i) for i in transpose_forward],
+ 'transpose_backward': [int(i) for i in transpose_backward],
+ 'configurations': {'2d': plan_2d},
+ 'experiment_planner_used': self.__class__.__name__,
+ 'label_manager': 'LabelManager',
+ 'foreground_intensity_properties_per_channel': self.dataset_fingerprint[
+ 'foreground_intensity_properties_per_channel']
+ }
+
+ if plan_3d_lowres is not None:
+ plans['configurations']['3d_lowres'] = plan_3d_lowres
+ if plan_3d_fullres is not None:
+ plans['configurations']['3d_lowres']['next_stage'] = '3d_cascade_fullres'
+ print('3D lowres U-Net configuration:')
+ print(plan_3d_lowres)
+ print()
+ if plan_3d_fullres is not None:
+ plans['configurations']['3d_fullres'] = plan_3d_fullres
+ print('3D fullres U-Net configuration:')
+ print(plan_3d_fullres)
+ print()
+ if plan_3d_lowres is not None:
+ plans['configurations']['3d_cascade_fullres'] = {
+ 'inherits_from': '3d_fullres',
+ 'previous_stage': '3d_lowres'
+ }
+
+ plans['configurations']['2d_p256'] = {
+ 'inherits_from': '2d',
+ 'patch_size': [256, 256]
+ }
+
+ plans['configurations']['2d_p512'] = {
+ 'inherits_from': '2d',
+ 'patch_size': [512, 512]
+ }
+
+ self.plans = plans
+ self.save_plans(plans)
+ return plans
+
+ def save_plans(self, plans):
+ recursive_fix_for_json_export(plans)
+
+ plans_file = join(nnUNet_preprocessed, self.dataset_name, self.plans_identifier + '.json')
+
+ # we don't want to overwrite potentially existing custom configurations every time this is executed. So let's
+ # read the plans file if it already exists and keep any non-default configurations
+ if isfile(plans_file):
+ old_plans = load_json(plans_file)
+ old_configurations = old_plans['configurations']
+ for c in plans['configurations'].keys():
+ if c in old_configurations.keys():
+ del old_configurations[c]
+ plans['configurations'].update(old_configurations)
+
+ maybe_mkdir_p(join(nnUNet_preprocessed, self.dataset_name))
+ save_json(plans, plans_file, sort_keys=False)
+ print(f"Plans were saved to {join(nnUNet_preprocessed, self.dataset_name, self.plans_identifier + '.json')}")
+
+ def generate_data_identifier(self, configuration_name: str) -> str:
+ """
+ configurations are unique within each plans file but different plans files can have configurations with the
+ same name. 
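+ Example (illustrative): plans_identifier 'nnUNetPlans' and configuration_name '3d_fullres'
+ yield the data identifier 'nnUNetPlans_3d_fullres'.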
In order to distinguish the associated data we need a data identifier that reflects not just the + config but also the plans it originates from + """ + return self.plans_identifier + '_' + configuration_name + + def load_plans(self, fname: str): + self.plans = load_json(fname) + + +if __name__ == '__main__': + ExperimentPlanner(2, 8).plan_experiment() diff --git a/docker/template/src/nnunetv2/experiment_planning/experiment_planners/network_topology.py b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/network_topology.py new file mode 100644 index 0000000..1ce6a46 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/network_topology.py @@ -0,0 +1,105 @@ +from copy import deepcopy +import numpy as np + + +def get_shape_must_be_divisible_by(net_numpool_per_axis): + return 2 ** np.array(net_numpool_per_axis) + + +def pad_shape(shape, must_be_divisible_by): + """ + pads shape so that it is divisible by must_be_divisible_by + :param shape: + :param must_be_divisible_by: + :return: + """ + if not isinstance(must_be_divisible_by, (tuple, list, np.ndarray)): + must_be_divisible_by = [must_be_divisible_by] * len(shape) + else: + assert len(must_be_divisible_by) == len(shape) + + new_shp = [shape[i] + must_be_divisible_by[i] - shape[i] % must_be_divisible_by[i] for i in range(len(shape))] + + for i in range(len(shape)): + if shape[i] % must_be_divisible_by[i] == 0: + new_shp[i] -= must_be_divisible_by[i] + new_shp = np.array(new_shp).astype(int) + return new_shp + + +def get_pool_and_conv_props(spacing, patch_size, min_feature_map_size, max_numpool): + """ + this is the same as get_pool_and_conv_props_v2 from old nnunet + + :param spacing: + :param patch_size: + :param min_feature_map_size: min edge length of feature maps in bottleneck + :param max_numpool: + :return: + """ + # todo review this code + dim = len(spacing) + + current_spacing = deepcopy(list(spacing)) + current_size = deepcopy(list(patch_size)) + + pool_op_kernel_sizes = [[1] * len(spacing)] + conv_kernel_sizes = [] + + num_pool_per_axis = [0] * dim + kernel_size = [1] * dim + + while True: + # exclude axes that we cannot pool further because of min_feature_map_size constraint + valid_axes_for_pool = [i for i in range(dim) if current_size[i] >= 2*min_feature_map_size] + if len(valid_axes_for_pool) < 1: + break + + spacings_of_axes = [current_spacing[i] for i in valid_axes_for_pool] + + # find axis that are within factor of 2 within smallest spacing + min_spacing_of_valid = min(spacings_of_axes) + valid_axes_for_pool = [i for i in valid_axes_for_pool if current_spacing[i] / min_spacing_of_valid < 2] + + # max_numpool constraint + valid_axes_for_pool = [i for i in valid_axes_for_pool if num_pool_per_axis[i] < max_numpool] + + if len(valid_axes_for_pool) == 1: + if current_size[valid_axes_for_pool[0]] >= 3 * min_feature_map_size: + pass + else: + break + if len(valid_axes_for_pool) < 1: + break + + # now we need to find kernel sizes + # kernel sizes are initialized to 1. They are successively set to 3 when their associated axis becomes within + # factor 2 of min_spacing. 
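+ # (Worked example, spacing invented: for spacing (5.0, 0.8, 0.8) the first kernels are (1, 3, 3);
+ # after two in-plane poolings current_spacing is (5.0, 3.2, 3.2) and 5.0 / 3.2 < 2, so the
+ # kernel becomes (3, 3, 3) from then on.)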
Once they are 3 they remain 3 + for d in range(dim): + if kernel_size[d] == 3: + continue + else: + if current_spacing[d] / min(current_spacing) < 2: + kernel_size[d] = 3 + + other_axes = [i for i in range(dim) if i not in valid_axes_for_pool] + + pool_kernel_sizes = [0] * dim + for v in valid_axes_for_pool: + pool_kernel_sizes[v] = 2 + num_pool_per_axis[v] += 1 + current_spacing[v] *= 2 + current_size[v] = np.ceil(current_size[v] / 2) + for nv in other_axes: + pool_kernel_sizes[nv] = 1 + + pool_op_kernel_sizes.append(pool_kernel_sizes) + conv_kernel_sizes.append(deepcopy(kernel_size)) + #print(conv_kernel_sizes) + + must_be_divisible_by = get_shape_must_be_divisible_by(num_pool_per_axis) + patch_size = pad_shape(patch_size, must_be_divisible_by) + + # we need to add one more conv_kernel_size for the bottleneck. We always use 3x3(x3) conv here + conv_kernel_sizes.append([3]*dim) + return num_pool_per_axis, pool_op_kernel_sizes, conv_kernel_sizes, patch_size, must_be_divisible_by diff --git a/docker/template/src/nnunetv2/experiment_planning/experiment_planners/readme.md b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/readme.md new file mode 100644 index 0000000..e2e4e18 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/readme.md @@ -0,0 +1,38 @@ +What do experiment planners need to do (these are notes for myself while rewriting nnU-Net, they are provided as is +without further explanations. These notes also include new features): +- (done) preprocessor name should be configurable via cli +- (done) gpu memory target should be configurable via cli +- (done) plans name should be configurable via cli +- (done) data name should be specified in plans (plans specify the data they want to use, this will allow us to manually + edit plans files without having to copy the data folders) +- plans must contain: + - (done) transpose forward/backward + - (done) preprocessor name (can differ for each config) + - (done) spacing + - (done) normalization scheme + - (done) target spacing + - (done) conv and pool op kernel sizes + - (done) base num features for architecture + - (done) data identifier + - num conv per stage? + - (done) use mask for norm + - [NO. Handled by LabelManager & dataset.json] num segmentation outputs + - [NO. Handled by LabelManager & dataset.json] ignore class + - [NO. Handled by LabelManager & dataset.json] list of regions or classes + - [NO. Handled by LabelManager & dataset.json] regions class order, if applicable + - (done) resampling function to be used + - (done) the image reader writer class that should be used + + +dataset.json +mandatory: +- numTraining +- labels (value 'ignore' has special meaning. 
Cannot have more than one ignore_label) +- modalities +- file_ending + +optional +- overwrite_image_reader_writer (if absent, auto) +- regions +- region_class_order +- \ No newline at end of file diff --git a/docker/template/src/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py new file mode 100644 index 0000000..52ca938 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/experiment_planners/resencUNet_planner.py @@ -0,0 +1,54 @@ +from typing import Union, List, Tuple + +from torch import nn + +from nnunetv2.experiment_planning.experiment_planners.default_experiment_planner import ExperimentPlanner +from dynamic_network_architectures.architectures.unet import ResidualEncoderUNet + + +class ResEncUNetPlanner(ExperimentPlanner): + def __init__(self, dataset_name_or_id: Union[str, int], + gpu_memory_target_in_gb: float = 8, + preprocessor_name: str = 'DefaultPreprocessor', plans_name: str = 'nnUNetResEncUNetPlans', + overwrite_target_spacing: Union[List[float], Tuple[float, ...]] = None, + suppress_transpose: bool = False): + super().__init__(dataset_name_or_id, gpu_memory_target_in_gb, preprocessor_name, plans_name, + overwrite_target_spacing, suppress_transpose) + + self.UNet_base_num_features = 32 + self.UNet_class = ResidualEncoderUNet + # the following two numbers are really arbitrary and were set to reproduce default nnU-Net's configurations as + # much as possible + self.UNet_reference_val_3d = 680000000 + self.UNet_reference_val_2d = 135000000 + self.UNet_reference_com_nfeatures = 32 + self.UNet_reference_val_corresp_GB = 8 + self.UNet_reference_val_corresp_bs_2d = 12 + self.UNet_reference_val_corresp_bs_3d = 2 + self.UNet_featuremap_min_edge_length = 4 + self.UNet_blocks_per_stage_encoder = (1, 3, 4, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6) + self.UNet_blocks_per_stage_decoder = (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) + self.UNet_min_batch_size = 2 + self.UNet_max_features_2d = 512 + self.UNet_max_features_3d = 320 + + +if __name__ == '__main__': + # we know both of these networks run with batch size 2 and 12 on ~8-10GB, respectively + net = ResidualEncoderUNet(input_channels=1, n_stages=6, features_per_stage=(32, 64, 128, 256, 320, 320), + conv_op=nn.Conv3d, kernel_sizes=3, strides=(1, 2, 2, 2, 2, 2), + n_blocks_per_stage=(1, 3, 4, 6, 6, 6), num_classes=3, + n_conv_per_stage_decoder=(1, 1, 1, 1, 1), + conv_bias=True, norm_op=nn.InstanceNorm3d, norm_op_kwargs={}, dropout_op=None, + nonlin=nn.LeakyReLU, nonlin_kwargs={'inplace': True}, deep_supervision=True) + print(net.compute_conv_feature_map_size((128, 128, 128))) # -> 558319104. 
The value you see above was finetuned + # from this one to match the regular nnunetplans more closely + + net = ResidualEncoderUNet(input_channels=1, n_stages=7, features_per_stage=(32, 64, 128, 256, 512, 512, 512), + conv_op=nn.Conv2d, kernel_sizes=3, strides=(1, 2, 2, 2, 2, 2, 2), + n_blocks_per_stage=(1, 3, 4, 6, 6, 6, 6), num_classes=3, + n_conv_per_stage_decoder=(1, 1, 1, 1, 1, 1), + conv_bias=True, norm_op=nn.InstanceNorm2d, norm_op_kwargs={}, dropout_op=None, + nonlin=nn.LeakyReLU, nonlin_kwargs={'inplace': True}, deep_supervision=True) + print(net.compute_conv_feature_map_size((512, 512))) # -> 129793792 + diff --git a/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_api.py b/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_api.py new file mode 100644 index 0000000..7748572 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_api.py @@ -0,0 +1,137 @@ +from typing import List, Type, Optional, Tuple, Union + +import nnunetv2 +from batchgenerators.utilities.file_and_folder_operations import join, maybe_mkdir_p, load_json + +from nnunetv2.experiment_planning.dataset_fingerprint.fingerprint_extractor import DatasetFingerprintExtractor +from nnunetv2.experiment_planning.experiment_planners.default_experiment_planner import ExperimentPlanner +from nnunetv2.experiment_planning.verify_dataset_integrity import verify_dataset_integrity +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.utilities.dataset_name_id_conversion import convert_id_to_dataset_name +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager +from nnunetv2.configuration import default_num_processes +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + + +def extract_fingerprint_dataset(dataset_id: int, + fingerprint_extractor_class: Type[ + DatasetFingerprintExtractor] = DatasetFingerprintExtractor, + num_processes: int = default_num_processes, check_dataset_integrity: bool = False, + clean: bool = True, verbose: bool = True): + """ + Returns the fingerprint as a dictionary (additionally to saving it) + """ + dataset_name = convert_id_to_dataset_name(dataset_id) + print(dataset_name) + + if check_dataset_integrity: + verify_dataset_integrity(join(nnUNet_raw, dataset_name), num_processes) + + fpe = fingerprint_extractor_class(dataset_id, num_processes, verbose=verbose) + return fpe.run(overwrite_existing=clean) + + +def extract_fingerprints(dataset_ids: List[int], fingerprint_extractor_class_name: str = 'DatasetFingerprintExtractor', + num_processes: int = default_num_processes, check_dataset_integrity: bool = False, + clean: bool = True, verbose: bool = True): + """ + clean = False will not actually run this. This is just a switch for use with nnUNetv2_plan_and_preprocess where + we don't want to rerun fingerprint extraction every time. 
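+ Example (illustrative): extract_fingerprints([2, 4], clean=False) only computes fingerprints
+ for datasets that do not have one yet; clean=True recomputes and overwrites them.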
+ """ + fingerprint_extractor_class = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"), + fingerprint_extractor_class_name, + current_module="nnunetv2.experiment_planning") + for d in dataset_ids: + extract_fingerprint_dataset(d, fingerprint_extractor_class, num_processes, check_dataset_integrity, clean, + verbose) + + +def plan_experiment_dataset(dataset_id: int, + experiment_planner_class: Type[ExperimentPlanner] = ExperimentPlanner, + gpu_memory_target_in_gb: float = 8, preprocess_class_name: str = 'DefaultPreprocessor', + overwrite_target_spacing: Optional[Tuple[float, ...]] = None, + overwrite_plans_name: Optional[str] = None) -> dict: + """ + overwrite_target_spacing ONLY applies to 3d_fullres and 3d_cascade fullres! + """ + kwargs = {} + if overwrite_plans_name is not None: + kwargs['plans_name'] = overwrite_plans_name + return experiment_planner_class(dataset_id, + gpu_memory_target_in_gb=gpu_memory_target_in_gb, + preprocessor_name=preprocess_class_name, + overwrite_target_spacing=[float(i) for i in overwrite_target_spacing] if + overwrite_target_spacing is not None else overwrite_target_spacing, + suppress_transpose=False, # might expose this later, + **kwargs + ).plan_experiment() + + +def plan_experiments(dataset_ids: List[int], experiment_planner_class_name: str = 'ExperimentPlanner', + gpu_memory_target_in_gb: float = 8, preprocess_class_name: str = 'DefaultPreprocessor', + overwrite_target_spacing: Optional[Tuple[float, ...]] = None, + overwrite_plans_name: Optional[str] = None): + """ + overwrite_target_spacing ONLY applies to 3d_fullres and 3d_cascade fullres! + """ + experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"), + experiment_planner_class_name, + current_module="nnunetv2.experiment_planning") + for d in dataset_ids: + plan_experiment_dataset(d, experiment_planner, gpu_memory_target_in_gb, preprocess_class_name, + overwrite_target_spacing, overwrite_plans_name) + + +def preprocess_dataset(dataset_id: int, + plans_identifier: str = 'nnUNetPlans', + configurations: Union[Tuple[str], List[str]] = ('2d', '3d_fullres', '3d_lowres'), + num_processes: Union[int, Tuple[int, ...], List[int]] = (8, 4, 8), + verbose: bool = False) -> None: + if not isinstance(num_processes, list): + num_processes = list(num_processes) + if len(num_processes) == 1: + num_processes = num_processes * len(configurations) + if len(num_processes) != len(configurations): + raise RuntimeError( + f'The list provided with num_processes must either have len 1 or as many elements as there are ' + f'configurations (see --help). Number of configurations: {len(configurations)}, length ' + f'of num_processes: ' + f'{len(num_processes)}') + + dataset_name = convert_id_to_dataset_name(dataset_id) + print(f'Preprocessing dataset {dataset_name}') + plans_file = join(nnUNet_preprocessed, dataset_name, plans_identifier + '.json') + plans_manager = PlansManager(plans_file) + for n, c in zip(num_processes, configurations): + print(f'Configuration: {c}...') + if c not in plans_manager.available_configurations: + print( + f"INFO: Configuration {c} not found in plans file {plans_identifier + '.json'} of " + f"dataset {dataset_name}. 
Skipping.") + continue + configuration_manager = plans_manager.get_configuration(c) + preprocessor = configuration_manager.preprocessor_class(verbose=verbose) + preprocessor.run(dataset_id, c, plans_identifier, num_processes=n) + + # copy the gt to a folder in the nnUNet_preprocessed so that we can do validation even if the raw data is no + # longer there (useful for compute cluster where only the preprocessed data is available) + from distutils.file_util import copy_file + maybe_mkdir_p(join(nnUNet_preprocessed, dataset_name, 'gt_segmentations')) + dataset_json = load_json(join(nnUNet_raw, dataset_name, 'dataset.json')) + dataset = get_filenames_of_train_images_and_targets(join(nnUNet_raw, dataset_name), dataset_json) + # only copy files that are newer than the ones already present + for k in dataset: + copy_file(dataset[k]['label'], + join(nnUNet_preprocessed, dataset_name, 'gt_segmentations', k + dataset_json['file_ending']), + update=True) + + + +def preprocess(dataset_ids: List[int], + plans_identifier: str = 'nnUNetPlans', + configurations: Union[Tuple[str], List[str]] = ('2d', '3d_fullres', '3d_lowres'), + num_processes: Union[int, Tuple[int, ...], List[int]] = (8, 4, 8), + verbose: bool = False): + for d in dataset_ids: + preprocess_dataset(d, plans_identifier, configurations, num_processes, verbose) diff --git a/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py b/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py new file mode 100644 index 0000000..556f04a --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/plan_and_preprocess_entrypoints.py @@ -0,0 +1,201 @@ +from nnunetv2.configuration import default_num_processes +from nnunetv2.experiment_planning.plan_and_preprocess_api import extract_fingerprints, plan_experiments, preprocess + + +def extract_fingerprint_entry(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-d', nargs='+', type=int, + help="[REQUIRED] List of dataset IDs. Example: 2 4 5. This will run fingerprint extraction, experiment " + "planning and preprocessing for these datasets. Can of course also be just one dataset") + parser.add_argument('-fpe', type=str, required=False, default='DatasetFingerprintExtractor', + help='[OPTIONAL] Name of the Dataset Fingerprint Extractor class that should be used. Default is ' + '\'DatasetFingerprintExtractor\'.') + parser.add_argument('-np', type=int, default=default_num_processes, required=False, + help=f'[OPTIONAL] Number of processes used for fingerprint extraction. ' + f'Default: {default_num_processes}') + parser.add_argument("--verify_dataset_integrity", required=False, default=False, action="store_true", + help="[RECOMMENDED] set this flag to check the dataset integrity. This is useful and should be done once for " + "each dataset!") + parser.add_argument("--clean", required=False, default=False, action="store_true", + help='[OPTIONAL] Set this flag to overwrite existing fingerprints. If this flag is not set and a ' + 'fingerprint already exists, the fingerprint extractor will not run.') + parser.add_argument('--verbose', required=False, action='store_true', + help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! 
' + 'Recommended for cluster environments') + args, unrecognized_args = parser.parse_known_args() + extract_fingerprints(args.d, args.fpe, args.np, args.verify_dataset_integrity, args.clean, args.verbose) + + +def plan_experiment_entry(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-d', nargs='+', type=int, + help="[REQUIRED] List of dataset IDs. Example: 2 4 5. This will run fingerprint extraction, experiment " + "planning and preprocessing for these datasets. Can of course also be just one dataset") + parser.add_argument('-pl', type=str, default='ExperimentPlanner', required=False, + help='[OPTIONAL] Name of the Experiment Planner class that should be used. Default is ' + '\'ExperimentPlanner\'. Note: There is no longer a distinction between 2d and 3d planner. ' + 'It\'s an all in one solution now. Wuch. Such amazing.') + parser.add_argument('-gpu_memory_target', default=8, type=float, required=False, + help='[OPTIONAL] DANGER ZONE! Sets a custom GPU memory target. Default: 8 [GB]. Changing this will ' + 'affect patch and batch size and will ' + 'definitely affect your models performance! Only use this if you really know what you ' + 'are doing and NEVER use this without running the default nnU-Net first (as a baseline).') + parser.add_argument('-preprocessor_name', default='DefaultPreprocessor', type=str, required=False, + help='[OPTIONAL] DANGER ZONE! Sets a custom preprocessor class. This class must be located in ' + 'nnunetv2.preprocessing. Default: \'DefaultPreprocessor\'. Changing this may affect your ' + 'models performance! Only use this if you really know what you ' + 'are doing and NEVER use this without running the default nnU-Net first (as a baseline).') + parser.add_argument('-overwrite_target_spacing', default=None, nargs='+', required=False, + help='[OPTIONAL] DANGER ZONE! Sets a custom target spacing for the 3d_fullres and 3d_cascade_fullres ' + 'configurations. Default: None [no changes]. Changing this will affect image size and ' + 'potentially patch and batch ' + 'size. This will definitely affect your models performance! Only use this if you really ' + 'know what you are doing and NEVER use this without running the default nnU-Net first ' + '(as a baseline). Changing the target spacing for the other configurations is currently ' + 'not implemented. New target spacing must be a list of three numbers!') + parser.add_argument('-overwrite_plans_name', default=None, required=False, + help='[OPTIONAL] DANGER ZONE! If you used -gpu_memory_target, -preprocessor_name or ' + '-overwrite_target_spacing it is best practice to use -overwrite_plans_name to generate a ' + 'differently named plans file such that the nnunet default plans are not ' + 'overwritten. You will then need to specify your custom plans file with -p whenever ' + 'running other nnunet commands (training, inference etc)') + args, unrecognized_args = parser.parse_known_args() + plan_experiments(args.d, args.pl, args.gpu_memory_target, args.preprocessor_name, args.overwrite_target_spacing, + args.overwrite_plans_name) + + +def preprocess_entry(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-d', nargs='+', type=int, + help="[REQUIRED] List of dataset IDs. Example: 2 4 5. This will run fingerprint extraction, experiment " + "planning and preprocessing for these datasets. 
Can of course also be just one dataset") + parser.add_argument('-plans_name', default='nnUNetPlans', required=False, + help='[OPTIONAL] You can use this to specify a custom plans file that you may have generated') + parser.add_argument('-c', required=False, default=['2d', '3d_fullres', '3d_lowres'], nargs='+', + help='[OPTIONAL] Configurations for which the preprocessing should be run. Default: 2d 3d_fullres ' + '3d_lowres. 3d_cascade_fullres does not need to be specified because it uses the data ' + 'from 3d_fullres. Configurations that do not exist for some dataset will be skipped.') + parser.add_argument('-np', type=int, nargs='+', default=[8, 4, 8], required=False, + help="[OPTIONAL] Use this to define how many processes are to be used. If this is just one number then " + "this number of processes is used for all configurations specified with -c. If it's a " + "list of numbers this list must have as many elements as there are configurations. We " + "then iterate over zip(configs, num_processes) to determine then umber of processes " + "used for each configuration. More processes is always faster (up to the number of " + "threads your PC can support, so 8 for a 4 core CPU with hyperthreading. If you don't " + "know what that is then dont touch it, or at least don't increase it!). DANGER: More " + "often than not the number of processes that can be used is limited by the amount of " + "RAM available. Image resampling takes up a lot of RAM. MONITOR RAM USAGE AND " + "DECREASE -np IF YOUR RAM FILLS UP TOO MUCH!. Default: 8 processes for 2d, 4 " + "for 3d_fullres, 8 for 3d_lowres and 4 for everything else") + parser.add_argument('--verbose', required=False, action='store_true', + help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! ' + 'Recommended for cluster environments') + args, unrecognized_args = parser.parse_known_args() + if args.np is None: + default_np = { + '2d': 4, + '3d_lowres': 8, + '3d_fullres': 4 + } + np = {default_np[c] if c in default_np.keys() else 4 for c in args.c} + else: + np = args.np + preprocess(args.d, args.plans_name, configurations=args.c, num_processes=np, verbose=args.verbose) + + +def plan_and_preprocess_entry(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-d', nargs='+', type=int, + help="[REQUIRED] List of dataset IDs. Example: 2 4 5. This will run fingerprint extraction, experiment " + "planning and preprocessing for these datasets. Can of course also be just one dataset") + parser.add_argument('-fpe', type=str, required=False, default='DatasetFingerprintExtractor', + help='[OPTIONAL] Name of the Dataset Fingerprint Extractor class that should be used. Default is ' + '\'DatasetFingerprintExtractor\'.') + parser.add_argument('-npfp', type=int, default=8, required=False, + help='[OPTIONAL] Number of processes used for fingerprint extraction. Default: 8') + parser.add_argument("--verify_dataset_integrity", required=False, default=False, action="store_true", + help="[RECOMMENDED] set this flag to check the dataset integrity. This is useful and should be done once for " + "each dataset!") + parser.add_argument('--no_pp', default=False, action='store_true', required=False, + help='[OPTIONAL] Set this to only run fingerprint extraction and experiment planning (no ' + 'preprocesing). Useful for debugging.') + parser.add_argument("--clean", required=False, default=False, action="store_true", + help='[OPTIONAL] Set this flag to overwrite existing fingerprints. 
If this flag is not set and a '
+ 'fingerprint already exists, the fingerprint extractor will not run. REQUIRED IF YOU '
+ 'CHANGE THE DATASET FINGERPRINT EXTRACTOR OR MAKE CHANGES TO THE DATASET!')
+ parser.add_argument('-pl', type=str, default='ExperimentPlanner', required=False,
+ help='[OPTIONAL] Name of the Experiment Planner class that should be used. Default is '
+ '\'ExperimentPlanner\'. Note: There is no longer a distinction between 2d and 3d planner. '
+ 'It\'s an all in one solution now. Wuch. Such amazing.')
+ parser.add_argument('-gpu_memory_target', default=8, type=float, required=False,
+ help='[OPTIONAL] DANGER ZONE! Sets a custom GPU memory target. Default: 8 [GB]. Changing this will '
+ 'affect patch and batch size and will '
+ 'definitely affect your models performance! Only use this if you really know what you '
+ 'are doing and NEVER use this without running the default nnU-Net first (as a baseline).')
+ parser.add_argument('-preprocessor_name', default='DefaultPreprocessor', type=str, required=False,
+ help='[OPTIONAL] DANGER ZONE! Sets a custom preprocessor class. This class must be located in '
+ 'nnunetv2.preprocessing. Default: \'DefaultPreprocessor\'. Changing this may affect your '
+ 'models performance! Only use this if you really know what you '
+ 'are doing and NEVER use this without running the default nnU-Net first (as a baseline).')
+ parser.add_argument('-overwrite_target_spacing', default=None, nargs='+', required=False,
+ help='[OPTIONAL] DANGER ZONE! Sets a custom target spacing for the 3d_fullres and 3d_cascade_fullres '
+ 'configurations. Default: None [no changes]. Changing this will affect image size and '
+ 'potentially patch and batch '
+ 'size. This will definitely affect your models performance! Only use this if you really '
+ 'know what you are doing and NEVER use this without running the default nnU-Net first '
+ '(as a baseline). Changing the target spacing for the other configurations is currently '
+ 'not implemented. New target spacing must be a list of three numbers!')
+ parser.add_argument('-overwrite_plans_name', default='nnUNetPlans', required=False,
+ help='[OPTIONAL] Use a custom plans identifier. If you used -gpu_memory_target, '
+ '-preprocessor_name or '
+ '-overwrite_target_spacing it is best practice to use -overwrite_plans_name to generate a '
+ 'differently named plans file such that the nnunet default plans are not '
+ 'overwritten. You will then need to specify your custom plans file with -p whenever '
+ 'running other nnunet commands (training, inference etc)')
+ parser.add_argument('-c', required=False, default=['2d', '3d_fullres', '3d_lowres'], nargs='+',
+ help='[OPTIONAL] Configurations for which the preprocessing should be run. Default: 2d 3d_fullres '
+ '3d_lowres. 3d_cascade_fullres does not need to be specified because it uses the data '
+ 'from 3d_fullres. Configurations that do not exist for some dataset will be skipped.')
+ parser.add_argument('-np', type=int, nargs='+', default=None, required=False,
+ help="[OPTIONAL] Use this to define how many processes are to be used. If this is just one number then "
+ "this number of processes is used for all configurations specified with -c. If it's a "
+ "list of numbers this list must have as many elements as there are configurations. We "
+ "then iterate over zip(configs, num_processes) to determine the number of processes "
+ "used for each configuration. 
More processes is always faster (up to the number of " + "threads your PC can support, so 8 for a 4 core CPU with hyperthreading. If you don't " + "know what that is then dont touch it, or at least don't increase it!). DANGER: More " + "often than not the number of processes that can be used is limited by the amount of " + "RAM available. Image resampling takes up a lot of RAM. MONITOR RAM USAGE AND " + "DECREASE -np IF YOUR RAM FILLS UP TOO MUCH!. Default: 8 processes for 2d, 4 " + "for 3d_fullres, 8 for 3d_lowres and 4 for everything else") + parser.add_argument('--verbose', required=False, action='store_true', + help='Set this to print a lot of stuff. Useful for debugging. Will disable progress bar! ' + 'Recommended for cluster environments') + args = parser.parse_args() + + # fingerprint extraction + print("Fingerprint extraction...") + extract_fingerprints(args.d, args.fpe, args.npfp, args.verify_dataset_integrity, args.clean, args.verbose) + + # experiment planning + print('Experiment planning...') + plan_experiments(args.d, args.pl, args.gpu_memory_target, args.preprocessor_name, args.overwrite_target_spacing, args.overwrite_plans_name) + + # manage default np + if args.np is None: + default_np = {"2d": 8, "3d_fullres": 4, "3d_lowres": 8} + np = [default_np[c] if c in default_np.keys() else 4 for c in args.c] + else: + np = args.np + # preprocessing + if not args.no_pp: + print('Preprocessing...') + preprocess(args.d, args.overwrite_plans_name, args.c, np, args.verbose) + + +if __name__ == '__main__': + plan_and_preprocess_entry() diff --git a/docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py b/docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py b/docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py new file mode 100644 index 0000000..7219ddc --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/plans_for_pretraining/move_plans_between_datasets.py @@ -0,0 +1,82 @@ +import argparse +from typing import Union + +from batchgenerators.utilities.file_and_folder_operations import join, isdir, isfile, load_json, save_json + +from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json +from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw +from nnunetv2.utilities.file_path_utilities import maybe_convert_to_dataset_name +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + + +def move_plans_between_datasets( + source_dataset_name_or_id: Union[int, str], + target_dataset_name_or_id: Union[int, str], + source_plans_identifier: str, + target_plans_identifier: str = None): + source_dataset_name = maybe_convert_to_dataset_name(source_dataset_name_or_id) + target_dataset_name = maybe_convert_to_dataset_name(target_dataset_name_or_id) + + if target_plans_identifier is None: + target_plans_identifier = source_plans_identifier + + source_folder = join(nnUNet_preprocessed, source_dataset_name) + assert isdir(source_folder), f"Cannot move plans because preprocessed directory of source dataset is missing. " \ + f"Run nnUNetv2_plan_and_preprocess for source dataset first!" + + source_plans_file = join(source_folder, source_plans_identifier + '.json') + assert isfile(source_plans_file), f"Source plans are missing. 
Run the corresponding experiment planning first! " \ + f"Expected file: {source_plans_file}" + + source_plans = load_json(source_plans_file) + source_plans['dataset_name'] = target_dataset_name + + # we need to change data_identifier to use target_plans_identifier + if target_plans_identifier != source_plans_identifier: + for c in source_plans['configurations'].keys(): + if 'data_identifier' in source_plans['configurations'][c].keys(): + old_identifier = source_plans['configurations'][c]["data_identifier"] + if old_identifier.startswith(source_plans_identifier): + new_identifier = target_plans_identifier + old_identifier[len(source_plans_identifier):] + else: + new_identifier = target_plans_identifier + '_' + old_identifier + source_plans['configurations'][c]["data_identifier"] = new_identifier + + # we need to change the reader writer class! + target_raw_data_dir = join(nnUNet_raw, target_dataset_name) + target_dataset_json = load_json(join(target_raw_data_dir, 'dataset.json')) + + # we may need to change the reader/writer + # pick any file from the source dataset + dataset = get_filenames_of_train_images_and_targets(target_raw_data_dir, target_dataset_json) + example_image = dataset[dataset.keys().__iter__().__next__()]['images'][0] + rw = determine_reader_writer_from_dataset_json(target_dataset_json, example_image, allow_nonmatching_filename=True, + verbose=False) + + source_plans["image_reader_writer"] = rw.__name__ + if target_plans_identifier is not None: + source_plans["plans_name"] = target_plans_identifier + + save_json(source_plans, join(nnUNet_preprocessed, target_dataset_name, target_plans_identifier + '.json'), + sort_keys=False) + + +def entry_point_move_plans_between_datasets(): + parser = argparse.ArgumentParser() + parser.add_argument('-s', type=str, required=True, + help='Source dataset name or id') + parser.add_argument('-t', type=str, required=True, + help='Target dataset name or id') + parser.add_argument('-sp', type=str, required=True, + help='Source plans identifier. If your plans are named "nnUNetPlans.json" then the ' + 'identifier would be nnUNetPlans') + parser.add_argument('-tp', type=str, required=False, default=None, + help='Target plans identifier. Default is None meaning the source plans identifier will ' + 'be kept. Not recommended if the source plans identifier is a default nnU-Net identifier ' + 'such as nnUNetPlans!!!') + args = parser.parse_args() + move_plans_between_datasets(args.s, args.t, args.sp, args.tp) + + +if __name__ == '__main__': + move_plans_between_datasets(2, 4, 'nnUNetPlans', 'nnUNetPlansFrom2') diff --git a/docker/template/src/nnunetv2/experiment_planning/verify_dataset_integrity.py b/docker/template/src/nnunetv2/experiment_planning/verify_dataset_integrity.py new file mode 100644 index 0000000..8d646a2 --- /dev/null +++ b/docker/template/src/nnunetv2/experiment_planning/verify_dataset_integrity.py @@ -0,0 +1,231 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+from typing import Type
+
+import numpy as np
+import pandas as pd
+from batchgenerators.utilities.file_and_folder_operations import *
+
+from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
+from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json
+from nnunetv2.paths import nnUNet_raw
+from nnunetv2.utilities.label_handling.label_handling import LabelManager
+from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets
+
+
+def verify_labels(label_file: str, readerclass: Type[BaseReaderWriter], expected_labels: List[int]) -> bool:
+ rw = readerclass()
+ seg, properties = rw.read_seg(label_file)
+ found_labels = np.sort(pd.unique(seg.ravel())) # np.unique(seg)
+ unexpected_labels = [i for i in found_labels if i not in expected_labels]
+ if len(found_labels) == 1 and found_labels[0] == 0:
+ print('WARNING: File %s only has label 0 (which should be background). This may be intentional or not, '
+ 'up to you.' % label_file)
+ if len(unexpected_labels) > 0:
+ print("Error: Unexpected labels found in file %s.\nExpected: %s\nFound: %s" % (label_file, expected_labels,
+ found_labels))
+ return False
+ return True
+
+
+def check_cases(image_files: List[str], label_file: str, expected_num_channels: int,
+ readerclass: Type[BaseReaderWriter]) -> bool:
+ rw = readerclass()
+ ret = True
+
+ images, properties_image = rw.read_images(image_files)
+ segmentation, properties_seg = rw.read_seg(label_file)
+
+ # check for nans
+ if np.any(np.isnan(images)):
+ print(f'Images contain NaN pixel values. You need to fix that by '
+ f'replacing NaN values with something that makes sense for your images!\nImages:\n{image_files}')
+ ret = False
+ if np.any(np.isnan(segmentation)):
+ print(f'Segmentation contains NaN pixel values. You need to fix that.\nSegmentation:\n{label_file}')
+ ret = False
+
+ # check shapes
+ shape_image = images.shape[1:]
+ shape_seg = segmentation.shape[1:]
+ if shape_image != shape_seg:
+ print('Error: Shape mismatch between segmentation and corresponding images. \nShape images: %s. '
+ '\nShape seg: %s. \nImage files: %s. \nSeg file: %s\n' %
+ (shape_image, shape_seg, image_files, label_file))
+ ret = False
+
+ # check spacings
+ spacing_images = properties_image['spacing']
+ spacing_seg = properties_seg['spacing']
+ if not np.allclose(spacing_seg, spacing_images):
+ print('Error: Spacing mismatch between segmentation and corresponding images. \nSpacing images: %s. '
+ '\nSpacing seg: %s. \nImage files: %s. \nSeg file: %s\n' %
+ (spacing_images, spacing_seg, image_files, label_file))
+ ret = False
+
+ # check modalities
+ if not len(images) == expected_num_channels:
+ print('Error: Unexpected number of modalities. \nExpected: %d. \nGot: %d. \nImages: %s\n'
+ % (expected_num_channels, len(images), image_files))
+ ret = False
+
+ # nibabel checks
+ if 'nibabel_stuff' in properties_image.keys():
+ # this image was read with NibabelIO
+ affine_image = properties_image['nibabel_stuff']['original_affine']
+ affine_seg = properties_seg['nibabel_stuff']['original_affine']
+ if not np.allclose(affine_image, affine_seg):
+ print('WARNING: Affine is not the same for image and seg! \nAffine image: %s \nAffine seg: %s\n'
+ 'Image files: %s. \nSeg file: %s.\nThis can be a problem but doesn\'t have to be. 
Please run ' + 'nnUNet_plot_dataset_pngs to verify if everything is OK!\n' + % (affine_image, affine_seg, image_files, label_file)) + + # sitk checks + if 'sitk_stuff' in properties_image.keys(): + # this image was read with SimpleITKIO + # spacing has already been checked, only check direction and origin + origin_image = properties_image['sitk_stuff']['origin'] + origin_seg = properties_seg['sitk_stuff']['origin'] + if not np.allclose(origin_image, origin_seg): + print('Warning: Origin mismatch between segmentation and corresponding images. \nOrigin images: %s. ' + '\nOrigin seg: %s. \nImage files: %s. \nSeg file: %s\n' % + (origin_image, origin_seg, image_files, label_file)) + direction_image = properties_image['sitk_stuff']['direction'] + direction_seg = properties_seg['sitk_stuff']['direction'] + if not np.allclose(direction_image, direction_seg): + print('Warning: Direction mismatch between segmentation and corresponding images. \nDirection images: %s. ' + '\nDirection seg: %s. \nImage files: %s. \nSeg file: %s\n' % + (direction_image, direction_seg, image_files, label_file)) + + return ret + + +def verify_dataset_integrity(folder: str, num_processes: int = 8) -> None: + """ + folder needs the imagesTr, imagesTs and labelsTr subfolders. There also needs to be a dataset.json + checks if the expected number of training cases and labels are present + for each case, if possible, checks whether the pixel grids are aligned + checks whether the labels really only contain values they should + :param folder: + :return: + """ + assert isfile(join(folder, "dataset.json")), f"There needs to be a dataset.json file in folder, folder={folder}" + dataset_json = load_json(join(folder, "dataset.json")) + + if not 'dataset' in dataset_json.keys(): + assert isdir(join(folder, "imagesTr")), f"There needs to be a imagesTr subfolder in folder, folder={folder}" + assert isdir(join(folder, "labelsTr")), f"There needs to be a labelsTr subfolder in folder, folder={folder}" + + # make sure all required keys are there + dataset_keys = list(dataset_json.keys()) + required_keys = ['labels', "channel_names", "numTraining", "file_ending"] + assert all([i in dataset_keys for i in required_keys]), 'not all required keys are present in dataset.json.' \ + '\n\nRequired: \n%s\n\nPresent: \n%s\n\nMissing: ' \ + '\n%s\n\nUnused by nnU-Net:\n%s' % \ + (str(required_keys), + str(dataset_keys), + str([i for i in required_keys if i not in dataset_keys]), + str([i for i in dataset_keys if i not in required_keys])) + + expected_num_training = dataset_json['numTraining'] + num_modalities = len(dataset_json['channel_names'].keys() + if 'channel_names' in dataset_json.keys() + else dataset_json['modality'].keys()) + file_ending = dataset_json['file_ending'] + + dataset = get_filenames_of_train_images_and_targets(folder, dataset_json) + + # check if the right number of training cases is present + assert len(dataset) == expected_num_training, 'Did not find the expected number of training cases ' \ + '(%d). 
Found %d instead.\nExamples: %s' % \ + (expected_num_training, len(dataset), + list(dataset.keys())[:5]) + + # check if corresponding labels are present + if 'dataset' in dataset_json.keys(): + # just check if everything is there + ok = True + missing_images = [] + missing_labels = [] + for k in dataset: + for i in dataset[k]['images']: + if not isfile(i): + missing_images.append(i) + ok = False + if not isfile(dataset[k]['label']): + missing_labels.append(dataset[k]['label']) + ok = False + if not ok: + raise FileNotFoundError(f"Some expected files were missing. Make sure you are properly referencing them " + f"in the dataset.json. Or use imagesTr & labelsTr folders!\nMissing images:" + f"\n{missing_images}\n\nMissing labels:\n{missing_labels}") + else: + # old code that uses imagestr and labelstr folders + labelfiles = subfiles(join(folder, 'labelsTr'), suffix=file_ending, join=False) + label_identifiers = [i[:-len(file_ending)] for i in labelfiles] + labels_present = [i in label_identifiers for i in dataset.keys()] + missing = [i for j, i in enumerate(dataset.keys()) if not labels_present[j]] + assert all(labels_present), f'not all training cases have a label file in labelsTr. Fix that. Missing: {missing}' + + labelfiles = [v['label'] for v in dataset.values()] + image_files = [v['images'] for v in dataset.values()] + + # no plans exist yet, so we can't use PlansManager and gotta roll with the default. It's unlikely to cause + # problems anyway + label_manager = LabelManager(dataset_json['labels'], regions_class_order=dataset_json.get('regions_class_order')) + expected_labels = label_manager.all_labels + if label_manager.has_ignore_label: + expected_labels.append(label_manager.ignore_label) + labels_valid_consecutive = np.ediff1d(expected_labels) == 1 + assert all( + labels_valid_consecutive), f'Labels must be in consecutive order (0, 1, 2, ...). The labels {np.array(expected_labels)[1:][~labels_valid_consecutive]} do not satisfy this restriction' + + # determine reader/writer class + reader_writer_class = determine_reader_writer_from_dataset_json(dataset_json, dataset[dataset.keys().__iter__().__next__()]['images'][0]) + + # check whether only the desired labels are present + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + result = p.starmap( + verify_labels, + zip([join(folder, 'labelsTr', i) for i in labelfiles], [reader_writer_class] * len(labelfiles), + [expected_labels] * len(labelfiles)) + ) + if not all(result): + raise RuntimeError( + 'Some segmentation images contained unexpected labels. Please check text output above to see which one(s).') + + # check whether shapes and spacings match between images and labels + result = p.starmap( + check_cases, + zip(image_files, labelfiles, [num_modalities] * expected_num_training, + [reader_writer_class] * expected_num_training) + ) + if not all(result): + raise RuntimeError( + 'Some images have errors. Please check text output above to see which one(s) and what\'s going on.') + + # check for nans + # check all same orientation nibabel + print('\n####################') + print('verify_dataset_integrity Done. 
\nIf you didn\'t see any error messages then your dataset is most likely OK!') + print('####################\n') + + +if __name__ == "__main__": + # investigate geometry issues + example_folder = join(nnUNet_raw, 'Dataset250_COMPUTING_it0') + num_processes = 6 + verify_dataset_integrity(example_folder, num_processes) diff --git a/docker/template/src/nnunetv2/imageio/__init__.py b/docker/template/src/nnunetv2/imageio/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/imageio/base_reader_writer.py b/docker/template/src/nnunetv2/imageio/base_reader_writer.py new file mode 100644 index 0000000..2847478 --- /dev/null +++ b/docker/template/src/nnunetv2/imageio/base_reader_writer.py @@ -0,0 +1,107 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from abc import ABC, abstractmethod +from typing import Tuple, Union, List +import numpy as np + + +class BaseReaderWriter(ABC): + @staticmethod + def _check_all_same(input_list): + # compare all entries to the first + for i in input_list[1:]: + if i != input_list[0]: + return False + return True + + @staticmethod + def _check_all_same_array(input_list): + # compare all entries to the first + for i in input_list[1:]: + if i.shape != input_list[0].shape or not np.allclose(i, input_list[0]): + return False + return True + + @abstractmethod + def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]: + """ + Reads a sequence of images and returns a 4d (!) np.ndarray along with a dictionary. The 4d array must have the + modalities (or color channels, or however you would like to call them) in its first axis, followed by the + spatial dimensions (so shape must be c,x,y,z where c is the number of modalities (can be 1)). + Use the dictionary to store necessary meta information that is lost when converting to numpy arrays, for + example the Spacing, Orientation and Direction of the image. This dictionary will be handed over to write_seg + for exporting the predicted segmentations, so make sure you have everything you need in there! + + IMPORTANT: dict MUST have a 'spacing' key with a tuple/list of length 3 with the voxel spacing of the np.ndarray. + Example: my_dict = {'spacing': (3, 0.5, 0.5), ...}. This is needed for planning and + preprocessing. The ordering of the numbers must correspond to the axis ordering in the returned numpy array. So + if the array has shape c,x,y,z and the spacing is (a,b,c) then a must be the spacing of x, b the spacing of y + and c the spacing of z. + + In the case of 2D images, the returned array should have shape (c, 1, x, y) and the spacing should be + (999, sp_x, sp_y). Make sure 999 is larger than sp_x and sp_y! Example: shape=(3, 1, 224, 224), + spacing=(999, 1, 1) + + For images that don't have a spacing, set the spacing to 1 (2d exception with 999 for the first axis still applies!) 
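+ Example (illustrative): a single-channel 3D volume with spatial shape (120, 512, 512) could be
+ returned as an array of shape (1, 120, 512, 512) together with {'spacing': (3.0, 0.5, 0.5)}.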
+ + :param image_fnames: + :return: + 1) a np.ndarray of shape (c, x, y, z) where c is the number of image channels (can be 1) and x, y, z are + the spatial dimensions (set x=1 for 2D! Example: (3, 1, 224, 224) for RGB image). + 2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a + is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set + a=999 (largest spacing value! Make it larger than b and c) + + """ + pass + + @abstractmethod + def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]: + """ + Same requirements as BaseReaderWriter.read_image. Returned segmentations must have shape 1,x,y,z. Multiple + segmentations are not (yet?) allowed + + If images and segmentations can be read the same way you can just `return self.read_image((image_fname,))` + :param seg_fname: + :return: + 1) a np.ndarray of shape (1, x, y, z) where x, y, z are + the spatial dimensions (set x=1 for 2D! Example: (1, 1, 224, 224) for 2D segmentation). + 2) a dictionary with metadata. This can be anything. BUT it HAS to include a {'spacing': (a, b, c)} where a + is the spacing of x, b of y and c of z! If an image doesn't have spacing, just set this to 1. For 2D, set + a=999 (largest spacing value! Make it larger than b and c) + """ + pass + + @abstractmethod + def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None: + """ + Export the predicted segmentation to the desired file format. The given seg array will have the same shape and + orientation as the corresponding image data, so you don't need to do any resampling or whatever. Just save :-) + + properties is the same dictionary you created during read_images/read_seg so you can use the information here + to restore metadata + + IMPORTANT: Segmentations are always 3D! If your input images were 2d then the segmentation will have shape + 1,x,y. You need to catch that and export accordingly (for 2d images you need to convert the 3d segmentation + to 2d via seg = seg[0])! + + :param seg: A segmentation (np.ndarray, integer) of shape (x, y, z). For 2D segmentations this will be (1, y, z)! + :param output_fname: + :param properties: the dictionary that you created in read_images (the ones this segmentation is based on). + Use this to restore metadata + :return: + """ + pass \ No newline at end of file diff --git a/docker/template/src/nnunetv2/imageio/natural_image_reader_writer.py b/docker/template/src/nnunetv2/imageio/natural_image_reader_writer.py new file mode 100644 index 0000000..11946c3 --- /dev/null +++ b/docker/template/src/nnunetv2/imageio/natural_image_reader_writer.py @@ -0,0 +1,73 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
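+
+# Hedged usage sketch for the adapter below (filenames are hypothetical placeholders):
+#   rw = NaturalImage2DIO()
+#   data, props = rw.read_images(('case_0000.png',))  # data: (c, 1, X, Y) float32; props: {'spacing': (999, 1, 1)}
+#   rw.write_seg(seg, 'case.png', props)              # seg must have shape (1, X, Y); saved as uint8 PNG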
+ +from typing import Tuple, Union, List +import numpy as np +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from skimage import io + + +class NaturalImage2DIO(BaseReaderWriter): + """ + ONLY SUPPORTS 2D IMAGES!!! + """ + + # there are surely more we could add here. Everything that can be read by skimage.io should be supported + supported_file_endings = [ + '.png', + # '.jpg', + # '.jpeg', # jpg not supported because we cannot allow lossy compression! segmentation maps! + '.bmp', + '.tif' + ] + + def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]: + images = [] + for f in image_fnames: + npy_img = io.imread(f) + if npy_img.ndim == 3: + # rgb image, last dimension should be the color channel and the size of that channel should be 3 + # (or 4 if we have alpha) + assert npy_img.shape[-1] == 3 or npy_img.shape[-1] == 4, "If image has three dimensions then the last " \ + "dimension must have shape 3 or 4 " \ + f"(RGB or RGBA). Image shape here is {npy_img.shape}" + # move RGB(A) to front, add additional dim so that we have shape (1, c, X, Y), where c is either 3 or 4 + images.append(npy_img.transpose((2, 0, 1))[:, None]) + elif npy_img.ndim == 2: + # grayscale image + images.append(npy_img[None, None]) + + if not self._check_all_same([i.shape for i in images]): + print('ERROR! Not all input images have the same shape!') + print('Shapes:') + print([i.shape for i in images]) + print('Image files:') + print(image_fnames) + raise RuntimeError() + return np.vstack(images).astype(np.float32), {'spacing': (999, 1, 1)} + + def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]: + return self.read_images((seg_fname, )) + + def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None: + io.imsave(output_fname, seg[0].astype(np.uint8), check_contrast=False) + + +if __name__ == '__main__': + images = ('/media/fabian/data/nnUNet_raw/Dataset120_RoadSegmentation/imagesTr/img-11_0000.png',) + segmentation = '/media/fabian/data/nnUNet_raw/Dataset120_RoadSegmentation/labelsTr/img-11.png' + imgio = NaturalImage2DIO() + img, props = imgio.read_images(images) + seg, segprops = imgio.read_seg(segmentation) \ No newline at end of file diff --git a/docker/template/src/nnunetv2/imageio/nibabel_reader_writer.py b/docker/template/src/nnunetv2/imageio/nibabel_reader_writer.py new file mode 100644 index 0000000..8faafb7 --- /dev/null +++ b/docker/template/src/nnunetv2/imageio/nibabel_reader_writer.py @@ -0,0 +1,204 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Tuple, Union, List +import numpy as np +from nibabel import io_orientation + +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +import nibabel + + +class NibabelIO(BaseReaderWriter): + """ + Nibabel loads the images in a different order than sitk. 
We convert the axes to the sitk order to be + consistent. This is of course considered properly in segmentation export as well. + + IMPORTANT: Run nnUNet_plot_dataset_pngs to verify that this did not destroy the alignment of data and seg! + """ + supported_file_endings = [ + '.nii.gz', + '.nrrd', + '.mha' + ] + + def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]: + images = [] + original_affines = [] + + spacings_for_nnunet = [] + for f in image_fnames: + nib_image = nibabel.load(f) + assert nib_image.ndim == 3, 'only 3d images are supported by NibabelIO' + original_affine = nib_image.affine + + original_affines.append(original_affine) + + # spacing is taken in reverse order to be consistent with SimpleITK axis ordering (confusing, I know...) + spacings_for_nnunet.append( + [float(i) for i in nib_image.header.get_zooms()[::-1]] + ) + + # transpose image to be consistent with the way SimpleITk reads images. Yeah. Annoying. + images.append(nib_image.get_fdata().transpose((2, 1, 0))[None]) + + if not self._check_all_same([i.shape for i in images]): + print('ERROR! Not all input images have the same shape!') + print('Shapes:') + print([i.shape for i in images]) + print('Image files:') + print(image_fnames) + raise RuntimeError() + if not self._check_all_same_array(original_affines): + print('WARNING! Not all input images have the same original_affines!') + print('Affines:') + print(original_affines) + print('Image files:') + print(image_fnames) + print('It is up to you to decide whether that\'s a problem. You should run nnUNet_plot_dataset_pngs to verify ' + 'that segmentations and data overlap.') + if not self._check_all_same(spacings_for_nnunet): + print('ERROR! Not all input images have the same spacing_for_nnunet! This might be caused by them not ' + 'having the same affine') + print('spacings_for_nnunet:') + print(spacings_for_nnunet) + print('Image files:') + print(image_fnames) + raise RuntimeError() + + stacked_images = np.vstack(images) + dict = { + 'nibabel_stuff': { + 'original_affine': original_affines[0], + }, + 'spacing': spacings_for_nnunet[0] + } + return stacked_images.astype(np.float32), dict + + def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]: + return self.read_images((seg_fname, )) + + def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None: + # revert transpose + seg = seg.transpose((2, 1, 0)).astype(np.uint8) + seg_nib = nibabel.Nifti1Image(seg, affine=properties['nibabel_stuff']['original_affine']) + nibabel.save(seg_nib, output_fname) + + +class NibabelIOWithReorient(BaseReaderWriter): + """ + Reorients images to RAS + + Nibabel loads the images in a different order than sitk. We convert the axes to the sitk order to be + consistent. This is of course considered properly in segmentation export as well. + + IMPORTANT: Run nnUNet_plot_dataset_pngs to verify that this did not destroy the alignment of data and seg! 
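+
+    Hedged usage sketch (filenames are hypothetical placeholders):
+        rw = NibabelIOWithReorient()
+        data, props = rw.read_images(['case_0000.nii.gz'])   # data: (c, x, y, z); props stores both affines
+        seg, seg_props = rw.read_seg('case.nii.gz')
+        rw.write_seg(seg[0], 'case_pred.nii.gz', seg_props)  # export restores the original (pre-RAS) affine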
+ """ + supported_file_endings = [ + '.nii.gz', + '.nrrd', + '.mha' + ] + + def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]: + images = [] + original_affines = [] + reoriented_affines = [] + + spacings_for_nnunet = [] + for f in image_fnames: + nib_image = nibabel.load(f) + assert nib_image.ndim == 3, 'only 3d images are supported by NibabelIO' + original_affine = nib_image.affine + reoriented_image = nib_image.as_reoriented(io_orientation(original_affine)) + reoriented_affine = reoriented_image.affine + + original_affines.append(original_affine) + reoriented_affines.append(reoriented_affine) + + # spacing is taken in reverse order to be consistent with SimpleITK axis ordering (confusing, I know...) + spacings_for_nnunet.append( + [float(i) for i in reoriented_image.header.get_zooms()[::-1]] + ) + + # transpose image to be consistent with the way SimpleITk reads images. Yeah. Annoying. + images.append(reoriented_image.get_fdata().transpose((2, 1, 0))[None]) + + if not self._check_all_same([i.shape for i in images]): + print('ERROR! Not all input images have the same shape!') + print('Shapes:') + print([i.shape for i in images]) + print('Image files:') + print(image_fnames) + raise RuntimeError() + if not self._check_all_same_array(reoriented_affines): + print('WARNING! Not all input images have the same reoriented_affines!') + print('Affines:') + print(reoriented_affines) + print('Image files:') + print(image_fnames) + print('It is up to you to decide whether that\'s a problem. You should run nnUNet_plot_dataset_pngs to verify ' + 'that segmentations and data overlap.') + if not self._check_all_same(spacings_for_nnunet): + print('ERROR! Not all input images have the same spacing_for_nnunet! This might be caused by them not ' + 'having the same affine') + print('spacings_for_nnunet:') + print(spacings_for_nnunet) + print('Image files:') + print(image_fnames) + raise RuntimeError() + + stacked_images = np.vstack(images) + dict = { + 'nibabel_stuff': { + 'original_affine': original_affines[0], + 'reoriented_affine': reoriented_affines[0], + }, + 'spacing': spacings_for_nnunet[0] + } + return stacked_images.astype(np.float32), dict + + def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]: + return self.read_images((seg_fname, )) + + def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None: + # revert transpose + seg = seg.transpose((2, 1, 0)).astype(np.uint8) + + seg_nib = nibabel.Nifti1Image(seg, affine=properties['nibabel_stuff']['reoriented_affine']) + seg_nib_reoriented = seg_nib.as_reoriented(io_orientation(properties['nibabel_stuff']['original_affine'])) + assert np.allclose(properties['nibabel_stuff']['original_affine'], seg_nib_reoriented.affine), \ + 'restored affine does not match original affine' + nibabel.save(seg_nib_reoriented, output_fname) + + +if __name__ == '__main__': + img_file = 'patient028_frame01_0000.nii.gz' + seg_file = 'patient028_frame01.nii.gz' + + nibio = NibabelIO() + images, dct = nibio.read_images([img_file]) + seg, dctseg = nibio.read_seg(seg_file) + + nibio_r = NibabelIOWithReorient() + images_r, dct_r = nibio_r.read_images([img_file]) + seg_r, dctseg_r = nibio_r.read_seg(seg_file) + + nibio.write_seg(seg[0], '/home/isensee/seg_nibio.nii.gz', dctseg) + nibio_r.write_seg(seg_r[0], '/home/isensee/seg_nibio_r.nii.gz', dctseg_r) + + s_orig = nibabel.load(seg_file).get_fdata() + s_nibio = nibabel.load('/home/isensee/seg_nibio.nii.gz').get_fdata() + s_nibio_r = 
nibabel.load('/home/isensee/seg_nibio_r.nii.gz').get_fdata() diff --git a/docker/template/src/nnunetv2/imageio/reader_writer_registry.py b/docker/template/src/nnunetv2/imageio/reader_writer_registry.py new file mode 100644 index 0000000..606334c --- /dev/null +++ b/docker/template/src/nnunetv2/imageio/reader_writer_registry.py @@ -0,0 +1,79 @@ +import traceback +from typing import Type + +from batchgenerators.utilities.file_and_folder_operations import join + +import nnunetv2 +from nnunetv2.imageio.natural_image_reader_writer import NaturalImage2DIO +from nnunetv2.imageio.nibabel_reader_writer import NibabelIO, NibabelIOWithReorient +from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO +from nnunetv2.imageio.tif_reader_writer import Tiff3DIO +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class + +LIST_OF_IO_CLASSES = [ + NaturalImage2DIO, + SimpleITKIO, + Tiff3DIO, + NibabelIO, + NibabelIOWithReorient +] + + +def determine_reader_writer_from_dataset_json(dataset_json_content: dict, example_file: str = None, + allow_nonmatching_filename: bool = False, verbose: bool = True + ) -> Type[BaseReaderWriter]: + if 'overwrite_image_reader_writer' in dataset_json_content.keys() and \ + dataset_json_content['overwrite_image_reader_writer'] != 'None': + ioclass_name = dataset_json_content['overwrite_image_reader_writer'] + # trying to find that class in the nnunetv2.imageio module + try: + ret = recursive_find_reader_writer_by_name(ioclass_name) + if verbose: print(f'Using {ret} reader/writer') + return ret + except RuntimeError: + if verbose: print(f'Warning: Unable to find ioclass specified in dataset.json: {ioclass_name}') + if verbose: print('Trying to automatically determine desired class') + return determine_reader_writer_from_file_ending(dataset_json_content['file_ending'], example_file, + allow_nonmatching_filename, verbose) + + +def determine_reader_writer_from_file_ending(file_ending: str, example_file: str = None, allow_nonmatching_filename: bool = False, + verbose: bool = True): + for rw in LIST_OF_IO_CLASSES: + if file_ending.lower() in rw.supported_file_endings: + if example_file is not None: + # if an example file is provided, try if we can actually read it. If not move on to the next reader + try: + tmp = rw() + _ = tmp.read_images((example_file,)) + if verbose: print(f'Using {rw} as reader/writer') + return rw + except: + if verbose: print(f'Failed to open file {example_file} with reader {rw}:') + traceback.print_exc() + pass + else: + if verbose: print(f'Using {rw} as reader/writer') + return rw + else: + if allow_nonmatching_filename and example_file is not None: + try: + tmp = rw() + _ = tmp.read_images((example_file,)) + if verbose: print(f'Using {rw} as reader/writer') + return rw + except: + if verbose: print(f'Failed to open file {example_file} with reader {rw}:') + if verbose: traceback.print_exc() + pass + raise RuntimeError(f"Unable to determine a reader for file ending {file_ending} and file {example_file} (file None means no file provided).") + + +def recursive_find_reader_writer_by_name(rw_class_name: str) -> Type[BaseReaderWriter]: + ret = recursive_find_python_class(join(nnunetv2.__path__[0], "imageio"), rw_class_name, 'nnunetv2.imageio') + if ret is None: + raise RuntimeError("Unable to find reader writer class '%s'. Please make sure this class is located in the " + "nnunetv2.imageio module." 
% rw_class_name)
+    else:
+        return ret
diff --git a/docker/template/src/nnunetv2/imageio/readme.md b/docker/template/src/nnunetv2/imageio/readme.md
new file mode 100644
index 0000000..7819425
--- /dev/null
+++ b/docker/template/src/nnunetv2/imageio/readme.md
@@ -0,0 +1,7 @@
+- Derive your adapter from `BaseReaderWriter`.
+- Reimplement all abstract methods.
+- Make sure to support 2D and 3D input images (or raise an informative error).
+- Place it in this folder, or nnU-Net won't find it!
+- Add it to `LIST_OF_IO_CLASSES` in `reader_writer_registry.py`.
+
+Bam, you're done!
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/imageio/simpleitk_reader_writer.py b/docker/template/src/nnunetv2/imageio/simpleitk_reader_writer.py
new file mode 100644
index 0000000..6a9afc2
--- /dev/null
+++ b/docker/template/src/nnunetv2/imageio/simpleitk_reader_writer.py
@@ -0,0 +1,129 @@
+# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center
+# (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Tuple, Union, List
+import numpy as np
+from nnunetv2.imageio.base_reader_writer import BaseReaderWriter
+import SimpleITK as sitk
+
+
+class SimpleITKIO(BaseReaderWriter):
+    supported_file_endings = [
+        '.nii.gz',
+        '.nrrd',
+        '.mha'
+    ]
+
+    def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]:
+        images = []
+        spacings = []
+        origins = []
+        directions = []
+
+        spacings_for_nnunet = []
+        for f in image_fnames:
+            itk_image = sitk.ReadImage(f)
+            spacings.append(itk_image.GetSpacing())
+            origins.append(itk_image.GetOrigin())
+            directions.append(itk_image.GetDirection())
+            npy_image = sitk.GetArrayFromImage(itk_image)
+            if npy_image.ndim == 2:
+                # 2d
+                npy_image = npy_image[None, None]
+                max_spacing = max(spacings[-1])
+                spacings_for_nnunet.append((max_spacing * 999, *list(spacings[-1])[::-1]))
+            elif npy_image.ndim == 3:
+                # 3d, as in original nnunet
+                npy_image = npy_image[None]
+                spacings_for_nnunet.append(list(spacings[-1])[::-1])
+            elif npy_image.ndim == 4:
+                # 4d, multiple modalities in one file
+                spacings_for_nnunet.append(list(spacings[-1])[::-1][1:])
+            else:
+                raise RuntimeError(f"Unexpected number of dimensions: {npy_image.ndim} in file {f}")
+
+            images.append(npy_image)
+            spacings_for_nnunet[-1] = list(np.abs(spacings_for_nnunet[-1]))
+
+        if not self._check_all_same([i.shape for i in images]):
+            print('ERROR! Not all input images have the same shape!')
+            print('Shapes:')
+            print([i.shape for i in images])
+            print('Image files:')
+            print(image_fnames)
+            raise RuntimeError()
+        if not self._check_all_same(spacings):
+            print('ERROR! Not all input images have the same spacing!')
+            print('Spacings:')
+            print(spacings)
+            print('Image files:')
+            print(image_fnames)
+            raise RuntimeError()
+        if not self._check_all_same(origins):
+            print('WARNING!
Not all input images have the same origin!') + print('Origins:') + print(origins) + print('Image files:') + print(image_fnames) + print('It is up to you to decide whether that\'s a problem. You should run nnUNet_plot_dataset_pngs to verify ' + 'that segmentations and data overlap.') + if not self._check_all_same(directions): + print('WARNING! Not all input images have the same direction!') + print('Directions:') + print(directions) + print('Image files:') + print(image_fnames) + print('It is up to you to decide whether that\'s a problem. You should run nnUNet_plot_dataset_pngs to verify ' + 'that segmentations and data overlap.') + if not self._check_all_same(spacings_for_nnunet): + print('ERROR! Not all input images have the same spacing_for_nnunet! (This should not happen and must be a ' + 'bug. Please report!') + print('spacings_for_nnunet:') + print(spacings_for_nnunet) + print('Image files:') + print(image_fnames) + raise RuntimeError() + + stacked_images = np.vstack(images) + dict = { + 'sitk_stuff': { + # this saves the sitk geometry information. This part is NOT used by nnU-Net! + 'spacing': spacings[0], + 'origin': origins[0], + 'direction': directions[0] + }, + # the spacing is inverted with [::-1] because sitk returns the spacing in the wrong order lol. Image arrays + # are returned x,y,z but spacing is returned z,y,x. Duh. + 'spacing': spacings_for_nnunet[0] + } + return stacked_images.astype(np.float32), dict + + def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]: + return self.read_images((seg_fname, )) + + def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None: + assert seg.ndim == 3, 'segmentation must be 3d. If you are exporting a 2d segmentation, please provide it as shape 1,x,y' + output_dimension = len(properties['sitk_stuff']['spacing']) + assert 1 < output_dimension < 4 + if output_dimension == 2: + seg = seg[0] + + itk_image = sitk.GetImageFromArray(seg.astype(np.uint8)) + itk_image.SetSpacing(properties['sitk_stuff']['spacing']) + itk_image.SetOrigin(properties['sitk_stuff']['origin']) + itk_image.SetDirection(properties['sitk_stuff']['direction']) + + sitk.WriteImage(itk_image, output_fname, True) diff --git a/docker/template/src/nnunetv2/imageio/tif_reader_writer.py b/docker/template/src/nnunetv2/imageio/tif_reader_writer.py new file mode 100644 index 0000000..19ad882 --- /dev/null +++ b/docker/template/src/nnunetv2/imageio/tif_reader_writer.py @@ -0,0 +1,100 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os.path +from typing import Tuple, Union, List +import numpy as np +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +import tifffile +from batchgenerators.utilities.file_and_folder_operations import isfile, load_json, save_json, split_path, join + + +class Tiff3DIO(BaseReaderWriter): + """ + reads and writes 3D tif(f) images. Uses tifffile package. 
Ignores metadata (for now)!
+
+    If you have 2D tiffs, use NaturalImage2DIO instead.
+
+    Supports the use of auxiliary files for spacing information. If used, the auxiliary files are expected to end
+    with .json and omit the channel identifier. So, for example, the auxiliary file corresponding to image
+    image1_0000.tif is expected to be image1.json.
+    """
+    supported_file_endings = [
+        '.tif',
+        '.tiff',
+    ]
+
+    def read_images(self, image_fnames: Union[List[str], Tuple[str, ...]]) -> Tuple[np.ndarray, dict]:
+        # figure out file ending used here
+        ending = '.' + image_fnames[0].split('.')[-1]
+        assert ending.lower() in self.supported_file_endings, f'Ending {ending} not supported by {self.__class__.__name__}'
+        ending_length = len(ending)
+        truncate_length = ending_length + 5  # 5 comes from len(_0000)
+
+        images = []
+        for f in image_fnames:
+            image = tifffile.imread(f)
+            if image.ndim != 3:
+                raise RuntimeError(f"Only 3D images are supported! File: {f}")
+            images.append(image[None])
+
+        # see if aux file can be found
+        expected_aux_file = image_fnames[0][:-truncate_length] + '.json'
+        if isfile(expected_aux_file):
+            spacing = load_json(expected_aux_file)['spacing']
+            assert len(spacing) == 3, f'spacing must have 3 entries, one for each dimension of the image. File: {expected_aux_file}'
+        else:
+            print(f'WARNING: no spacing file found for images {image_fnames}\nAssuming spacing (1, 1, 1).')
+            spacing = (1, 1, 1)
+
+        if not self._check_all_same([i.shape for i in images]):
+            print('ERROR! Not all input images have the same shape!')
+            print('Shapes:')
+            print([i.shape for i in images])
+            print('Image files:')
+            print(image_fnames)
+            raise RuntimeError()
+
+        return np.vstack(images).astype(np.float32), {'spacing': spacing}
+
+    def write_seg(self, seg: np.ndarray, output_fname: str, properties: dict) -> None:
+        # not ideal, but there is no obvious way to store spacing/resolution information directly in tif files,
+        # so we write it to an accompanying json instead
+        tifffile.imwrite(output_fname, data=seg.astype(np.uint8), compression='zlib')
+        file = os.path.basename(output_fname)
+        out_dir = os.path.dirname(output_fname)
+        ending = file.split('.')[-1]
+        save_json({'spacing': properties['spacing']}, join(out_dir, file[:-(len(ending) + 1)] + '.json'))
+
+    def read_seg(self, seg_fname: str) -> Tuple[np.ndarray, dict]:
+        # figure out file ending used here
+        ending = '.' + seg_fname.split('.')[-1]
+        assert ending.lower() in self.supported_file_endings, f'Ending {ending} not supported by {self.__class__.__name__}'
+        ending_length = len(ending)
+
+        seg = tifffile.imread(seg_fname)
+        if seg.ndim != 3:
+            raise RuntimeError(f"Only 3D images are supported! File: {seg_fname}")
+        seg = seg[None]
+
+        # see if aux file can be found
+        expected_aux_file = seg_fname[:-ending_length] + '.json'
+        if isfile(expected_aux_file):
+            spacing = load_json(expected_aux_file)['spacing']
+            assert len(spacing) == 3, f'spacing must have 3 entries, one for each dimension of the image.
File: {expected_aux_file}' + assert all([i > 0 for i in spacing]), f"Spacing must be > 0, spacing: {spacing}" + else: + print(f'WARNING no spacing file found for segmentation {seg_fname}\nAssuming spacing (1, 1, 1).') + spacing = (1, 1, 1) + + return seg.astype(np.float32), {'spacing': spacing} \ No newline at end of file diff --git a/docker/template/src/nnunetv2/inference/__init__.py b/docker/template/src/nnunetv2/inference/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/inference/data_iterators.py b/docker/template/src/nnunetv2/inference/data_iterators.py new file mode 100644 index 0000000..a35e330 --- /dev/null +++ b/docker/template/src/nnunetv2/inference/data_iterators.py @@ -0,0 +1,316 @@ +import multiprocessing +import queue +from torch.multiprocessing import Event, Queue, Manager + +from time import sleep +from typing import Union, List + +import numpy as np +import torch +from batchgenerators.dataloading.data_loader import DataLoader + +from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor +from nnunetv2.utilities.label_handling.label_handling import convert_labelmap_to_one_hot +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager + + +def preprocess_fromfiles_save_to_queue(list_of_lists: List[List[str]], + list_of_segs_from_prev_stage_files: Union[None, List[str]], + output_filenames_truncated: Union[None, List[str]], + plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + target_queue: Queue, + done_event: Event, + abort_event: Event, + verbose: bool = False): + try: + label_manager = plans_manager.get_label_manager(dataset_json) + preprocessor = configuration_manager.preprocessor_class(verbose=verbose) + for idx in range(len(list_of_lists)): + data, seg, data_properties = preprocessor.run_case(list_of_lists[idx], + list_of_segs_from_prev_stage_files[ + idx] if list_of_segs_from_prev_stage_files is not None else None, + plans_manager, + configuration_manager, + dataset_json) + if list_of_segs_from_prev_stage_files is not None and list_of_segs_from_prev_stage_files[idx] is not None: + seg_onehot = convert_labelmap_to_one_hot(seg[0], label_manager.foreground_labels, data.dtype) + data = np.vstack((data, seg_onehot)) + + data = torch.from_numpy(data).contiguous().float() + + item = {'data': data, 'data_properties': data_properties, + 'ofile': output_filenames_truncated[idx] if output_filenames_truncated is not None else None} + success = False + while not success: + try: + if abort_event.is_set(): + return + target_queue.put(item, timeout=0.01) + success = True + except queue.Full: + pass + done_event.set() + except Exception as e: + abort_event.set() + raise e + + +def preprocessing_iterator_fromfiles(list_of_lists: List[List[str]], + list_of_segs_from_prev_stage_files: Union[None, List[str]], + output_filenames_truncated: Union[None, List[str]], + plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_processes: int, + pin_memory: bool = False, + verbose: bool = False): + context = multiprocessing.get_context('spawn') + manager = Manager() + num_processes = min(len(list_of_lists), num_processes) + assert num_processes >= 1 + processes = [] + done_events = [] + target_queues = [] + abort_event = manager.Event() + for i in range(num_processes): + event = manager.Event() + queue = Manager().Queue(maxsize=1) + pr = 
context.Process(target=preprocess_fromfiles_save_to_queue, + args=( + list_of_lists[i::num_processes], + list_of_segs_from_prev_stage_files[ + i::num_processes] if list_of_segs_from_prev_stage_files is not None else None, + output_filenames_truncated[ + i::num_processes] if output_filenames_truncated is not None else None, + plans_manager, + dataset_json, + configuration_manager, + queue, + event, + abort_event, + verbose + ), daemon=True) + pr.start() + target_queues.append(queue) + done_events.append(event) + processes.append(pr) + + worker_ctr = 0 + while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()): + if not target_queues[worker_ctr].empty(): + item = target_queues[worker_ctr].get() + worker_ctr = (worker_ctr + 1) % num_processes + else: + all_ok = all( + [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set() + if not all_ok: + raise RuntimeError('Background workers died. Look for the error message further up! If there is ' + 'none then your RAM was full and the worker was killed by the OS. Use fewer ' + 'workers or get more RAM in that case!') + sleep(0.01) + continue + if pin_memory: + [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)] + yield item + [p.join() for p in processes] + +class PreprocessAdapter(DataLoader): + def __init__(self, list_of_lists: List[List[str]], + list_of_segs_from_prev_stage_files: Union[None, List[str]], + preprocessor: DefaultPreprocessor, + output_filenames_truncated: Union[None, List[str]], + plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_threads_in_multithreaded: int = 1): + self.preprocessor, self.plans_manager, self.configuration_manager, self.dataset_json = \ + preprocessor, plans_manager, configuration_manager, dataset_json + + self.label_manager = plans_manager.get_label_manager(dataset_json) + + if list_of_segs_from_prev_stage_files is None: + list_of_segs_from_prev_stage_files = [None] * len(list_of_lists) + if output_filenames_truncated is None: + output_filenames_truncated = [None] * len(list_of_lists) + + super().__init__(list(zip(list_of_lists, list_of_segs_from_prev_stage_files, output_filenames_truncated)), + 1, num_threads_in_multithreaded, + seed_for_shuffle=1, return_incomplete=True, + shuffle=False, infinite=False, sampling_probabilities=None) + + self.indices = list(range(len(list_of_lists))) + + def generate_train_batch(self): + idx = self.get_indices()[0] + files = self._data[idx][0] + seg_prev_stage = self._data[idx][1] + ofile = self._data[idx][2] + # if we have a segmentation from the previous stage we have to process it together with the images so that we + # can crop it appropriately (if needed). 
Otherwise it would just be resized to the shape of the data after + # preprocessing and then there might be misalignments + data, seg, data_properties = self.preprocessor.run_case(files, seg_prev_stage, self.plans_manager, + self.configuration_manager, + self.dataset_json) + if seg_prev_stage is not None: + seg_onehot = convert_labelmap_to_one_hot(seg[0], self.label_manager.foreground_labels, data.dtype) + data = np.vstack((data, seg_onehot)) + + data = torch.from_numpy(data) + + return {'data': data, 'data_properties': data_properties, 'ofile': ofile} + + +class PreprocessAdapterFromNpy(DataLoader): + def __init__(self, list_of_images: List[np.ndarray], + list_of_segs_from_prev_stage: Union[List[np.ndarray], None], + list_of_image_properties: List[dict], + truncated_ofnames: Union[List[str], None], + plans_manager: PlansManager, dataset_json: dict, configuration_manager: ConfigurationManager, + num_threads_in_multithreaded: int = 1, verbose: bool = False): + preprocessor = configuration_manager.preprocessor_class(verbose=verbose) + self.preprocessor, self.plans_manager, self.configuration_manager, self.dataset_json, self.truncated_ofnames = \ + preprocessor, plans_manager, configuration_manager, dataset_json, truncated_ofnames + + self.label_manager = plans_manager.get_label_manager(dataset_json) + + if list_of_segs_from_prev_stage is None: + list_of_segs_from_prev_stage = [None] * len(list_of_images) + if truncated_ofnames is None: + truncated_ofnames = [None] * len(list_of_images) + + super().__init__( + list(zip(list_of_images, list_of_segs_from_prev_stage, list_of_image_properties, truncated_ofnames)), + 1, num_threads_in_multithreaded, + seed_for_shuffle=1, return_incomplete=True, + shuffle=False, infinite=False, sampling_probabilities=None) + + self.indices = list(range(len(list_of_images))) + + def generate_train_batch(self): + idx = self.get_indices()[0] + image = self._data[idx][0] + seg_prev_stage = self._data[idx][1] + props = self._data[idx][2] + ofname = self._data[idx][3] + # if we have a segmentation from the previous stage we have to process it together with the images so that we + # can crop it appropriately (if needed). 
Otherwise it would just be resized to the shape of the data after + # preprocessing and then there might be misalignments + data, seg = self.preprocessor.run_case_npy(image, seg_prev_stage, props, + self.plans_manager, + self.configuration_manager, + self.dataset_json) + if seg_prev_stage is not None: + seg_onehot = convert_labelmap_to_one_hot(seg[0], self.label_manager.foreground_labels, data.dtype) + data = np.vstack((data, seg_onehot)) + + data = torch.from_numpy(data) + + return {'data': data, 'data_properties': props, 'ofile': ofname} + + +def preprocess_fromnpy_save_to_queue(list_of_images: List[np.ndarray], + list_of_segs_from_prev_stage: Union[List[np.ndarray], None], + list_of_image_properties: List[dict], + truncated_ofnames: Union[List[str], None], + plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + target_queue: Queue, + done_event: Event, + abort_event: Event, + verbose: bool = False): + try: + label_manager = plans_manager.get_label_manager(dataset_json) + preprocessor = configuration_manager.preprocessor_class(verbose=verbose) + for idx in range(len(list_of_images)): + data, seg = preprocessor.run_case_npy(list_of_images[idx], + list_of_segs_from_prev_stage[ + idx] if list_of_segs_from_prev_stage is not None else None, + list_of_image_properties[idx], + plans_manager, + configuration_manager, + dataset_json) + if list_of_segs_from_prev_stage is not None and list_of_segs_from_prev_stage[idx] is not None: + seg_onehot = convert_labelmap_to_one_hot(seg[0], label_manager.foreground_labels, data.dtype) + data = np.vstack((data, seg_onehot)) + + data = torch.from_numpy(data).contiguous().float() + + item = {'data': data, 'data_properties': list_of_image_properties[idx], + 'ofile': truncated_ofnames[idx] if truncated_ofnames is not None else None} + success = False + while not success: + try: + if abort_event.is_set(): + return + target_queue.put(item, timeout=0.01) + success = True + except queue.Full: + pass + done_event.set() + except Exception as e: + abort_event.set() + raise e + + +def preprocessing_iterator_fromnpy(list_of_images: List[np.ndarray], + list_of_segs_from_prev_stage: Union[List[np.ndarray], None], + list_of_image_properties: List[dict], + truncated_ofnames: Union[List[str], None], + plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_processes: int, + pin_memory: bool = False, + verbose: bool = False): + context = multiprocessing.get_context('spawn') + manager = Manager() + num_processes = min(len(list_of_images), num_processes) + assert num_processes >= 1 + target_queues = [] + processes = [] + done_events = [] + abort_event = manager.Event() + for i in range(num_processes): + event = manager.Event() + queue = manager.Queue(maxsize=1) + pr = context.Process(target=preprocess_fromnpy_save_to_queue, + args=( + list_of_images[i::num_processes], + list_of_segs_from_prev_stage[ + i::num_processes] if list_of_segs_from_prev_stage is not None else None, + list_of_image_properties[i::num_processes], + truncated_ofnames[i::num_processes] if truncated_ofnames is not None else None, + plans_manager, + dataset_json, + configuration_manager, + queue, + event, + abort_event, + verbose + ), daemon=True) + pr.start() + done_events.append(event) + processes.append(pr) + target_queues.append(queue) + + worker_ctr = 0 + while (not done_events[worker_ctr].is_set()) or (not target_queues[worker_ctr].empty()): + if not target_queues[worker_ctr].empty(): + item = 
target_queues[worker_ctr].get() + worker_ctr = (worker_ctr + 1) % num_processes + else: + all_ok = all( + [i.is_alive() or j.is_set() for i, j in zip(processes, done_events)]) and not abort_event.is_set() + if not all_ok: + raise RuntimeError('Background workers died. Look for the error message further up! If there is ' + 'none then your RAM was full and the worker was killed by the OS. Use fewer ' + 'workers or get more RAM in that case!') + sleep(0.01) + continue + if pin_memory: + [i.pin_memory() for i in item.values() if isinstance(i, torch.Tensor)] + yield item + [p.join() for p in processes] diff --git a/docker/template/src/nnunetv2/inference/examples.py b/docker/template/src/nnunetv2/inference/examples.py new file mode 100644 index 0000000..a66d98f --- /dev/null +++ b/docker/template/src/nnunetv2/inference/examples.py @@ -0,0 +1,102 @@ +if __name__ == '__main__': + from nnunetv2.paths import nnUNet_results, nnUNet_raw + import torch + from batchgenerators.utilities.file_and_folder_operations import join + from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor + from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO + + # nnUNetv2_predict -d 3 -f 0 -c 3d_lowres -i imagesTs -o imagesTs_predlowres --continue_prediction + + # instantiate the nnUNetPredictor + predictor = nnUNetPredictor( + tile_step_size=0.5, + use_gaussian=True, + use_mirroring=True, + perform_everything_on_device=True, + device=torch.device('cuda', 0), + verbose=False, + verbose_preprocessing=False, + allow_tqdm=True + ) + # initializes the network architecture, loads the checkpoint + predictor.initialize_from_trained_model_folder( + join(nnUNet_results, 'Dataset003_Liver/nnUNetTrainer__nnUNetPlans__3d_lowres'), + use_folds=(0,), + checkpoint_name='checkpoint_final.pth', + ) + # variant 1: give input and output folders + predictor.predict_from_files(join(nnUNet_raw, 'Dataset003_Liver/imagesTs'), + join(nnUNet_raw, 'Dataset003_Liver/imagesTs_predlowres'), + save_probabilities=False, overwrite=False, + num_processes_preprocessing=2, num_processes_segmentation_export=2, + folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0) + + # variant 2, use list of files as inputs. Note how we use nested lists!!! 
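+    # (expected structure: one inner list per case with one file per input channel, e.g., for a hypothetical
+    # two-channel dataset: [['caseA_0000.nii.gz', 'caseA_0001.nii.gz'], ['caseB_0000.nii.gz', 'caseB_0001.nii.gz']];
+    # the Liver cases below have a single channel, hence the one-element inner lists)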
+    indir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs')
+    outdir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs_predlowres')
+    predictor.predict_from_files([[join(indir, 'liver_152_0000.nii.gz')],
+                                  [join(indir, 'liver_142_0000.nii.gz')]],
+                                 [join(outdir, 'liver_152.nii.gz'),
+                                  join(outdir, 'liver_142.nii.gz')],
+                                 save_probabilities=False, overwrite=True,
+                                 num_processes_preprocessing=2, num_processes_segmentation_export=2,
+                                 folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0)
+
+    # variant 2.5, returns segmentations
+    indir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs')
+    predicted_segmentations = predictor.predict_from_files([[join(indir, 'liver_152_0000.nii.gz')],
+                                                            [join(indir, 'liver_142_0000.nii.gz')]],
+                                                           None,
+                                                           save_probabilities=True, overwrite=True,
+                                                           num_processes_preprocessing=2,
+                                                           num_processes_segmentation_export=2,
+                                                           folder_with_segs_from_prev_stage=None, num_parts=1,
+                                                           part_id=0)
+
+    # predict several npy images
+    from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
+
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_147_0000.nii.gz')])
+    img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
+    img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
+    img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
+    # we do not set output files so that the segmentations will be returned. You can of course also specify output
+    # files instead (there is no return value in that case)
+    ret = predictor.predict_from_list_of_npy_arrays([img, img2, img3, img4],
+                                                    None,
+                                                    [props, props2, props3, props4],
+                                                    None, 2, save_probabilities=False,
+                                                    num_processes_segmentation_export=2)
+
+    # predict a single numpy array
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_147_0000.nii.gz')])
+    ret = predictor.predict_single_npy_array(img, props, None, None, True)
+
+    # custom iterator
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_147_0000.nii.gz')])
+    img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
+    img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
+    img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
+
+    # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
+    # If 'ofile' is None, the result will be returned instead of written to a file.
+    # The iterator is responsible for performing the correct preprocessing!
+    # Note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
+    # Take a look at the default iterators for predict_from_files and predict_from_list_of_npy_arrays
+    # (they both use predictor.predict_from_data_iterator) for inspiration!
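+    # A minimal conforming element, assuming `data` has already been preprocessed to shape (c, x, y, z)
+    # (hypothetical sketch, not part of the original example):
+    #   {'data': torch.from_numpy(data).float(), 'data_properties': props, 'ofile': None}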
+ def my_iterator(list_of_input_arrs, list_of_input_props): + preprocessor = predictor.configuration_manager.preprocessor_class(verbose=predictor.verbose) + for a, p in zip(list_of_input_arrs, list_of_input_props): + data, seg = preprocessor.run_case_npy(a, + None, + p, + predictor.plans_manager, + predictor.configuration_manager, + predictor.dataset_json) + yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None} + + + ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]), + save_probabilities=False, num_processes_segmentation_export=3) diff --git a/docker/template/src/nnunetv2/inference/export_prediction.py b/docker/template/src/nnunetv2/inference/export_prediction.py new file mode 100644 index 0000000..3303567 --- /dev/null +++ b/docker/template/src/nnunetv2/inference/export_prediction.py @@ -0,0 +1,145 @@ +import os +from copy import deepcopy +from typing import Union, List + +import numpy as np +import torch +from acvl_utils.cropping_and_padding.bounding_boxes import bounding_box_to_slice +from batchgenerators.utilities.file_and_folder_operations import load_json, isfile, save_pickle + +from nnunetv2.configuration import default_num_processes +from nnunetv2.utilities.label_handling.label_handling import LabelManager +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager + + +def convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits: Union[torch.Tensor, np.ndarray], + plans_manager: PlansManager, + configuration_manager: ConfigurationManager, + label_manager: LabelManager, + properties_dict: dict, + return_probabilities: bool = False, + num_threads_torch: int = default_num_processes): + old_threads = torch.get_num_threads() + torch.set_num_threads(num_threads_torch) + + # resample to original shape + current_spacing = configuration_manager.spacing if \ + len(configuration_manager.spacing) == \ + len(properties_dict['shape_after_cropping_and_before_resampling']) else \ + [properties_dict['spacing'][0], *configuration_manager.spacing] + predicted_logits = configuration_manager.resampling_fn_probabilities(predicted_logits, + properties_dict['shape_after_cropping_and_before_resampling'], + current_spacing, + properties_dict['spacing']) + # return value of resampling_fn_probabilities can be ndarray or Tensor but that does not matter because + # apply_inference_nonlin will convert to torch + predicted_probabilities = label_manager.apply_inference_nonlin(predicted_logits) + del predicted_logits + segmentation = label_manager.convert_probabilities_to_segmentation(predicted_probabilities) + + # segmentation may be torch.Tensor but we continue with numpy + if isinstance(segmentation, torch.Tensor): + segmentation = segmentation.cpu().numpy() + + # put segmentation in bbox (revert cropping) + segmentation_reverted_cropping = np.zeros(properties_dict['shape_before_cropping'], + dtype=np.uint8 if len(label_manager.foreground_labels) < 255 else np.uint16) + slicer = bounding_box_to_slice(properties_dict['bbox_used_for_cropping']) + segmentation_reverted_cropping[slicer] = segmentation + del segmentation + + # revert transpose + segmentation_reverted_cropping = segmentation_reverted_cropping.transpose(plans_manager.transpose_backward) + if return_probabilities: + # revert cropping + predicted_probabilities = label_manager.revert_cropping_on_probabilities(predicted_probabilities, + properties_dict[ + 'bbox_used_for_cropping'], + 
properties_dict[ + 'shape_before_cropping']) + predicted_probabilities = predicted_probabilities.cpu().numpy() + # revert transpose + predicted_probabilities = predicted_probabilities.transpose([0] + [i + 1 for i in + plans_manager.transpose_backward]) + torch.set_num_threads(old_threads) + return segmentation_reverted_cropping, predicted_probabilities + else: + torch.set_num_threads(old_threads) + return segmentation_reverted_cropping + + +def export_prediction_from_logits(predicted_array_or_file: Union[np.ndarray, torch.Tensor], properties_dict: dict, + configuration_manager: ConfigurationManager, + plans_manager: PlansManager, + dataset_json_dict_or_file: Union[dict, str], output_file_truncated: str, + save_probabilities: bool = False): + # if isinstance(predicted_array_or_file, str): + # tmp = deepcopy(predicted_array_or_file) + # if predicted_array_or_file.endswith('.npy'): + # predicted_array_or_file = np.load(predicted_array_or_file) + # elif predicted_array_or_file.endswith('.npz'): + # predicted_array_or_file = np.load(predicted_array_or_file)['softmax'] + # os.remove(tmp) + + if isinstance(dataset_json_dict_or_file, str): + dataset_json_dict_or_file = load_json(dataset_json_dict_or_file) + + label_manager = plans_manager.get_label_manager(dataset_json_dict_or_file) + ret = convert_predicted_logits_to_segmentation_with_correct_shape( + predicted_array_or_file, plans_manager, configuration_manager, label_manager, properties_dict, + return_probabilities=save_probabilities + ) + del predicted_array_or_file + + # save + if save_probabilities: + segmentation_final, probabilities_final = ret + np.savez_compressed(output_file_truncated + '.npz', probabilities=probabilities_final) + save_pickle(properties_dict, output_file_truncated + '.pkl') + del probabilities_final, ret + else: + segmentation_final = ret + del ret + + rw = plans_manager.image_reader_writer_class() + rw.write_seg(segmentation_final, output_file_truncated + dataset_json_dict_or_file['file_ending'], + properties_dict) + + +def resample_and_save(predicted: Union[torch.Tensor, np.ndarray], target_shape: List[int], output_file: str, + plans_manager: PlansManager, configuration_manager: ConfigurationManager, properties_dict: dict, + dataset_json_dict_or_file: Union[dict, str], num_threads_torch: int = default_num_processes) \ + -> None: + # # needed for cascade + # if isinstance(predicted, str): + # assert isfile(predicted), "If isinstance(segmentation_softmax, str) then " \ + # "isfile(segmentation_softmax) must be True" + # del_file = deepcopy(predicted) + # predicted = np.load(predicted) + # os.remove(del_file) + old_threads = torch.get_num_threads() + torch.set_num_threads(num_threads_torch) + + if isinstance(dataset_json_dict_or_file, str): + dataset_json_dict_or_file = load_json(dataset_json_dict_or_file) + + # resample to original shape + current_spacing = configuration_manager.spacing if \ + len(configuration_manager.spacing) == len(properties_dict['shape_after_cropping_and_before_resampling']) else \ + [properties_dict['spacing'][0], *configuration_manager.spacing] + target_spacing = configuration_manager.spacing if len(configuration_manager.spacing) == \ + len(properties_dict['shape_after_cropping_and_before_resampling']) else \ + [properties_dict['spacing'][0], *configuration_manager.spacing] + predicted_array_or_file = configuration_manager.resampling_fn_probabilities(predicted, + target_shape, + current_spacing, + target_spacing) + + # create segmentation (argmax, regions, etc) + label_manager = 
plans_manager.get_label_manager(dataset_json_dict_or_file) + segmentation = label_manager.convert_logits_to_segmentation(predicted_array_or_file) + # segmentation may be torch.Tensor but we continue with numpy + if isinstance(segmentation, torch.Tensor): + segmentation = segmentation.cpu().numpy() + np.savez_compressed(output_file, seg=segmentation.astype(np.uint8)) + torch.set_num_threads(old_threads) diff --git a/docker/template/src/nnunetv2/inference/predict_from_raw_data.py b/docker/template/src/nnunetv2/inference/predict_from_raw_data.py new file mode 100644 index 0000000..1b6543e --- /dev/null +++ b/docker/template/src/nnunetv2/inference/predict_from_raw_data.py @@ -0,0 +1,917 @@ +import inspect +import itertools +import multiprocessing +import os +from copy import deepcopy +from time import sleep +from typing import Tuple, Union, List, Optional + +import numpy as np +import torch +from acvl_utils.cropping_and_padding.padding import pad_nd_image +from batchgenerators.dataloading.multi_threaded_augmenter import MultiThreadedAugmenter +from batchgenerators.utilities.file_and_folder_operations import load_json, join, isfile, maybe_mkdir_p, isdir, subdirs, \ + save_json +from torch import nn +from torch._dynamo import OptimizedModule +from torch.nn.parallel import DistributedDataParallel +from tqdm import tqdm + +import nnunetv2 +from nnunetv2.configuration import default_num_processes +from nnunetv2.inference.data_iterators import PreprocessAdapterFromNpy, preprocessing_iterator_fromfiles, \ + preprocessing_iterator_fromnpy +from nnunetv2.inference.export_prediction import export_prediction_from_logits, \ + convert_predicted_logits_to_segmentation_with_correct_shape +from nnunetv2.inference.sliding_window_prediction import compute_gaussian, \ + compute_steps_for_sliding_window +from nnunetv2.utilities.file_path_utilities import get_output_folder, check_workers_alive_and_busy +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.helpers import empty_cache, dummy_context +from nnunetv2.utilities.json_export import recursive_fix_for_json_export +from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager +from nnunetv2.utilities.utils import create_lists_from_splitted_dataset_folder + + +class nnUNetPredictor(object): + def __init__(self, + tile_step_size: float = 0.5, + use_gaussian: bool = True, + use_mirroring: bool = True, + perform_everything_on_device: bool = True, + device: torch.device = torch.device('cuda'), + verbose: bool = False, + verbose_preprocessing: bool = False, + allow_tqdm: bool = True): + self.verbose = verbose + self.verbose_preprocessing = verbose_preprocessing + self.allow_tqdm = allow_tqdm + + self.plans_manager, self.configuration_manager, self.list_of_parameters, self.network, self.dataset_json, \ + self.trainer_name, self.allowed_mirroring_axes, self.label_manager = None, None, None, None, None, None, None, None + + self.tile_step_size = tile_step_size + self.use_gaussian = use_gaussian + self.use_mirroring = use_mirroring + if device.type == 'cuda': + # device = torch.device(type='cuda', index=0) # set the desired GPU with CUDA_VISIBLE_DEVICES! + # why would I ever want to do that. Stupid dobby. This kills DDP inference... + pass + if device.type != 'cuda': + print(f'perform_everything_on_device=True is only supported for cuda devices! 
Setting this to False') + perform_everything_on_device = False + self.device = device + self.perform_everything_on_device = perform_everything_on_device + + def initialize_from_trained_model_folder(self, model_training_output_dir: str, + use_folds: Union[Tuple[Union[int, str]], None], + checkpoint_name: str = 'checkpoint_final.pth'): + """ + This is used when making predictions with a trained model + """ + if use_folds is None: + use_folds = nnUNetPredictor.auto_detect_available_folds(model_training_output_dir, checkpoint_name) + + dataset_json = load_json(join(model_training_output_dir, 'dataset.json')) + plans = load_json(join(model_training_output_dir, 'plans.json')) + plans_manager = PlansManager(plans) + + if isinstance(use_folds, str): + use_folds = [use_folds] + + parameters = [] + for i, f in enumerate(use_folds): + f = int(f) if f != 'all' else f + checkpoint = torch.load(join(model_training_output_dir, f'fold_{f}', checkpoint_name), + map_location=torch.device('cpu')) + if i == 0: + trainer_name = checkpoint['trainer_name'] + configuration_name = checkpoint['init_args']['configuration'] + inference_allowed_mirroring_axes = checkpoint['inference_allowed_mirroring_axes'] if \ + 'inference_allowed_mirroring_axes' in checkpoint.keys() else None + + parameters.append(checkpoint['network_weights']) + + configuration_manager = plans_manager.get_configuration(configuration_name) + # restore network + num_input_channels = determine_num_input_channels(plans_manager, configuration_manager, dataset_json) + trainer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), + trainer_name, 'nnunetv2.training.nnUNetTrainer') + network = trainer_class.build_network_architecture(plans_manager, dataset_json, configuration_manager, + num_input_channels, enable_deep_supervision=False) + self.plans_manager = plans_manager + self.configuration_manager = configuration_manager + self.list_of_parameters = parameters + self.network = network + self.dataset_json = dataset_json + self.trainer_name = trainer_name + self.allowed_mirroring_axes = inference_allowed_mirroring_axes + self.label_manager = plans_manager.get_label_manager(dataset_json) + if ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) \ + and not isinstance(self.network, OptimizedModule): + print('Using torch.compile') + self.network = torch.compile(self.network) + + def manual_initialization(self, network: nn.Module, plans_manager: PlansManager, + configuration_manager: ConfigurationManager, parameters: Optional[List[dict]], + dataset_json: dict, trainer_name: str, + inference_allowed_mirroring_axes: Optional[Tuple[int, ...]]): + """ + This is used by the nnUNetTrainer to initialize nnUNetPredictor for the final validation + """ + self.plans_manager = plans_manager + self.configuration_manager = configuration_manager + self.list_of_parameters = parameters + self.network = network + self.dataset_json = dataset_json + self.trainer_name = trainer_name + self.allowed_mirroring_axes = inference_allowed_mirroring_axes + self.label_manager = plans_manager.get_label_manager(dataset_json) + allow_compile = True + allow_compile = allow_compile and ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) + allow_compile = allow_compile and not isinstance(self.network, OptimizedModule) + if isinstance(self.network, DistributedDataParallel): + allow_compile = allow_compile and isinstance(self.network.module, 
OptimizedModule)
+        if allow_compile:
+            print('Using torch.compile')
+            self.network = torch.compile(self.network)
+
+    @staticmethod
+    def auto_detect_available_folds(model_training_output_dir, checkpoint_name):
+        print('use_folds is None, attempting to auto detect available folds')
+        fold_folders = subdirs(model_training_output_dir, prefix='fold_', join=False)
+        fold_folders = [i for i in fold_folders if i != 'fold_all']
+        fold_folders = [i for i in fold_folders if isfile(join(model_training_output_dir, i, checkpoint_name))]
+        use_folds = [int(i.split('_')[-1]) for i in fold_folders]
+        print(f'found the following folds: {use_folds}')
+        return use_folds
+
+    def _manage_input_and_output_lists(self, list_of_lists_or_source_folder: Union[str, List[List[str]]],
+                                       output_folder_or_list_of_truncated_output_files: Union[None, str, List[str]],
+                                       folder_with_segs_from_prev_stage: str = None,
+                                       overwrite: bool = True,
+                                       part_id: int = 0,
+                                       num_parts: int = 1,
+                                       save_probabilities: bool = False):
+        if isinstance(list_of_lists_or_source_folder, str):
+            list_of_lists_or_source_folder = create_lists_from_splitted_dataset_folder(list_of_lists_or_source_folder,
+                                                                                       self.dataset_json['file_ending'])
+        print(f'There are {len(list_of_lists_or_source_folder)} cases in the source folder')
+        list_of_lists_or_source_folder = list_of_lists_or_source_folder[part_id::num_parts]
+        caseids = [os.path.basename(i[0])[:-(len(self.dataset_json['file_ending']) + 5)] for i in
+                   list_of_lists_or_source_folder]
+        print(
+            f'I am process {part_id} out of {num_parts} (max process ID is {num_parts - 1}, we start counting with 0!)')
+        print(f'There are {len(caseids)} cases that I would like to predict')
+
+        if isinstance(output_folder_or_list_of_truncated_output_files, str):
+            output_filename_truncated = [join(output_folder_or_list_of_truncated_output_files, i) for i in caseids]
+        else:
+            output_filename_truncated = output_folder_or_list_of_truncated_output_files
+
+        seg_from_prev_stage_files = [join(folder_with_segs_from_prev_stage, i + self.dataset_json['file_ending']) if
+                                     folder_with_segs_from_prev_stage is not None else None for i in caseids]
+        # remove already predicted files from the lists
+        if not overwrite and output_filename_truncated is not None:
+            tmp = [isfile(i + self.dataset_json['file_ending']) for i in output_filename_truncated]
+            if save_probabilities:
+                tmp2 = [isfile(i + '.npz') for i in output_filename_truncated]
+                tmp = [i and j for i, j in zip(tmp, tmp2)]
+            not_existing_indices = [i for i, j in enumerate(tmp) if not j]
+
+            output_filename_truncated = [output_filename_truncated[i] for i in not_existing_indices]
+            list_of_lists_or_source_folder = [list_of_lists_or_source_folder[i] for i in not_existing_indices]
+            seg_from_prev_stage_files = [seg_from_prev_stage_files[i] for i in not_existing_indices]
+            print(f'overwrite was set to {overwrite}, so I am only working on cases that haven\'t been predicted yet. '
+                  f'That\'s {len(not_existing_indices)} cases.')
+        return list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files
+
+    def predict_from_files(self,
+                           list_of_lists_or_source_folder: Union[str, List[List[str]]],
+                           output_folder_or_list_of_truncated_output_files: Union[str, None, List[str]],
+                           save_probabilities: bool = False,
+                           overwrite: bool = True,
+                           num_processes_preprocessing: int = default_num_processes,
+                           num_processes_segmentation_export: int = default_num_processes,
+                           folder_with_segs_from_prev_stage: str = None,
+                           num_parts: int = 1,
+                           part_id: int = 0):
+        """
+        This is nnU-Net's default function for making predictions. It works best for batch predictions
+        (predicting many images at once).
+        """
+        if isinstance(output_folder_or_list_of_truncated_output_files, str):
+            output_folder = output_folder_or_list_of_truncated_output_files
+        elif isinstance(output_folder_or_list_of_truncated_output_files, list):
+            output_folder = os.path.dirname(output_folder_or_list_of_truncated_output_files[0])
+        else:
+            output_folder = None
+
+        ########################
+        # let's store the input arguments so that it's clear what was used to generate the prediction
+        if output_folder is not None:
+            my_init_kwargs = {}
+            for k in inspect.signature(self.predict_from_files).parameters.keys():
+                my_init_kwargs[k] = locals()[k]
+            my_init_kwargs = deepcopy(
+                my_init_kwargs)  # let's not unintentionally change anything in-place. Take this as a reminder.
+            recursive_fix_for_json_export(my_init_kwargs)
+            maybe_mkdir_p(output_folder)
+            save_json(my_init_kwargs, join(output_folder, 'predict_from_raw_data_args.json'))
+
+            # we need these two if we want to do things with the predictions like for example apply postprocessing
+            save_json(self.dataset_json, join(output_folder, 'dataset.json'), sort_keys=False)
+            save_json(self.plans_manager.plans, join(output_folder, 'plans.json'), sort_keys=False)
+        #######################
+
+        # check if we need a prediction from the previous stage
+        if self.configuration_manager.previous_stage_name is not None:
+            assert folder_with_segs_from_prev_stage is not None, \
+                f'The requested configuration is a cascaded network. It requires the segmentations of the previous ' \
+                f'stage ({self.configuration_manager.previous_stage_name}) as input. 
Please provide the folder where' \ + f' they are located via folder_with_segs_from_prev_stage' + + # sort out input and output filenames + list_of_lists_or_source_folder, output_filename_truncated, seg_from_prev_stage_files = \ + self._manage_input_and_output_lists(list_of_lists_or_source_folder, + output_folder_or_list_of_truncated_output_files, + folder_with_segs_from_prev_stage, overwrite, part_id, num_parts, + save_probabilities) + if len(list_of_lists_or_source_folder) == 0: + return + + data_iterator = self._internal_get_data_iterator_from_lists_of_filenames(list_of_lists_or_source_folder, + seg_from_prev_stage_files, + output_filename_truncated, + num_processes_preprocessing) + + return self.predict_from_data_iterator(data_iterator, save_probabilities, num_processes_segmentation_export) + + def _internal_get_data_iterator_from_lists_of_filenames(self, + input_list_of_lists: List[List[str]], + seg_from_prev_stage_files: Union[List[str], None], + output_filenames_truncated: Union[List[str], None], + num_processes: int): + return preprocessing_iterator_fromfiles(input_list_of_lists, seg_from_prev_stage_files, + output_filenames_truncated, self.plans_manager, self.dataset_json, + self.configuration_manager, num_processes, self.device.type == 'cuda', + self.verbose_preprocessing) + # preprocessor = self.configuration_manager.preprocessor_class(verbose=self.verbose_preprocessing) + # # hijack batchgenerators, yo + # # we use the multiprocessing of the batchgenerators dataloader to handle all the background worker stuff. This + # # way we don't have to reinvent the wheel here. + # num_processes = max(1, min(num_processes, len(input_list_of_lists))) + # ppa = PreprocessAdapter(input_list_of_lists, seg_from_prev_stage_files, preprocessor, + # output_filenames_truncated, self.plans_manager, self.dataset_json, + # self.configuration_manager, num_processes) + # if num_processes == 0: + # mta = SingleThreadedAugmenter(ppa, None) + # else: + # mta = MultiThreadedAugmenter(ppa, None, num_processes, 1, None, pin_memory=pin_memory) + # return mta + + def get_data_iterator_from_raw_npy_data(self, + image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], + segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, + np.ndarray, + List[ + np.ndarray]], + properties_or_list_of_properties: Union[dict, List[dict]], + truncated_ofname: Union[str, List[str], None], + num_processes: int = 3): + + list_of_images = [image_or_list_of_images] if not isinstance(image_or_list_of_images, list) else \ + image_or_list_of_images + + if isinstance(segs_from_prev_stage_or_list_of_segs_from_prev_stage, np.ndarray): + segs_from_prev_stage_or_list_of_segs_from_prev_stage = [ + segs_from_prev_stage_or_list_of_segs_from_prev_stage] + + if isinstance(truncated_ofname, str): + truncated_ofname = [truncated_ofname] + + if isinstance(properties_or_list_of_properties, dict): + properties_or_list_of_properties = [properties_or_list_of_properties] + + num_processes = min(num_processes, len(list_of_images)) + pp = preprocessing_iterator_fromnpy( + list_of_images, + segs_from_prev_stage_or_list_of_segs_from_prev_stage, + properties_or_list_of_properties, + truncated_ofname, + self.plans_manager, + self.dataset_json, + self.configuration_manager, + num_processes, + self.device.type == 'cuda', + self.verbose_preprocessing + ) + + return pp + + def predict_from_list_of_npy_arrays(self, + image_or_list_of_images: Union[np.ndarray, List[np.ndarray]], + segs_from_prev_stage_or_list_of_segs_from_prev_stage: Union[None, + 
np.ndarray,
+                                                                                                    List[
+                                                                                                        np.ndarray]],
+                                        properties_or_list_of_properties: Union[dict, List[dict]],
+                                        truncated_ofname: Union[str, List[str], None],
+                                        num_processes: int = 3,
+                                        save_probabilities: bool = False,
+                                        num_processes_segmentation_export: int = default_num_processes):
+        iterator = self.get_data_iterator_from_raw_npy_data(image_or_list_of_images,
+                                                            segs_from_prev_stage_or_list_of_segs_from_prev_stage,
+                                                            properties_or_list_of_properties,
+                                                            truncated_ofname,
+                                                            num_processes)
+        return self.predict_from_data_iterator(iterator, save_probabilities, num_processes_segmentation_export)
+
+    def predict_from_data_iterator(self,
+                                   data_iterator,
+                                   save_probabilities: bool = False,
+                                   num_processes_segmentation_export: int = default_num_processes):
+        """
+        each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys!
+        If 'ofile' is None, the result will be returned instead of written to a file
+        """
+        with multiprocessing.get_context("spawn").Pool(num_processes_segmentation_export) as export_pool:
+            worker_list = [i for i in export_pool._pool]
+            r = []
+            for preprocessed in data_iterator:
+                data = preprocessed['data']
+                if isinstance(data, str):
+                    delfile = data
+                    data = torch.from_numpy(np.load(data))
+                    os.remove(delfile)
+
+                ofile = preprocessed['ofile']
+                if ofile is not None:
+                    print(f'\nPredicting {os.path.basename(ofile)}:')
+                else:
+                    print(f'\nPredicting image of shape {data.shape}:')
+
+                print(f'perform_everything_on_device: {self.perform_everything_on_device}')
+
+                properties = preprocessed['data_properties']
+
+                # let's not get into a runaway situation where the GPU predicts so fast that the disk has to be
+                # swamped with npy files
+                proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2)
+                while not proceed:
+                    # print('sleeping')
+                    sleep(0.1)
+                    proceed = not check_workers_alive_and_busy(export_pool, worker_list, r, allowed_num_queued=2)
+
+                prediction = self.predict_logits_from_preprocessed_data(data).cpu()
+
+                if ofile is not None:
+                    # this needs to go into background processes
+                    # export_prediction_from_logits(prediction, properties, configuration_manager, plans_manager,
+                    #                               dataset_json, ofile, save_probabilities)
+                    print('sending off prediction to background worker for resampling and export')
+                    r.append(
+                        export_pool.starmap_async(
+                            export_prediction_from_logits,
+                            ((prediction, properties, self.configuration_manager, self.plans_manager,
+                              self.dataset_json, ofile, save_probabilities),)
+                        )
+                    )
+                else:
+                    # convert_predicted_logits_to_segmentation_with_correct_shape(prediction, plans_manager,
+                    #                                                             configuration_manager, label_manager,
+                    #                                                             properties,
+                    #                                                             save_probabilities)
+                    print('sending off prediction to background worker for resampling')
+                    r.append(
+                        export_pool.starmap_async(
+                            convert_predicted_logits_to_segmentation_with_correct_shape, (
+                                (prediction, self.plans_manager,
+                                 self.configuration_manager, self.label_manager,
+                                 properties,
+                                 save_probabilities),)
+                        )
+                    )
+                if ofile is not None:
+                    print(f'done with {os.path.basename(ofile)}')
+                else:
+                    print(f'\nDone with image of shape {data.shape}:')
+            ret = [i.get()[0] for i in r]
+
+        if isinstance(data_iterator, MultiThreadedAugmenter):
+            data_iterator._finish()
+
+        # clear lru cache
+        compute_gaussian.cache_clear()
+        # clear device cache
+        empty_cache(self.device)
+        return ret
+
+    def predict_single_npy_array(self, input_image: np.ndarray, image_properties: dict,
+                                 segmentation_previous_stage: np.ndarray = None,
+                                 output_file_truncated: str = None,
+                                 save_or_return_probabilities: bool = False):
+        """
+        image_properties must only have a 'spacing' key!
+        """
+        ppa = PreprocessAdapterFromNpy([input_image], [segmentation_previous_stage], [image_properties],
+                                       [output_file_truncated],
+                                       self.plans_manager, self.dataset_json, self.configuration_manager,
+                                       num_threads_in_multithreaded=1, verbose=self.verbose)
+        if self.verbose:
+            print('preprocessing')
+        dct = next(ppa)
+
+        if self.verbose:
+            print('predicting')
+        predicted_logits = self.predict_logits_from_preprocessed_data(dct['data']).cpu()
+
+        if self.verbose:
+            print('resampling to original shape')
+        if output_file_truncated is not None:
+            export_prediction_from_logits(predicted_logits, dct['data_properties'], self.configuration_manager,
+                                          self.plans_manager, self.dataset_json, output_file_truncated,
+                                          save_or_return_probabilities)
+        else:
+            ret = convert_predicted_logits_to_segmentation_with_correct_shape(predicted_logits, self.plans_manager,
+                                                                              self.configuration_manager,
+                                                                              self.label_manager,
+                                                                              dct['data_properties'],
+                                                                              return_probabilities=
+                                                                              save_or_return_probabilities)
+            if save_or_return_probabilities:
+                return ret[0], ret[1]
+            else:
+                return ret
+
+    def predict_logits_from_preprocessed_data(self, data: torch.Tensor) -> torch.Tensor:
+        """
+        IMPORTANT! IF YOU ARE RUNNING THE CASCADE, THE SEGMENTATION FROM THE PREVIOUS STAGE MUST ALREADY BE STACKED ON
+        TOP OF THE IMAGE AS ONE-HOT REPRESENTATION! SEE PreprocessAdapter ON HOW THIS SHOULD BE DONE!
+
+        RETURNED LOGITS HAVE THE SHAPE OF THE INPUT. THEY MUST BE CONVERTED BACK TO THE ORIGINAL IMAGE SIZE.
+        SEE convert_predicted_logits_to_segmentation_with_correct_shape
+        """
+        n_threads = torch.get_num_threads()
+        torch.set_num_threads(default_num_processes if default_num_processes < n_threads else n_threads)
+        with torch.no_grad():
+            prediction = None
+
+            for params in self.list_of_parameters:
+
+                # messing with state dict names...
+                if not isinstance(self.network, OptimizedModule):
+                    self.network.load_state_dict(params)
+                else:
+                    self.network._orig_mod.load_state_dict(params)
+
+                # why not leave prediction on device if perform_everything_on_device? Because this may cause the
+                # second iteration to crash due to OOM. Grabbing that with try/except causes way more bloated code
+                # than this actually saves computation time
+                if prediction is None:
+                    prediction = self.predict_sliding_window_return_logits(data).to('cpu')
+                else:
+                    prediction += self.predict_sliding_window_return_logits(data).to('cpu')
+
+            if len(self.list_of_parameters) > 1:
+                prediction /= len(self.list_of_parameters)
+
+            if self.verbose: print('Prediction done')
+            prediction = prediction.to('cpu')
+        torch.set_num_threads(n_threads)
+        return prediction
+
+    def _internal_get_sliding_window_slicers(self, image_size: Tuple[int, ...]):
+        slicers = []
+        if len(self.configuration_manager.patch_size) < len(image_size):
+            assert len(self.configuration_manager.patch_size) == len(
+                image_size) - 1, 'if tile_size has fewer entries than image_size, len(tile_size) must be one ' \
+                                 'shorter than len(image_size) (only dimension discrepancy of 1 allowed).'
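+            # a 2D patch is tiled over a 3D image in this branch: steps are computed for the two in-plane axes
+            # only and one slicer is emitted per index d along the leading axis.
+            # Hypothetical example (not from the original code): patch_size (64, 64) on an image of shape
+            # (30, 110, 110) with tile_step_size 0.5 gives steps [0, 23, 46] per in-plane axis,
+            # i.e. 30 * 3 * 3 = 270 slicers.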
+ steps = compute_steps_for_sliding_window(image_size[1:], self.configuration_manager.patch_size, + self.tile_step_size) + if self.verbose: print(f'n_steps {image_size[0] * len(steps[0]) * len(steps[1])}, image size is' + f' {image_size}, tile_size {self.configuration_manager.patch_size}, ' + f'tile_step_size {self.tile_step_size}\nsteps:\n{steps}') + for d in range(image_size[0]): + for sx in steps[0]: + for sy in steps[1]: + slicers.append( + tuple([slice(None), d, *[slice(si, si + ti) for si, ti in + zip((sx, sy), self.configuration_manager.patch_size)]])) + else: + steps = compute_steps_for_sliding_window(image_size, self.configuration_manager.patch_size, + self.tile_step_size) + if self.verbose: print( + f'n_steps {np.prod([len(i) for i in steps])}, image size is {image_size}, tile_size {self.configuration_manager.patch_size}, ' + f'tile_step_size {self.tile_step_size}\nsteps:\n{steps}') + for sx in steps[0]: + for sy in steps[1]: + for sz in steps[2]: + slicers.append( + tuple([slice(None), *[slice(si, si + ti) for si, ti in + zip((sx, sy, sz), self.configuration_manager.patch_size)]])) + return slicers + + def _internal_maybe_mirror_and_predict(self, x: torch.Tensor) -> torch.Tensor: + mirror_axes = self.allowed_mirroring_axes if self.use_mirroring else None + if 'SAMed' in self.trainer_name: + prediction = self.network(x, True, self.configuration_manager.patch_size[-1])['masks'] + + else: + prediction = self.network(x) + + if mirror_axes is not None: + # check for invalid numbers in mirror_axes + # x should be 5d for 3d images and 4d for 2d. so the max value of mirror_axes cannot exceed len(x.shape) - 3 + assert max(mirror_axes) <= x.ndim - 3, 'mirror_axes does not match the dimension of the input!' + + axes_combinations = [ + c for i in range(len(mirror_axes)) for c in itertools.combinations([m + 2 for m in mirror_axes], i + 1) + ] + for axes in axes_combinations: + if 'SAMed' in self.trainer_name: + prediction += torch.flip(self.network(torch.flip(x, (*axes,)),True, self.configuration_manager.patch_size[-1])['masks'], (*axes,)) + else: + prediction += torch.flip(self.network(torch.flip(x, (*axes,))), (*axes,)) + prediction /= (len(axes_combinations) + 1) + return prediction + + def _internal_predict_sliding_window_return_logits(self, + data: torch.Tensor, + slicers, + do_on_device: bool = True, + ): + results_device = self.device if do_on_device else torch.device('cpu') + empty_cache(self.device) + + # move data to device + if self.verbose: + print(f'move image to device {results_device}') + data = data.to(results_device) + + # preallocate arrays + if self.verbose: + print(f'preallocating results arrays on device {results_device}') + predicted_logits = torch.zeros((self.label_manager.num_segmentation_heads, *data.shape[1:]), + dtype=torch.half, + device=results_device) + n_predictions = torch.zeros(data.shape[1:], dtype=torch.half, device=results_device) + if self.use_gaussian: + gaussian = compute_gaussian(tuple(self.configuration_manager.patch_size), sigma_scale=1. 
/ 8, + value_scaling_factor=10, + device=results_device) + + if self.verbose: print('running prediction') + if not self.allow_tqdm and self.verbose: print(f'{len(slicers)} steps') + for sl in tqdm(slicers, disable=not self.allow_tqdm): + workon = data[sl][None] + workon = workon.to(self.device, non_blocking=False) + + prediction = self._internal_maybe_mirror_and_predict(workon)[0].to(results_device) + + predicted_logits[sl] += (prediction * gaussian if self.use_gaussian else prediction) + n_predictions[sl[1:]] += (gaussian if self.use_gaussian else 1) + + predicted_logits /= n_predictions + # check for infs + if torch.any(torch.isinf(predicted_logits)): + raise RuntimeError('Encountered inf in predicted array. Aborting... If this problem persists, ' + 'reduce value_scaling_factor in compute_gaussian or increase the dtype of ' + 'predicted_logits to fp32') + return predicted_logits + + def predict_sliding_window_return_logits(self, input_image: torch.Tensor) \ + -> Union[np.ndarray, torch.Tensor]: + assert isinstance(input_image, torch.Tensor) + self.network = self.network.to(self.device) + self.network.eval() + + empty_cache(self.device) + + # Autocast is a little bitch. + # If the device_type is 'cpu' then it's slow as heck on some CPUs (no auto bfloat16 support detection) + # and needs to be disabled. + # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False + # is set. Whyyyyyyy. (this is why we don't make use of enabled=False) + # So autocast will only be active if we have a cuda device. + with torch.no_grad(): + with torch.autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context(): + assert input_image.ndim == 4, 'input_image must be a 4D np.ndarray or torch.Tensor (c, x, y, z)' + + if self.verbose: print(f'Input shape: {input_image.shape}') + if self.verbose: print("step_size:", self.tile_step_size) + if self.verbose: print("mirror_axes:", self.allowed_mirroring_axes if self.use_mirroring else None) + + # if input_image is smaller than tile_size we need to pad it to tile_size. + data, slicer_revert_padding = pad_nd_image(input_image, self.configuration_manager.patch_size, + 'constant', {'value': 0}, True, + None) + + slicers = self._internal_get_sliding_window_slicers(data.shape[1:]) + + if self.perform_everything_on_device and self.device != 'cpu': + # we need to try except here because we can run OOM in which case we need to fall back to CPU as a results device + try: + predicted_logits = self._internal_predict_sliding_window_return_logits(data, slicers, self.perform_everything_on_device) + except RuntimeError: + print('Prediction on device was unsuccessful, probably due to a lack of memory. Moving results arrays to CPU') + empty_cache(self.device) + predicted_logits = self._internal_predict_sliding_window_return_logits(data, slicers, False) + else: + predicted_logits = self._internal_predict_sliding_window_return_logits(data, slicers, self.perform_everything_on_device) + + empty_cache(self.device) + # revert padding + predicted_logits = predicted_logits[tuple([slice(None), *slicer_revert_padding[1:]])] + return predicted_logits + + +def predict_entry_point_modelfolder(): + import argparse + parser = argparse.ArgumentParser(description='Use this to run inference with nnU-Net. This function is used when ' + 'you want to manually specify a folder containing a trained nnU-Net ' + 'model. 
This is useful when the nnunet environment variables ' + '(nnUNet_results) are not set.') + parser.add_argument('-i', type=str, required=True, + help='input folder. Remember to use the correct channel numberings for your files (_0000 etc). ' + 'File endings must be the same as the training dataset!') + parser.add_argument('-o', type=str, required=True, + help='Output folder. If it does not exist it will be created. Predicted segmentations will ' + 'have the same name as their source images.') + parser.add_argument('-m', type=str, required=True, + help='Folder in which the trained model is. Must have subfolders fold_X for the different ' + 'folds you trained') + parser.add_argument('-f', nargs='+', type=str, required=False, default=(0, 1, 2, 3, 4), + help='Specify the folds of the trained model that should be used for prediction. ' + 'Default: (0, 1, 2, 3, 4)') + parser.add_argument('-step_size', type=float, required=False, default=0.5, + help='Step size for sliding window prediction. The larger it is the faster but less accurate ' + 'the prediction. Default: 0.5. Cannot be larger than 1. We recommend the default.') + parser.add_argument('--disable_tta', action='store_true', required=False, default=False, + help='Set this flag to disable test time data augmentation in the form of mirroring. Faster, ' + 'but less accurate inference. Not recommended.') + parser.add_argument('--verbose', action='store_true', help="Set this if you like being talked to. You will have " + "to be a good listener/reader.") + parser.add_argument('--save_probabilities', action='store_true', + help='Set this to export predicted class "probabilities". Required if you want to ensemble ' + 'multiple configurations.') + parser.add_argument('--continue_prediction', '--c', action='store_true', + help='Continue an aborted previous prediction (will not overwrite existing files)') + parser.add_argument('-chk', type=str, required=False, default='checkpoint_final.pth', + help='Name of the checkpoint you want to use. Default: checkpoint_final.pth') + parser.add_argument('-npp', type=int, required=False, default=3, + help='Number of processes used for preprocessing. More is not always better. Beware of ' + 'out-of-RAM issues. Default: 3') + parser.add_argument('-nps', type=int, required=False, default=3, + help='Number of processes used for segmentation export. More is not always better. Beware of ' + 'out-of-RAM issues. Default: 3') + parser.add_argument('-prev_stage_predictions', type=str, required=False, default=None, + help='Folder containing the predictions of the previous stage. Required for cascaded models.') + parser.add_argument('-device', type=str, default='cuda', required=False, + help="Use this to set the device the inference should run with. Available options are 'cuda' " + "(GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! " + "Use CUDA_VISIBLE_DEVICES=X nnUNetv2_predict [...] instead!") + parser.add_argument('--disable_progress_bar', action='store_true', required=False, default=False, + help='Set this flag to disable progress bar. Recommended for HPC environments (non interactive ' + 'jobs)') + + + print( + "\n#######################################################################\nPlease cite the following paper " + "when using nnU-Net:\n" + "Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). " + "nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. 
" + "Nature methods, 18(2), 203-211.\n#######################################################################\n") + + args = parser.parse_args() + args.f = [i if i == 'all' else int(i) for i in args.f] + + if not isdir(args.o): + maybe_mkdir_p(args.o) + + assert args.device in ['cpu', 'cuda', + 'mps'], f'-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}.' + if args.device == 'cpu': + # let's allow torch to use hella threads + import multiprocessing + torch.set_num_threads(multiprocessing.cpu_count()) + device = torch.device('cpu') + elif args.device == 'cuda': + # multithreading in torch doesn't help nnU-Net if run on GPU + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + device = torch.device('cuda') + else: + device = torch.device('mps') + + predictor = nnUNetPredictor(tile_step_size=args.step_size, + use_gaussian=True, + use_mirroring=not args.disable_tta, + perform_everything_on_device=True, + device=device, + verbose=args.verbose, + allow_tqdm=not args.disable_progress_bar) + predictor.initialize_from_trained_model_folder(args.m, args.f, args.chk) + predictor.predict_from_files(args.i, args.o, save_probabilities=args.save_probabilities, + overwrite=not args.continue_prediction, + num_processes_preprocessing=args.npp, + num_processes_segmentation_export=args.nps, + folder_with_segs_from_prev_stage=args.prev_stage_predictions, + num_parts=1, part_id=0) + + +def predict_entry_point(): + import argparse + parser = argparse.ArgumentParser(description='Use this to run inference with nnU-Net. This function is used when ' + 'you want to manually specify a folder containing a trained nnU-Net ' + 'model. This is useful when the nnunet environment variables ' + '(nnUNet_results) are not set.') + parser.add_argument('-i', type=str, required=True, + help='input folder. Remember to use the correct channel numberings for your files (_0000 etc). ' + 'File endings must be the same as the training dataset!') + parser.add_argument('-o', type=str, required=True, + help='Output folder. If it does not exist it will be created. Predicted segmentations will ' + 'have the same name as their source images.') + parser.add_argument('-d', type=str, required=True, + help='Dataset with which you would like to predict. You can specify either dataset name or id') + parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', + help='Plans identifier. Specify the plans in which the desired configuration is located. ' + 'Default: nnUNetPlans') + parser.add_argument('-tr', type=str, required=False, default='nnUNetTrainer', + help='What nnU-Net trainer class was used for training? Default: nnUNetTrainer') + parser.add_argument('-c', type=str, required=True, + help='nnU-Net configuration that should be used for prediction. Config must be located ' + 'in the plans specified with -p') + parser.add_argument('-f', nargs='+', type=str, required=False, default=(0, 1, 2, 3, 4), + help='Specify the folds of the trained model that should be used for prediction. ' + 'Default: (0, 1, 2, 3, 4)') + parser.add_argument('-step_size', type=float, required=False, default=0.5, + help='Step size for sliding window prediction. The larger it is the faster but less accurate ' + 'the prediction. Default: 0.5. Cannot be larger than 1. We recommend the default.') + parser.add_argument('--disable_tta', action='store_true', required=False, default=False, + help='Set this flag to disable test time data augmentation in the form of mirroring. 
Faster, '
+                             'but less accurate inference. Not recommended.')
+    parser.add_argument('--verbose', action='store_true', help="Set this if you like being talked to. You will have "
+                                                               "to be a good listener/reader.")
+    parser.add_argument('--save_probabilities', action='store_true',
+                        help='Set this to export predicted class "probabilities". Required if you want to ensemble '
+                             'multiple configurations.')
+    parser.add_argument('--continue_prediction', action='store_true',
+                        help='Continue an aborted previous prediction (will not overwrite existing files)')
+    parser.add_argument('-chk', type=str, required=False, default='checkpoint_final.pth',
+                        help='Name of the checkpoint you want to use. Default: checkpoint_final.pth')
+    parser.add_argument('-npp', type=int, required=False, default=3,
+                        help='Number of processes used for preprocessing. More is not always better. Beware of '
+                             'out-of-RAM issues. Default: 3')
+    parser.add_argument('-nps', type=int, required=False, default=3,
+                        help='Number of processes used for segmentation export. More is not always better. Beware of '
+                             'out-of-RAM issues. Default: 3')
+    parser.add_argument('-prev_stage_predictions', type=str, required=False, default=None,
+                        help='Folder containing the predictions of the previous stage. Required for cascaded models.')
+    parser.add_argument('-num_parts', type=int, required=False, default=1,
+                        help='Number of separate nnUNetv2_predict calls that you will be making. Default: 1 (= this '
+                             'one call predicts everything)')
+    parser.add_argument('-part_id', type=int, required=False, default=0,
+                        help='If multiple nnUNetv2_predict calls exist, which one is this? IDs start with 0 and end '
+                             'with num_parts - 1. So when you submit 5 nnUNetv2_predict calls you need to set '
+                             '-num_parts 5 and use -part_id 0, 1, 2, 3 and 4. Simple, right? Note: You are yourself '
+                             'responsible for making these run on separate GPUs! Use CUDA_VISIBLE_DEVICES (google, yo!)')
+    parser.add_argument('-device', type=str, default='cuda', required=False,
+                        help="Use this to set the device the inference should run with. Available options are 'cuda' "
+                             "(GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! "
+                             "Use CUDA_VISIBLE_DEVICES=X nnUNetv2_predict [...] instead!")
+    parser.add_argument('--disable_progress_bar', action='store_true', required=False, default=False,
+                        help='Set this flag to disable progress bar. Recommended for HPC environments (non interactive '
+                             'jobs)')
+
+    print(
+        "\n#######################################################################\nPlease cite the following paper "
+        "when using nnU-Net:\n"
+        "Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). "
+        "nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. "
+        "Nature methods, 18(2), 203-211.\n#######################################################################\n")
+
+    args = parser.parse_args()
+    args.f = [i if i == 'all' else int(i) for i in args.f]
+
+    model_folder = get_output_folder(args.d, args.tr, args.p, args.c)
+
+    if not isdir(args.o):
+        maybe_mkdir_p(args.o)
+
+    # slightly passive aggressive haha
+    assert args.part_id < args.num_parts, 'Do you even read the documentation? See nnUNetv2_predict -h.'
+
+    assert args.device in ['cpu', 'cuda',
+                           'mps'], f'-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}.'
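+    # device-dependent threading (see the inline comments below): CPU inference uses all available torch threads,
+    # while for 'cuda' torch threading brings no benefit and is pinned to 1; preprocessing and export
+    # parallelism come from the -npp / -nps worker processes instead.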
+ if args.device == 'cpu': + # let's allow torch to use hella threads + import multiprocessing + torch.set_num_threads(multiprocessing.cpu_count()) + device = torch.device('cpu') + elif args.device == 'cuda': + # multithreading in torch doesn't help nnU-Net if run on GPU + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + device = torch.device('cuda') + else: + device = torch.device('mps') + + predictor = nnUNetPredictor(tile_step_size=args.step_size, + use_gaussian=True, + use_mirroring=not args.disable_tta, + perform_everything_on_device=True, + device=device, + verbose=args.verbose, + verbose_preprocessing=False, + allow_tqdm=not args.disable_progress_bar) + predictor.initialize_from_trained_model_folder( + model_folder, + args.f, + checkpoint_name=args.chk + ) + predictor.predict_from_files(args.i, args.o, save_probabilities=args.save_probabilities, + overwrite=not args.continue_prediction, + num_processes_preprocessing=args.npp, + num_processes_segmentation_export=args.nps, + folder_with_segs_from_prev_stage=args.prev_stage_predictions, + num_parts=args.num_parts, + part_id=args.part_id) + # r = predict_from_raw_data(args.i, + # args.o, + # model_folder, + # args.f, + # args.step_size, + # use_gaussian=True, + # use_mirroring=not args.disable_tta, + # perform_everything_on_device=True, + # verbose=args.verbose, + # save_probabilities=args.save_probabilities, + # overwrite=not args.continue_prediction, + # checkpoint_name=args.chk, + # num_processes_preprocessing=args.npp, + # num_processes_segmentation_export=args.nps, + # folder_with_segs_from_prev_stage=args.prev_stage_predictions, + # num_parts=args.num_parts, + # part_id=args.part_id, + # device=device) + + +if __name__ == '__main__': + # predict a bunch of files + from nnunetv2.paths import nnUNet_results, nnUNet_raw + predictor = nnUNetPredictor( + tile_step_size=0.5, + use_gaussian=True, + use_mirroring=True, + perform_everything_on_device=True, + device=torch.device('cuda', 0), + verbose=False, + verbose_preprocessing=False, + allow_tqdm=True + ) + predictor.initialize_from_trained_model_folder( + join(nnUNet_results, 'Dataset003_Liver/nnUNetTrainer__nnUNetPlans__3d_lowres'), + use_folds=(0, ), + checkpoint_name='checkpoint_final.pth', + ) + predictor.predict_from_files(join(nnUNet_raw, 'Dataset003_Liver/imagesTs'), + join(nnUNet_raw, 'Dataset003_Liver/imagesTs_predlowres'), + save_probabilities=False, overwrite=False, + num_processes_preprocessing=2, num_processes_segmentation_export=2, + folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0) + + # predict a numpy array + from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO + img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTr/liver_63_0000.nii.gz')]) + ret = predictor.predict_single_npy_array(img, props, None, None, False) + + iterator = predictor.get_data_iterator_from_raw_npy_data([img], None, [props], None, 1) + ret = predictor.predict_from_data_iterator(iterator, False, 1) + + + # predictor = nnUNetPredictor( + # tile_step_size=0.5, + # use_gaussian=True, + # use_mirroring=True, + # perform_everything_on_device=True, + # device=torch.device('cuda', 0), + # verbose=False, + # allow_tqdm=True + # ) + # predictor.initialize_from_trained_model_folder( + # join(nnUNet_results, 'Dataset003_Liver/nnUNetTrainer__nnUNetPlans__3d_cascade_fullres'), + # use_folds=(0,), + # checkpoint_name='checkpoint_final.pth', + # ) + # predictor.predict_from_files(join(nnUNet_raw, 'Dataset003_Liver/imagesTs'), + # join(nnUNet_raw, 
'Dataset003_Liver/imagesTs_predCascade'),
+    #                              save_probabilities=False, overwrite=False,
+    #                              num_processes_preprocessing=2, num_processes_segmentation_export=2,
+    #                              folder_with_segs_from_prev_stage='/media/isensee/data/nnUNet_raw/Dataset003_Liver/imagesTs_predlowres',
+    #                              num_parts=1, part_id=0)
+
diff --git a/docker/template/src/nnunetv2/inference/readme.md b/docker/template/src/nnunetv2/inference/readme.md
new file mode 100644
index 0000000..4f832a1
--- /dev/null
+++ b/docker/template/src/nnunetv2/inference/readme.md
@@ -0,0 +1,205 @@
+The nnU-Net inference is now much more dynamic than before, allowing you to more seamlessly integrate nnU-Net into
+your existing workflows.
+This readme will give you a quick rundown of your options. This is not a complete guide. Look into the code to learn
+all the details!
+
+# Preface
+In terms of speed, the most efficient inference strategy is the one used by the nnU-Net defaults! Images are read on
+the fly and preprocessed in background workers. The main process takes the preprocessed images, predicts them and
+sends the prediction off to another set of background workers which will resize the resulting logits, convert
+them to a segmentation and export the segmentation.
+
+The default setup is the best option because
+
+1) loading and preprocessing as well as segmentation export are interlaced with the prediction. The main process can
+focus on communicating with the compute device (i.e. your GPU) and does not have to do any other processing.
+This uses your resources as well as possible!
+2) only the images and segmentations that are currently needed are stored in RAM! Imagine predicting many images
+and having to store all of them + the results in your system memory.
+
+# nnUNetPredictor
+The new nnUNetPredictor class encapsulates the inference code and makes it simple to switch between modes. Your
+code can hold a nnUNetPredictor instance and perform prediction on the fly. Previously this was not possible and each
+new prediction request resulted in reloading the parameters and reinstantiating the network architecture. Not ideal.
+
+The nnUNetPredictor must be initialized manually! You will want to use the
+`predictor.initialize_from_trained_model_folder` function for 99% of use cases!
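+
+For the remaining 1% (e.g. when the network and plans are already in memory, as during the final validation in
+nnUNetTrainer) there is `predictor.manual_initialization`. A minimal sketch, assuming `trainer` is an already
+set up nnUNetTrainer instance (a hypothetical variable, not part of this readme's examples):
+```python
+    predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True)
+    # parameters=None: the weights already loaded into trainer.network are used as-is
+    predictor.manual_initialization(trainer.network, trainer.plans_manager, trainer.configuration_manager,
+                                    None, trainer.dataset_json, trainer.__class__.__name__,
+                                    trainer.inference_allowed_mirroring_axes)
+```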
+
+New feature: If you do not specify an output folder / output files then the predicted segmentations will be
+returned.
+
+
+## Recommended nnU-Net default: predict from source files
+
+tldr:
+- loads images on the fly
+- performs preprocessing in background workers
+- main process focuses only on making predictions
+- results are again given to background workers for resampling and (optional) export
+
+pros:
+- best suited for predicting a large number of images
+- nicer to your RAM
+
+cons:
+- not ideal when single images are to be predicted
+- requires images to be present as files
+
+Example:
+```python
+    from nnunetv2.paths import nnUNet_results, nnUNet_raw
+    import torch
+    from batchgenerators.utilities.file_and_folder_operations import join
+    from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor
+
+    # instantiate the nnUNetPredictor
+    predictor = nnUNetPredictor(
+        tile_step_size=0.5,
+        use_gaussian=True,
+        use_mirroring=True,
+        perform_everything_on_device=True,
+        device=torch.device('cuda', 0),
+        verbose=False,
+        verbose_preprocessing=False,
+        allow_tqdm=True
+    )
+    # initializes the network architecture, loads the checkpoint
+    predictor.initialize_from_trained_model_folder(
+        join(nnUNet_results, 'Dataset003_Liver/nnUNetTrainer__nnUNetPlans__3d_lowres'),
+        use_folds=(0,),
+        checkpoint_name='checkpoint_final.pth',
+    )
+    # variant 1: give input and output folders
+    predictor.predict_from_files(join(nnUNet_raw, 'Dataset003_Liver/imagesTs'),
+                                 join(nnUNet_raw, 'Dataset003_Liver/imagesTs_predlowres'),
+                                 save_probabilities=False, overwrite=False,
+                                 num_processes_preprocessing=2, num_processes_segmentation_export=2,
+                                 folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0)
+```
+
+Instead of giving input and output folders you can also give concrete files. If you give concrete files, there is no
+need for the _0000 suffix anymore! This can be useful in situations where you have no control over the filenames!
+Remember that the files must be given as 'list of lists' where each entry in the outer list is a case to be predicted
+and the inner list contains all the files belonging to that case. There is just one file for datasets with just one
+input modality (such as CT) but there may be more files for others (such as MRI, where there is sometimes T1, T2,
+Flair etc.).
+IMPORTANT: the order in which the files for each case are given must match the order of the channels as defined in the
+dataset.json!
+
+If you give files as input, you need to give individual output files as output!
+
+```python
+    # variant 2, use list of files as inputs. Note how we use nested lists!!!
+    indir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs')
+    outdir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs_predlowres')
+    predictor.predict_from_files([[join(indir, 'liver_152_0000.nii.gz')],
+                                  [join(indir, 'liver_142_0000.nii.gz')]],
+                                 [join(outdir, 'liver_152.nii.gz'),
+                                  join(outdir, 'liver_142.nii.gz')],
+                                 save_probabilities=False, overwrite=False,
+                                 num_processes_preprocessing=2, num_processes_segmentation_export=2,
+                                 folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0)
+```
+
+Did you know? 
If you do not specify output files, the predicted segmentations will be returned:
+```python
+    # variant 2.5, returns segmentations
+    indir = join(nnUNet_raw, 'Dataset003_Liver/imagesTs')
+    predicted_segmentations = predictor.predict_from_files([[join(indir, 'liver_152_0000.nii.gz')],
+                                                            [join(indir, 'liver_142_0000.nii.gz')]],
+                                                           None,
+                                                           save_probabilities=False, overwrite=True,
+                                                           num_processes_preprocessing=2, num_processes_segmentation_export=2,
+                                                           folder_with_segs_from_prev_stage=None, num_parts=1, part_id=0)
+```
+
+## Prediction from npy arrays
+tldr:
+- you give images as a list of npy arrays
+- performs preprocessing in background workers
+- main process focuses only on making predictions
+- results are again given to background workers for resampling and (optional) export
+
+pros:
+- the correct variant for when you have images in RAM already
+- well suited for predicting multiple images
+
+cons:
+- uses more RAM than the default
+- unsuited for large numbers of images, as all images must be held in RAM
+
+```python
+    from nnunetv2.imageio.simpleitk_reader_writer import SimpleITKIO
+
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_147_0000.nii.gz')])
+    img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
+    img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
+    img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
+    # we do not set output files so that the segmentations will be returned. You can of course also specify output
+    # files instead (no return value in that case)
+    ret = predictor.predict_from_list_of_npy_arrays([img, img2, img3, img4],
+                                                    None,
+                                                    [props, props2, props3, props4],
+                                                    None, 2, save_probabilities=False,
+                                                    num_processes_segmentation_export=2)
+```
+
+## Predicting a single npy array
+
+tldr:
+- you give one image as npy array
+- everything is done in the main process: preprocessing, prediction, resampling, (export)
+- no interlacing, slowest variant!
+- ONLY USE THIS IF YOU CANNOT GIVE NNUNET MULTIPLE IMAGES AT ONCE FOR SOME REASON
+
+pros:
+- no messing with multiprocessing
+- no messing with data iterator blabla
+
+cons:
+- slow as heck, yo
+- never the right choice unless you can only give a single image at a time to nnU-Net
+
+```python
+    # predict a single numpy array
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTr/liver_63_0000.nii.gz')])
+    ret = predictor.predict_single_npy_array(img, props, None, None, False)
+```
+
+## Predicting with a custom data iterator
+tldr:
+- highly flexible
+- not for newbies
+
+pros:
+- you can do everything yourself
+- you have all the freedom you want
+- really fast if you remember to use multiprocessing in your iterator
+
+cons:
+- you need to do everything yourself
+- harder than you might think
+
+```python
+    img, props = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_147_0000.nii.gz')])
+    img2, props2 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_146_0000.nii.gz')])
+    img3, props3 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_145_0000.nii.gz')])
+    img4, props4 = SimpleITKIO().read_images([join(nnUNet_raw, 'Dataset003_Liver/imagesTs/liver_144_0000.nii.gz')])
+    # each element returned by data_iterator must be a dict with 'data', 'ofile' and 'data_properties' keys! 
+    # If 'ofile' is None, the result will be returned instead of written to a file
+    # the iterator is responsible for performing the correct preprocessing!
+    # note how the iterator here does not use multiprocessing -> preprocessing will be done in the main thread!
+    # take a look at the default iterators for predict_from_files and predict_from_list_of_npy_arrays
+    # (they both use predictor.predict_from_data_iterator) for inspiration!
+    def my_iterator(list_of_input_arrs, list_of_input_props):
+        preprocessor = predictor.configuration_manager.preprocessor_class(verbose=predictor.verbose)
+        for a, p in zip(list_of_input_arrs, list_of_input_props):
+            data, seg = preprocessor.run_case_npy(a,
+                                                  None,
+                                                  p,
+                                                  predictor.plans_manager,
+                                                  predictor.configuration_manager,
+                                                  predictor.dataset_json)
+            yield {'data': torch.from_numpy(data).contiguous().pin_memory(), 'data_properties': p, 'ofile': None}
+    ret = predictor.predict_from_data_iterator(my_iterator([img, img2, img3, img4], [props, props2, props3, props4]),
+                                               save_probabilities=False, num_processes_segmentation_export=3)
+```
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/inference/sliding_window_prediction.py b/docker/template/src/nnunetv2/inference/sliding_window_prediction.py
new file mode 100644
index 0000000..a6f8ebb
--- /dev/null
+++ b/docker/template/src/nnunetv2/inference/sliding_window_prediction.py
@@ -0,0 +1,67 @@
+from functools import lru_cache
+
+import numpy as np
+import torch
+from typing import Union, Tuple, List
+from acvl_utils.cropping_and_padding.padding import pad_nd_image
+from scipy.ndimage import gaussian_filter
+
+
+@lru_cache(maxsize=2)
+def compute_gaussian(tile_size: Union[Tuple[int, ...], List[int]], sigma_scale: float = 1. / 8,
+                     value_scaling_factor: float = 1, dtype=torch.float16, device=torch.device('cuda', 0)) \
+        -> torch.Tensor:
+    tmp = np.zeros(tile_size)
+    center_coords = [i // 2 for i in tile_size]
+    sigmas = [i * sigma_scale for i in tile_size]
+    tmp[tuple(center_coords)] = 1
+    gaussian_importance_map = gaussian_filter(tmp, sigmas, 0, mode='constant', cval=0)
+
+    gaussian_importance_map = torch.from_numpy(gaussian_importance_map)
+
+    gaussian_importance_map = gaussian_importance_map / torch.max(gaussian_importance_map) * value_scaling_factor
+    gaussian_importance_map = gaussian_importance_map.type(dtype).to(device)
+
+    # gaussian_importance_map cannot be 0, otherwise we may end up with nans!
+    gaussian_importance_map[gaussian_importance_map == 0] = torch.min(
+        gaussian_importance_map[gaussian_importance_map != 0])
+
+    return gaussian_importance_map
+
+
+def compute_steps_for_sliding_window(image_size: Tuple[int, ...], tile_size: Tuple[int, ...], tile_step_size: float) -> \
+    List[List[int]]:
+    assert all(i >= j for i, j in zip(image_size, tile_size)), "image size must be as large or larger than patch_size"
+    assert 0 < tile_step_size <= 1, 'step_size must be larger than 0 and smaller or equal to 1'
+
+    # our step width is patch_size*step_size at most, but can be narrower. 
For example if we have image size of + # 110, patch size of 64 and step_size of 0.5, then we want to make 3 steps starting at coordinate 0, 23, 46 + target_step_sizes_in_voxels = [i * tile_step_size for i in tile_size] + + num_steps = [int(np.ceil((i - k) / j)) + 1 for i, j, k in zip(image_size, target_step_sizes_in_voxels, tile_size)] + + steps = [] + for dim in range(len(tile_size)): + # the highest step value for this dimension is + max_step_value = image_size[dim] - tile_size[dim] + if num_steps[dim] > 1: + actual_step_size = max_step_value / (num_steps[dim] - 1) + else: + actual_step_size = 99999999999 # does not matter because there is only one step at 0 + + steps_here = [int(np.round(actual_step_size * i)) for i in range(num_steps[dim])] + + steps.append(steps_here) + + return steps + + +if __name__ == '__main__': + a = torch.rand((4, 2, 32, 23)) + a_npy = a.numpy() + + a_padded = pad_nd_image(a, new_shape=(48, 27)) + a_npy_padded = pad_nd_image(a_npy, new_shape=(48, 27)) + assert all([i == j for i, j in zip(a_padded.shape, (4, 2, 48, 27))]) + assert all([i == j for i, j in zip(a_npy_padded.shape, (4, 2, 48, 27))]) + assert np.all(a_padded.numpy() == a_npy_padded) diff --git a/docker/template/src/nnunetv2/model_sharing/__init__.py b/docker/template/src/nnunetv2/model_sharing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/model_sharing/entry_points.py b/docker/template/src/nnunetv2/model_sharing/entry_points.py new file mode 100644 index 0000000..1ab7c93 --- /dev/null +++ b/docker/template/src/nnunetv2/model_sharing/entry_points.py @@ -0,0 +1,61 @@ +from nnunetv2.model_sharing.model_download import download_and_install_from_url +from nnunetv2.model_sharing.model_export import export_pretrained_model +from nnunetv2.model_sharing.model_import import install_model_from_zip_file + + +def print_license_warning(): + print('') + print('######################################################') + print('!!!!!!!!!!!!!!!!!!!!!!!!WARNING!!!!!!!!!!!!!!!!!!!!!!!') + print('######################################################') + print("Using the pretrained model weights is subject to the license of the dataset they were trained on. Some " + "allow commercial use, others don't. It is your responsibility to make sure you use them appropriately! Use " + "nnUNet_print_pretrained_model_info(task_name) to see a summary of the dataset and where to find its license!") + print('######################################################') + print('') + + +def download_by_url(): + import argparse + parser = argparse.ArgumentParser( + description="Use this to download pretrained models. This script is intended to download models via url only. 
" + "CAREFUL: This script will overwrite " + "existing models (if they share the same trainer class and plans as " + "the pretrained model.") + parser.add_argument("url", type=str, help='URL of the pretrained model') + args = parser.parse_args() + url = args.url + download_and_install_from_url(url) + + +def install_from_zip_entry_point(): + import argparse + parser = argparse.ArgumentParser( + description="Use this to install a zip file containing a pretrained model.") + parser.add_argument("zip", type=str, help='zip file') + args = parser.parse_args() + zip = args.zip + install_model_from_zip_file(zip) + + +def export_pretrained_model_entry(): + import argparse + parser = argparse.ArgumentParser( + description="Use this to export a trained model as a zip file.") + parser.add_argument('-d', type=str, required=True, help='Dataset name or id') + parser.add_argument('-o', type=str, required=True, help='Output file name') + parser.add_argument('-c', nargs='+', type=str, required=False, + default=('3d_lowres', '3d_fullres', '2d', '3d_cascade_fullres'), + help="List of configuration names") + parser.add_argument('-tr', required=False, type=str, default='nnUNetTrainer', help='Trainer class') + parser.add_argument('-p', required=False, type=str, default='nnUNetPlans', help='plans identifier') + parser.add_argument('-f', required=False, nargs='+', type=str, default=(0, 1, 2, 3, 4), help='list of fold ids') + parser.add_argument('-chk', required=False, nargs='+', type=str, default=('checkpoint_final.pth', ), + help='Lis tof checkpoint names to export. Default: checkpoint_final.pth') + parser.add_argument('--not_strict', action='store_false', default=False, required=False, help='Set this to allow missing folds and/or configurations') + parser.add_argument('--exp_cv_preds', action='store_true', required=False, help='Set this to export the cross-validation predictions as well') + args = parser.parse_args() + + export_pretrained_model(dataset_name_or_id=args.d, output_file=args.o, configurations=args.c, trainer=args.tr, + plans_identifier=args.p, folds=args.f, strict=not args.not_strict, save_checkpoints=args.chk, + export_crossval_predictions=args.exp_cv_preds) diff --git a/docker/template/src/nnunetv2/model_sharing/model_download.py b/docker/template/src/nnunetv2/model_sharing/model_download.py new file mode 100644 index 0000000..02dac5f --- /dev/null +++ b/docker/template/src/nnunetv2/model_sharing/model_download.py @@ -0,0 +1,47 @@ +from typing import Optional + +import requests +from batchgenerators.utilities.file_and_folder_operations import * +from time import time +from nnunetv2.model_sharing.model_import import install_model_from_zip_file +from nnunetv2.paths import nnUNet_results +from tqdm import tqdm + + +def download_and_install_from_url(url): + assert nnUNet_results is not None, "Cannot install model because network_training_output_dir is not " \ + "set (RESULTS_FOLDER missing as environment variable, see " \ + "Installation instructions)" + print('Downloading pretrained model from url:', url) + import http.client + http.client.HTTPConnection._http_vsn = 10 + http.client.HTTPConnection._http_vsn_str = 'HTTP/1.0' + + import os + home = os.path.expanduser('~') + random_number = int(time() * 1e7) + tempfile = join(home, f'.nnunetdownload_{str(random_number)}') + + try: + download_file(url=url, local_filename=tempfile, chunk_size=8192 * 16) + print("Download finished. 
Extracting...") + install_model_from_zip_file(tempfile) + print("Done") + except Exception as e: + raise e + finally: + if isfile(tempfile): + os.remove(tempfile) + + +def download_file(url: str, local_filename: str, chunk_size: Optional[int] = 8192 * 16) -> str: + # borrowed from https://stackoverflow.com/questions/16694907/download-large-file-in-python-with-requests + # NOTE the stream=True parameter below + with requests.get(url, stream=True, timeout=100) as r: + r.raise_for_status() + with tqdm.wrapattr(open(local_filename, 'wb'), "write", total=int(r.headers.get("Content-Length"))) as f: + for chunk in r.iter_content(chunk_size=chunk_size): + f.write(chunk) + return local_filename + + diff --git a/docker/template/src/nnunetv2/model_sharing/model_export.py b/docker/template/src/nnunetv2/model_sharing/model_export.py new file mode 100644 index 0000000..51eb455 --- /dev/null +++ b/docker/template/src/nnunetv2/model_sharing/model_export.py @@ -0,0 +1,124 @@ +import zipfile + +from nnunetv2.utilities.file_path_utilities import * + + +def export_pretrained_model(dataset_name_or_id: Union[int, str], output_file: str, + configurations: Tuple[str] = ("2d", "3d_lowres", "3d_fullres", "3d_cascade_fullres"), + trainer: str = 'nnUNetTrainer', + plans_identifier: str = 'nnUNetPlans', + folds: Tuple[int, ...] = (0, 1, 2, 3, 4), + strict: bool = True, + save_checkpoints: Tuple[str, ...] = ('checkpoint_final.pth',), + export_crossval_predictions: bool = False) -> None: + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + with(zipfile.ZipFile(output_file, 'w', zipfile.ZIP_DEFLATED)) as zipf: + for c in configurations: + print(f"Configuration {c}") + trainer_output_dir = get_output_folder(dataset_name, trainer, plans_identifier, c) + + if not isdir(trainer_output_dir): + if strict: + raise RuntimeError(f"{dataset_name} is missing the trained model of configuration {c}") + else: + continue + + expected_fold_folder = [f"fold_{i}" if i != 'all' else 'fold_all' for i in folds] + assert all([isdir(join(trainer_output_dir, i)) for i in expected_fold_folder]), \ + f"not all requested folds are present; {dataset_name} {c}; requested folds: {folds}" + + assert isfile(join(trainer_output_dir, "plans.json")), f"plans.json missing, {dataset_name} {c}" + + for fold_folder in expected_fold_folder: + print(f"Exporting {fold_folder}") + # debug.json, does not exist yet + source_file = join(trainer_output_dir, fold_folder, "debug.json") + if isfile(source_file): + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + # all requested checkpoints + for chk in save_checkpoints: + source_file = join(trainer_output_dir, fold_folder, chk) + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + # progress.png + source_file = join(trainer_output_dir, fold_folder, "progress.png") + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + # if it exists, network architecture.png + source_file = join(trainer_output_dir, fold_folder, "network_architecture.pdf") + if isfile(source_file): + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + # validation folder with all predicted segmentations etc + if export_crossval_predictions: + source_folder = join(trainer_output_dir, fold_folder, "validation") + files = [i for i in subfiles(source_folder, join=False) if not i.endswith('.npz') and not i.endswith('.pkl')] + for f in files: + zipf.write(join(source_folder, f), os.path.relpath(join(source_folder, f), nnUNet_results)) + # just the 
summary.json file from the validation + else: + source_file = join(trainer_output_dir, fold_folder, "validation", "summary.json") + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + source_folder = join(trainer_output_dir, f'crossval_results_folds_{folds_tuple_to_string(folds)}') + if isdir(source_folder): + if export_crossval_predictions: + source_files = subfiles(source_folder, join=True) + else: + source_files = [ + join(trainer_output_dir, f'crossval_results_folds_{folds_tuple_to_string(folds)}', i) for i in + ['summary.json', 'postprocessing.pkl', 'postprocessing.json'] + ] + for s in source_files: + if isfile(s): + zipf.write(s, os.path.relpath(s, nnUNet_results)) + # plans + source_file = join(trainer_output_dir, "plans.json") + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + # fingerprint + source_file = join(trainer_output_dir, "dataset_fingerprint.json") + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + # dataset + source_file = join(trainer_output_dir, "dataset.json") + zipf.write(source_file, os.path.relpath(source_file, nnUNet_results)) + + ensemble_dir = join(nnUNet_results, dataset_name, 'ensembles') + + if not isdir(ensemble_dir): + print("No ensemble directory found for task", dataset_name_or_id) + return + subd = subdirs(ensemble_dir, join=False) + # figure out whether the models in the ensemble are all within the exported models here + for ens in subd: + identifiers, folds = convert_ensemble_folder_to_model_identifiers_and_folds(ens) + ok = True + for i in identifiers: + tr, pl, c = convert_identifier_to_trainer_plans_config(i) + if tr == trainer and pl == plans_identifier and c in configurations: + pass + else: + ok = False + if ok: + print(f'found matching ensemble: {ens}') + source_folder = join(ensemble_dir, ens) + if export_crossval_predictions: + source_files = subfiles(source_folder, join=True) + else: + source_files = [ + join(source_folder, i) for i in + ['summary.json', 'postprocessing.pkl', 'postprocessing.json'] if isfile(join(source_folder, i)) + ] + for s in source_files: + zipf.write(s, os.path.relpath(s, nnUNet_results)) + inference_information_file = join(nnUNet_results, dataset_name, 'inference_information.json') + if isfile(inference_information_file): + zipf.write(inference_information_file, os.path.relpath(inference_information_file, nnUNet_results)) + inference_information_txt_file = join(nnUNet_results, dataset_name, 'inference_information.txt') + if isfile(inference_information_txt_file): + zipf.write(inference_information_txt_file, os.path.relpath(inference_information_txt_file, nnUNet_results)) + print('Done') + + +if __name__ == '__main__': + export_pretrained_model(2, '/home/fabian/temp/dataset2.zip', strict=False, export_crossval_predictions=True, folds=(0, )) diff --git a/docker/template/src/nnunetv2/model_sharing/model_import.py b/docker/template/src/nnunetv2/model_sharing/model_import.py new file mode 100644 index 0000000..0356e90 --- /dev/null +++ b/docker/template/src/nnunetv2/model_sharing/model_import.py @@ -0,0 +1,8 @@ +import zipfile + +from nnunetv2.paths import nnUNet_results + + +def install_model_from_zip_file(zip_file: str): + with zipfile.ZipFile(zip_file, 'r') as zip_ref: + zip_ref.extractall(nnUNet_results) \ No newline at end of file diff --git a/docker/template/src/nnunetv2/nets/LightMUNet.py b/docker/template/src/nnunetv2/nets/LightMUNet.py new file mode 100644 index 0000000..a26c029 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/LightMUNet.py 
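# --- annotation, not part of the patch: a hedged usage sketch for the
# model_sharing helpers added above. The function signatures match the files in
# this diff; the dataset id (102, as in Dataset102_TriALS) and the zip path are
# illustrative only, and nnUNet_results must be configured on both machines.
from nnunetv2.model_sharing.model_export import export_pretrained_model
from nnunetv2.model_sharing.model_import import install_model_from_zip_file

# machine A: pack folds 0-4 of the trained 3d_fullres model into a zip
export_pretrained_model(dataset_name_or_id=102, output_file='/tmp/trials_model.zip',
                        configurations=('3d_fullres',), folds=(0, 1, 2, 3, 4))
# machine B: unpack the zip into the local nnUNet_results tree
install_model_from_zip_file('/tmp/trials_model.zip')
# --- end annotation ---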
@@ -0,0 +1,287 @@ +from __future__ import annotations + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +from monai.networks.blocks.convolutions import Convolution +from monai.networks.blocks.segresnet_block import ResBlock, get_conv_layer, get_upsample_layer +from monai.networks.layers.factories import Dropout +from monai.networks.layers.utils import get_act_layer, get_norm_layer +from monai.utils import UpsampleMode + +from mamba_ssm import Mamba + + +def get_dwconv_layer( + spatial_dims: int, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, bias: bool = False +): + depth_conv = Convolution(spatial_dims=spatial_dims, in_channels=in_channels, out_channels=in_channels, + strides=stride, kernel_size=kernel_size, bias=bias, conv_only=True, groups=in_channels) + point_conv = Convolution(spatial_dims=spatial_dims, in_channels=in_channels, out_channels=out_channels, + strides=stride, kernel_size=1, bias=bias, conv_only=True, groups=1) + return torch.nn.Sequential(depth_conv, point_conv) + +class RVMLayer(nn.Module): + def __init__(self, input_dim, output_dim, d_state = 16, d_conv = 4, expand = 2): + super().__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.norm = nn.LayerNorm(input_dim) + self.mamba = Mamba( + d_model=input_dim, # Model dimension d_model + d_state=d_state, # SSM state expansion factor + d_conv=d_conv, # Local convolution width + expand=expand, # Block expansion factor + ) + self.proj = nn.Linear(input_dim, output_dim) + self.skip_scale= nn.Parameter(torch.ones(1)) + + def forward(self, x): + if x.dtype == torch.float16: + x = x.type(torch.float32) + B, C = x.shape[:2] + assert C == self.input_dim + n_tokens = x.shape[2:].numel() + img_dims = x.shape[2:] + x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2) + x_norm = self.norm(x_flat) + x_mamba = self.mamba(x_norm) + self.skip_scale * x_flat + x_mamba = self.norm(x_mamba) + x_mamba = self.proj(x_mamba) + out = x_mamba.transpose(-1, -2).reshape(B, self.output_dim, *img_dims) + return out + + +def get_rvm_layer( + spatial_dims: int, in_channels: int, out_channels: int, stride: int = 1 +): + mamba_layer = RVMLayer(input_dim=in_channels, output_dim=out_channels) + if stride != 1: + if spatial_dims==2: + return nn.Sequential(mamba_layer, nn.MaxPool2d(kernel_size=stride, stride=stride)) + if spatial_dims==3: + return nn.Sequential(mamba_layer, nn.MaxPool3d(kernel_size=stride, stride=stride)) + return mamba_layer + + +class ResMambaBlock(nn.Module): + + def __init__( + self, + spatial_dims: int, + in_channels: int, + norm: tuple | str, + kernel_size: int = 3, + act: tuple | str = ("RELU", {"inplace": True}), + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2 or 3. + in_channels: number of input channels. + norm: feature normalization type and arguments. + kernel_size: convolution kernel size, the value should be an odd number. Defaults to 3. + act: activation type and arguments. Defaults to ``RELU``. 
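            Note (added for clarity): unlike the convolutional ResBlock it mirrors, this
            block replaces both convolutions with RVM layers (LayerNorm -> Mamba with a
            learned skip scale -> linear projection), so tokens are mixed globally while
            the channel count stays at ``in_channels``.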
+ """ + + super().__init__() + + if kernel_size % 2 != 1: + raise AssertionError("kernel_size should be an odd number.") + + self.norm1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.norm2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.act = get_act_layer(act) + self.conv1 = get_rvm_layer( + spatial_dims, in_channels=in_channels, out_channels=in_channels + ) + self.conv2 = get_rvm_layer( + spatial_dims, in_channels=in_channels, out_channels=in_channels + ) + + def forward(self, x): + identity = x + + x = self.norm1(x) + x = self.act(x) + x = self.conv1(x) + + x = self.norm2(x) + x = self.act(x) + x = self.conv2(x) + + x += identity + + return x + + +class ResUpBlock(nn.Module): + + def __init__( + self, + spatial_dims: int, + in_channels: int, + norm: tuple | str, + kernel_size: int = 3, + act: tuple | str = ("RELU", {"inplace": True}), + ) -> None: + """ + Args: + spatial_dims: number of spatial dimensions, could be 1, 2 or 3. + in_channels: number of input channels. + norm: feature normalization type and arguments. + kernel_size: convolution kernel size, the value should be an odd number. Defaults to 3. + act: activation type and arguments. Defaults to ``RELU``. + """ + + super().__init__() + + if kernel_size % 2 != 1: + raise AssertionError("kernel_size should be an odd number.") + + self.norm1 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.norm2 = get_norm_layer(name=norm, spatial_dims=spatial_dims, channels=in_channels) + self.act = get_act_layer(act) + self.conv = get_dwconv_layer( + spatial_dims, in_channels=in_channels, out_channels=in_channels, kernel_size=kernel_size + ) + self.skip_scale= nn.Parameter(torch.ones(1)) + + def forward(self, x): + identity = x + + x = self.norm1(x) + x = self.act(x) + x = self.conv(x) + self.skip_scale * identity + x = self.norm2(x) + x = self.act(x) + return x + + +class LightMUNet(nn.Module): + + def __init__( + self, + spatial_dims: int = 3, + init_filters: int = 8, + in_channels: int = 1, + out_channels: int = 2, + dropout_prob: float | None = None, + act: tuple | str = ("RELU", {"inplace": True}), + norm: tuple | str = ("GROUP", {"num_groups": 8}), + norm_name: str = "", + num_groups: int = 8, + use_conv_final: bool = True, + blocks_down: list = [1, 2, 2, 4], + blocks_up: list = [1, 1, 1], + upsample_mode: UpsampleMode | str = UpsampleMode.NONTRAINABLE, + ): + super().__init__() + + if spatial_dims not in (2, 3): + raise ValueError("`spatial_dims` can only be 2 or 3.") + + self.spatial_dims = spatial_dims + self.init_filters = init_filters + self.in_channels = in_channels + self.blocks_down = blocks_down + self.blocks_up = blocks_up + self.dropout_prob = dropout_prob + self.act = act # input options + self.act_mod = get_act_layer(act) + if norm_name: + if norm_name.lower() != "group": + raise ValueError(f"Deprecating option 'norm_name={norm_name}', please use 'norm' instead.") + norm = ("group", {"num_groups": num_groups}) + self.norm = norm + self.upsample_mode = UpsampleMode(upsample_mode) + self.use_conv_final = use_conv_final + self.convInit = get_conv_layer(spatial_dims, in_channels, init_filters) + self.down_layers = self._make_down_layers() + self.up_layers, self.up_samples = self._make_up_layers() + self.conv_final = self._make_final_conv(out_channels) + + if dropout_prob is not None: + self.dropout = Dropout[Dropout.DROPOUT, spatial_dims](dropout_prob) + + def _make_down_layers(self): + down_layers = nn.ModuleList() + 
blocks_down, spatial_dims, filters, norm = (self.blocks_down, self.spatial_dims, self.init_filters, self.norm) + for i, item in enumerate(blocks_down): + layer_in_channels = filters * 2**i + downsample_mamba = ( + get_rvm_layer(spatial_dims, layer_in_channels // 2, layer_in_channels, stride=2) + if i > 0 + else nn.Identity() + ) + down_layer = nn.Sequential( + downsample_mamba, *[ResMambaBlock(spatial_dims, layer_in_channels, norm=norm, act=self.act) for _ in range(item)] + ) + down_layers.append(down_layer) + return down_layers + + def _make_up_layers(self): + up_layers, up_samples = nn.ModuleList(), nn.ModuleList() + upsample_mode, blocks_up, spatial_dims, filters, norm = ( + self.upsample_mode, + self.blocks_up, + self.spatial_dims, + self.init_filters, + self.norm, + ) + n_up = len(blocks_up) + for i in range(n_up): + sample_in_channels = filters * 2 ** (n_up - i) + up_layers.append( + nn.Sequential( + *[ + ResBlock(spatial_dims, sample_in_channels // 2, norm=norm, act=self.act) + for _ in range(blocks_up[i]) + ] + ) + ) + up_samples.append( + nn.Sequential( + *[ + get_conv_layer(spatial_dims, sample_in_channels, sample_in_channels // 2, kernel_size=1), + get_upsample_layer(spatial_dims, sample_in_channels // 2, upsample_mode=upsample_mode), + ] + ) + ) + return up_layers, up_samples + + def _make_final_conv(self, out_channels: int): + return nn.Sequential( + get_norm_layer(name=self.norm, spatial_dims=self.spatial_dims, channels=self.init_filters), + self.act_mod, + get_conv_layer(self.spatial_dims, self.init_filters, out_channels, kernel_size=1, bias=True), + ) + + def encode(self, x: torch.Tensor) -> tuple[torch.Tensor, list[torch.Tensor]]: + x = self.convInit(x) + if self.dropout_prob is not None: + x = self.dropout(x) + down_x = [] + + for down in self.down_layers: + x = down(x) + down_x.append(x) + + return x, down_x + + def decode(self, x: torch.Tensor, down_x: list[torch.Tensor]) -> torch.Tensor: + for i, (up, upl) in enumerate(zip(self.up_samples, self.up_layers)): + x = up(x) + down_x[i + 1] + x = upl(x) + + if self.use_conv_final: + x = self.conv_final(x) + return x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x, down_x = self.encode(x) + down_x.reverse() + + x = self.decode(x, down_x) + return x \ No newline at end of file diff --git a/docker/template/src/nnunetv2/nets/UMambaBot.py b/docker/template/src/nnunetv2/nets/UMambaBot.py new file mode 100644 index 0000000..863ee05 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/UMambaBot.py @@ -0,0 +1,269 @@ +import numpy as np +import torch +from torch import nn +from typing import Union, Type, List, Tuple + +from dynamic_network_architectures.building_blocks.helper import get_matching_convtransp +from dynamic_network_architectures.building_blocks.plain_conv_encoder import PlainConvEncoder +from dynamic_network_architectures.building_blocks.residual import StackedResidualBlocks +from dynamic_network_architectures.building_blocks.residual_encoders import ResidualEncoder +from dynamic_network_architectures.building_blocks.residual import BasicBlockD, BottleneckD +from torch.nn.modules.conv import _ConvNd +from torch.nn.modules.dropout import _DropoutNd +from dynamic_network_architectures.building_blocks.helper import convert_conv_op_to_dim + +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op +from nnunetv2.utilities.network_initialization import 
InitWeights_He +from mamba_ssm import Mamba + +class UNetResDecoder(nn.Module): + def __init__(self, + encoder: Union[PlainConvEncoder, ResidualEncoder], + num_classes: int, + n_conv_per_stage: Union[int, Tuple[int, ...], List[int]], + deep_supervision, nonlin_first: bool = False): + """ + This class needs the skips of the encoder as input in its forward. + + the encoder goes all the way to the bottleneck, so that's where the decoder picks up. stages in the decoder + are sorted by order of computation, so the first stage has the lowest resolution and takes the bottleneck + features and the lowest skip as inputs + the decoder has two (three) parts in each stage: + 1) conv transpose to upsample the feature maps of the stage below it (or the bottleneck in case of the first stage) + 2) n_conv_per_stage conv blocks to let the two inputs get to know each other and merge + 3) (optional if deep_supervision=True) a segmentation output Todo: enable upsample logits? + :param encoder: + :param num_classes: + :param n_conv_per_stage: + :param deep_supervision: + """ + super().__init__() + self.deep_supervision = deep_supervision + self.encoder = encoder + self.num_classes = num_classes + n_stages_encoder = len(encoder.output_channels) + if isinstance(n_conv_per_stage, int): + n_conv_per_stage = [n_conv_per_stage] * (n_stages_encoder - 1) + assert len(n_conv_per_stage) == n_stages_encoder - 1, "n_conv_per_stage must have as many entries as we have " \ + "resolution stages - 1 (n_stages in encoder - 1), " \ + "here: %d" % n_stages_encoder + + transpconv_op = get_matching_convtransp(conv_op=encoder.conv_op) + + # we start with the bottleneck and work out way up + stages = [] + transpconvs = [] + seg_layers = [] + for s in range(1, n_stages_encoder): + input_features_below = encoder.output_channels[-s] + input_features_skip = encoder.output_channels[-(s + 1)] + stride_for_transpconv = encoder.strides[-s] + transpconvs.append(transpconv_op( + input_features_below, input_features_skip, stride_for_transpconv, stride_for_transpconv, + bias=encoder.conv_bias + )) + # input features to conv is 2x input_features_skip (concat input_features_skip with transpconv output) + stages.append(StackedResidualBlocks( + n_blocks = n_conv_per_stage[s-1], + conv_op = encoder.conv_op, + input_channels = 2 * input_features_skip, + output_channels = input_features_skip, + kernel_size = encoder.kernel_sizes[-(s + 1)], + initial_stride = 1, + conv_bias = encoder.conv_bias, + norm_op = encoder.norm_op, + norm_op_kwargs = encoder.norm_op_kwargs, + dropout_op = encoder.dropout_op, + dropout_op_kwargs = encoder.dropout_op_kwargs, + nonlin = encoder.nonlin, + nonlin_kwargs = encoder.nonlin_kwargs, + )) + + # we always build the deep supervision outputs so that we can always load parameters. If we don't do this + # then a model trained with deep_supervision=True could not easily be loaded at inference time where + # deep supervision is not needed. 
It's just a convenience thing + seg_layers.append(encoder.conv_op(input_features_skip, num_classes, 1, 1, 0, bias=True)) + + self.stages = nn.ModuleList(stages) + self.transpconvs = nn.ModuleList(transpconvs) + self.seg_layers = nn.ModuleList(seg_layers) + + def forward(self, skips): + """ + we expect to get the skips in the order they were computed, so the bottleneck should be the last entry + :param skips: + :return: + """ + lres_input = skips[-1] + seg_outputs = [] + for s in range(len(self.stages)): + x = self.transpconvs[s](lres_input) + x = torch.cat((x, skips[-(s+2)]), 1) + x = self.stages[s](x) + if self.deep_supervision: + seg_outputs.append(self.seg_layers[s](x)) + elif s == (len(self.stages) - 1): + seg_outputs.append(self.seg_layers[-1](x)) + lres_input = x + + # invert seg outputs so that the largest segmentation prediction is returned first + seg_outputs = seg_outputs[::-1] + + if not self.deep_supervision: + r = seg_outputs[0] + else: + r = seg_outputs + return r + + def compute_conv_feature_map_size(self, input_size): + """ + IMPORTANT: input_size is the input_size of the encoder! + :param input_size: + :return: + """ + # first we need to compute the skip sizes. Skip bottleneck because all output feature maps of our ops will at + # least have the size of the skip above that (therefore -1) + skip_sizes = [] + for s in range(len(self.encoder.strides) - 1): + skip_sizes.append([i // j for i, j in zip(input_size, self.encoder.strides[s])]) + input_size = skip_sizes[-1] + # print(skip_sizes) + + assert len(skip_sizes) == len(self.stages) + + # our ops are the other way around, so let's match things up + output = np.int64(0) + for s in range(len(self.stages)): + # print(skip_sizes[-(s+1)], self.encoder.output_channels[-(s+2)]) + # conv blocks + output += self.stages[s].compute_conv_feature_map_size(skip_sizes[-(s+1)]) + # trans conv + output += np.prod([self.encoder.output_channels[-(s+2)], *skip_sizes[-(s+1)]], dtype=np.int64) + # segmentation + if self.deep_supervision or (s == (len(self.stages) - 1)): + output += np.prod([self.num_classes, *skip_sizes[-(s+1)]], dtype=np.int64) + return output + +class UMambaBot(nn.Module): + def __init__(self, + input_channels: int, + n_stages: int, + features_per_stage: Union[int, List[int], Tuple[int, ...]], + conv_op: Type[_ConvNd], + kernel_sizes: Union[int, List[int], Tuple[int, ...]], + strides: Union[int, List[int], Tuple[int, ...]], + n_conv_per_stage: Union[int, List[int], Tuple[int, ...]], + num_classes: int, + n_conv_per_stage_decoder: Union[int, Tuple[int, ...], List[int]], + conv_bias: bool = False, + norm_op: Union[None, Type[nn.Module]] = None, + norm_op_kwargs: dict = None, + dropout_op: Union[None, Type[_DropoutNd]] = None, + dropout_op_kwargs: dict = None, + nonlin: Union[None, Type[torch.nn.Module]] = None, + nonlin_kwargs: dict = None, + deep_supervision: bool = False, + block: Union[Type[BasicBlockD], Type[BottleneckD]] = BasicBlockD, + bottleneck_channels: Union[int, List[int], Tuple[int, ...]] = None, + stem_channels: int = None + ): + super().__init__() + n_blocks_per_stage = n_conv_per_stage + if isinstance(n_blocks_per_stage, int): + n_blocks_per_stage = [n_blocks_per_stage] * n_stages + if isinstance(n_conv_per_stage_decoder, int): + n_conv_per_stage_decoder = [n_conv_per_stage_decoder] * (n_stages - 1) + assert len(n_blocks_per_stage) == n_stages, "n_blocks_per_stage must have as many entries as we have " \ + f"resolution stages. here: {n_stages}. 
" \ + f"n_blocks_per_stage: {n_blocks_per_stage}" + assert len(n_conv_per_stage_decoder) == (n_stages - 1), "n_conv_per_stage_decoder must have one less entries " \ + f"as we have resolution stages. here: {n_stages} " \ + f"stages, so it should have {n_stages - 1} entries. " \ + f"n_conv_per_stage_decoder: {n_conv_per_stage_decoder}" + self.encoder = ResidualEncoder(input_channels, n_stages, features_per_stage, conv_op, kernel_sizes, strides, + n_blocks_per_stage, conv_bias, norm_op, norm_op_kwargs, dropout_op, + dropout_op_kwargs, nonlin, nonlin_kwargs, block, bottleneck_channels, + return_skips=True, disable_default_stem=False, stem_channels=stem_channels) + # layer norm + self.ln = nn.LayerNorm(features_per_stage[-1]) + self.mamba = Mamba( + d_model=features_per_stage[-1], + d_state=16, + d_conv=4, + expand=2, + ) + self.decoder = UNetResDecoder(self.encoder, num_classes, n_conv_per_stage_decoder, deep_supervision) + + def forward(self, x): + skips = self.encoder(x) + middle_feature = skips[-1] + B, C = middle_feature.shape[:2] + n_tokens = middle_feature.shape[2:].numel() + img_dims = middle_feature.shape[2:] + middle_feature_flat = middle_feature.view(B, C, n_tokens).transpose(-1, -2) + middle_feature_flat = self.ln(middle_feature_flat) + out = self.mamba(middle_feature_flat) + out = out.transpose(-1, -2).view(B, C, *img_dims) + skips[-1] = out + + return self.decoder(skips) + + def compute_conv_feature_map_size(self, input_size): + assert len(input_size) == convert_conv_op_to_dim(self.encoder.conv_op), "just give the image size without color/feature channels or " \ + "batch channel. Do not give input_size=(b, c, x, y(, z)). " \ + "Give input_size=(x, y(, z))!" + return self.encoder.compute_conv_feature_map_size(input_size) + self.decoder.compute_conv_feature_map_size(input_size) + + +def get_umamba_bot_from_plans(plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_input_channels: int, + deep_supervision: bool = True): + """ + we may have to change this in the future to accommodate other plans -> network mappings + + num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the + trainer rather than inferring it again from the plans here. 
+ """ + num_stages = len(configuration_manager.conv_kernel_sizes) + + dim = len(configuration_manager.conv_kernel_sizes[0]) + conv_op = convert_dim_to_conv_op(dim) + + label_manager = plans_manager.get_label_manager(dataset_json) + + segmentation_network_class_name = 'UMambaBot' + network_class = UMambaBot + kwargs = { + 'UMambaBot': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + } + } + + conv_or_blocks_per_stage = { + 'n_conv_per_stage': configuration_manager.n_conv_per_stage_encoder, + 'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder + } + + model = network_class( + input_channels=num_input_channels, + n_stages=num_stages, + features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i, + configuration_manager.unet_max_num_features) for i in range(num_stages)], + conv_op=conv_op, + kernel_sizes=configuration_manager.conv_kernel_sizes, + strides=configuration_manager.pool_op_kernel_sizes, + num_classes=label_manager.num_segmentation_heads, + deep_supervision=deep_supervision, + **conv_or_blocks_per_stage, + **kwargs[segmentation_network_class_name] + ) + model.apply(InitWeights_He(1e-2)) + + return model diff --git a/docker/template/src/nnunetv2/nets/UMambaEnc.py b/docker/template/src/nnunetv2/nets/UMambaEnc.py new file mode 100644 index 0000000..a5ec3c4 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/UMambaEnc.py @@ -0,0 +1,414 @@ +import numpy as np +import torch +from torch import nn +from typing import Union, Type, List, Tuple + +from dynamic_network_architectures.building_blocks.helper import get_matching_convtransp +from dynamic_network_architectures.building_blocks.plain_conv_encoder import PlainConvEncoder + +from dynamic_network_architectures.building_blocks.simple_conv_blocks import StackedConvBlocks +from dynamic_network_architectures.building_blocks.residual import StackedResidualBlocks + +from dynamic_network_architectures.building_blocks.helper import maybe_convert_scalar_to_list, get_matching_pool_op +from dynamic_network_architectures.building_blocks.residual import BasicBlockD, BottleneckD +from torch.nn.modules.conv import _ConvNd +from torch.nn.modules.dropout import _DropoutNd +from torch.cuda.amp import autocast +from dynamic_network_architectures.building_blocks.helper import convert_conv_op_to_dim +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op +from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0 +from nnunetv2.utilities.network_initialization import InitWeights_He +from mamba_ssm import Mamba + +class MambaLayer(nn.Module): + def __init__(self, dim, d_state = 16, d_conv = 4, expand = 2): + super().__init__() + self.dim = dim + self.norm = nn.LayerNorm(dim) + self.mamba = Mamba( + d_model=dim, # Model dimension d_model + d_state=d_state, # SSM state expansion factor + d_conv=d_conv, # Local convolution width + expand=expand, # Block expansion factor + ) + + @autocast(enabled=False) + def forward(self, x): + if x.dtype == torch.float16: + x = x.type(torch.float32) + B, C = x.shape[:2] + assert C == self.dim + n_tokens = x.shape[2:].numel() + img_dims = x.shape[2:] + x_flat = x.reshape(B, C, n_tokens).transpose(-1, -2) + x_norm 
= self.norm(x_flat) + x_mamba = self.mamba(x_norm) + out = x_mamba.transpose(-1, -2).reshape(B, C, *img_dims) + + return out + + +class ResidualMambaEncoder(nn.Module): + def __init__(self, + input_channels: int, + n_stages: int, + features_per_stage: Union[int, List[int], Tuple[int, ...]], + conv_op: Type[_ConvNd], + kernel_sizes: Union[int, List[int], Tuple[int, ...]], + strides: Union[int, List[int], Tuple[int, ...], Tuple[Tuple[int, ...], ...]], + n_blocks_per_stage: Union[int, List[int], Tuple[int, ...]], + conv_bias: bool = False, + norm_op: Union[None, Type[nn.Module]] = None, + norm_op_kwargs: dict = None, + dropout_op: Union[None, Type[_DropoutNd]] = None, + dropout_op_kwargs: dict = None, + nonlin: Union[None, Type[torch.nn.Module]] = None, + nonlin_kwargs: dict = None, + block: Union[Type[BasicBlockD], Type[BottleneckD]] = BasicBlockD, + bottleneck_channels: Union[int, List[int], Tuple[int, ...]] = None, + return_skips: bool = False, + disable_default_stem: bool = False, + stem_channels: int = None, + pool_type: str = 'conv', + stochastic_depth_p: float = 0.0, + squeeze_excitation: bool = False, + squeeze_excitation_reduction_ratio: float = 1. / 16 + ): + super().__init__() + if isinstance(kernel_sizes, int): + kernel_sizes = [kernel_sizes] * n_stages + if isinstance(features_per_stage, int): + features_per_stage = [features_per_stage] * n_stages + if isinstance(n_blocks_per_stage, int): + n_blocks_per_stage = [n_blocks_per_stage] * n_stages + if isinstance(strides, int): + strides = [strides] * n_stages + if bottleneck_channels is None or isinstance(bottleneck_channels, int): + bottleneck_channels = [bottleneck_channels] * n_stages + assert len( + bottleneck_channels) == n_stages, "bottleneck_channels must be None or have as many entries as we have resolution stages (n_stages)" + assert len( + kernel_sizes) == n_stages, "kernel_sizes must have as many entries as we have resolution stages (n_stages)" + assert len( + n_blocks_per_stage) == n_stages, "n_conv_per_stage must have as many entries as we have resolution stages (n_stages)" + assert len( + features_per_stage) == n_stages, "features_per_stage must have as many entries as we have resolution stages (n_stages)" + assert len(strides) == n_stages, "strides must have as many entries as we have resolution stages (n_stages). " \ + "Important: first entry is recommended to be 1, else we run strided conv drectly on the input" + + pool_op = get_matching_pool_op(conv_op, pool_type=pool_type) if pool_type != 'conv' else None + + # build a stem, Todo maybe we need more flexibility for this in the future. For now, if you need a custom + # stem you can just disable the stem and build your own. 
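        # (added comment) each resolution stage built below is a StackedResidualBlocks
        # stack followed by a MambaLayer of the same width, so every skip handed to the
        # decoder has already been globally token-mixed.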
+ # THE STEM DOES NOT DO STRIDE/POOLING IN THIS IMPLEMENTATION + if not disable_default_stem: + if stem_channels is None: + stem_channels = features_per_stage[0] + self.stem = StackedConvBlocks(1, conv_op, input_channels, stem_channels, kernel_sizes[0], 1, conv_bias, + norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs) + input_channels = stem_channels + else: + self.stem = None + + # now build the network + stages = [] + mamba_layers = [] + for s in range(n_stages): + stride_for_conv = strides[s] if pool_op is None else 1 + + stage = StackedResidualBlocks( + n_blocks_per_stage[s], conv_op, input_channels, features_per_stage[s], kernel_sizes[s], stride_for_conv, + conv_bias, norm_op, norm_op_kwargs, dropout_op, dropout_op_kwargs, nonlin, nonlin_kwargs, + block=block, bottleneck_channels=bottleneck_channels[s], stochastic_depth_p=stochastic_depth_p, + squeeze_excitation=squeeze_excitation, + squeeze_excitation_reduction_ratio=squeeze_excitation_reduction_ratio + ) + + if pool_op is not None: + stage = nn.Sequential(pool_op(strides[s]), stage) + + stages.append(stage) + input_channels = features_per_stage[s] + + mamba_layers.append(MambaLayer(input_channels)) + + #self.stages = nn.Sequential(*stages) + self.stages = nn.ModuleList(stages) + self.output_channels = features_per_stage + self.strides = [maybe_convert_scalar_to_list(conv_op, i) for i in strides] + self.return_skips = return_skips + + # we store some things that a potential decoder needs + self.conv_op = conv_op + self.norm_op = norm_op + self.norm_op_kwargs = norm_op_kwargs + self.nonlin = nonlin + self.nonlin_kwargs = nonlin_kwargs + self.dropout_op = dropout_op + self.dropout_op_kwargs = dropout_op_kwargs + self.conv_bias = conv_bias + self.kernel_sizes = kernel_sizes + + self.mamba_layers = nn.ModuleList(mamba_layers) + + def forward(self, x): + if self.stem is not None: + x = self.stem(x) + ret = [] + #for s in self.stages: + for s in range(len(self.stages)): + #x = s(x) + x = self.stages[s](x) + x = self.mamba_layers[s](x) + ret.append(x) + if self.return_skips: + return ret + else: + return ret[-1] + + def compute_conv_feature_map_size(self, input_size): + if self.stem is not None: + output = self.stem.compute_conv_feature_map_size(input_size) + else: + output = np.int64(0) + + for s in range(len(self.stages)): + output += self.stages[s].compute_conv_feature_map_size(input_size) + input_size = [i // j for i, j in zip(input_size, self.strides[s])] + + return output + +class UNetResDecoder(nn.Module): + def __init__(self, + encoder: Union[PlainConvEncoder, ResidualMambaEncoder], + num_classes: int, + n_conv_per_stage: Union[int, Tuple[int, ...], List[int]], + deep_supervision, nonlin_first: bool = False): + """ + This class needs the skips of the encoder as input in its forward. + + the encoder goes all the way to the bottleneck, so that's where the decoder picks up. stages in the decoder + are sorted by order of computation, so the first stage has the lowest resolution and takes the bottleneck + features and the lowest skip as inputs + the decoder has two (three) parts in each stage: + 1) conv transpose to upsample the feature maps of the stage below it (or the bottleneck in case of the first stage) + 2) n_conv_per_stage conv blocks to let the two inputs get to know each other and merge + 3) (optional if deep_supervision=True) a segmentation output Todo: enable upsample logits? 
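        Example (for orientation): with a 6-stage encoder the decoder has 5 stages; its
        first stage transpose-convolves skips[-1] (the bottleneck), concatenates skips[-2]
        and runs n_conv_per_stage[0] residual blocks on the result.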
+ :param encoder: + :param num_classes: + :param n_conv_per_stage: + :param deep_supervision: + """ + super().__init__() + self.deep_supervision = deep_supervision + self.encoder = encoder + self.num_classes = num_classes + n_stages_encoder = len(encoder.output_channels) + if isinstance(n_conv_per_stage, int): + n_conv_per_stage = [n_conv_per_stage] * (n_stages_encoder - 1) + assert len(n_conv_per_stage) == n_stages_encoder - 1, "n_conv_per_stage must have as many entries as we have " \ + "resolution stages - 1 (n_stages in encoder - 1), " \ + "here: %d" % n_stages_encoder + + transpconv_op = get_matching_convtransp(conv_op=encoder.conv_op) + + # we start with the bottleneck and work out way up + stages = [] + transpconvs = [] + seg_layers = [] + for s in range(1, n_stages_encoder): + input_features_below = encoder.output_channels[-s] + input_features_skip = encoder.output_channels[-(s + 1)] + stride_for_transpconv = encoder.strides[-s] + transpconvs.append(transpconv_op( + input_features_below, input_features_skip, stride_for_transpconv, stride_for_transpconv, + bias=encoder.conv_bias + )) + # input features to conv is 2x input_features_skip (concat input_features_skip with transpconv output) + stages.append(StackedResidualBlocks( + n_blocks = n_conv_per_stage[s-1], + conv_op = encoder.conv_op, + input_channels = 2 * input_features_skip, + output_channels = input_features_skip, + kernel_size = encoder.kernel_sizes[-(s + 1)], + initial_stride = 1, + conv_bias = encoder.conv_bias, + norm_op = encoder.norm_op, + norm_op_kwargs = encoder.norm_op_kwargs, + dropout_op = encoder.dropout_op, + dropout_op_kwargs = encoder.dropout_op_kwargs, + nonlin = encoder.nonlin, + nonlin_kwargs = encoder.nonlin_kwargs, + )) + # we always build the deep supervision outputs so that we can always load parameters. If we don't do this + # then a model trained with deep_supervision=True could not easily be loaded at inference time where + # deep supervision is not needed. It's just a convenience thing + seg_layers.append(encoder.conv_op(input_features_skip, num_classes, 1, 1, 0, bias=True)) + + self.stages = nn.ModuleList(stages) + self.transpconvs = nn.ModuleList(transpconvs) + self.seg_layers = nn.ModuleList(seg_layers) + + def forward(self, skips): + """ + we expect to get the skips in the order they were computed, so the bottleneck should be the last entry + :param skips: + :return: + """ + lres_input = skips[-1] + seg_outputs = [] + for s in range(len(self.stages)): + x = self.transpconvs[s](lres_input) + x = torch.cat((x, skips[-(s+2)]), 1) + x = self.stages[s](x) + if self.deep_supervision: + seg_outputs.append(self.seg_layers[s](x)) + elif s == (len(self.stages) - 1): + seg_outputs.append(self.seg_layers[-1](x)) + lres_input = x + + # invert seg outputs so that the largest segmentation prediction is returned first + seg_outputs = seg_outputs[::-1] + + if not self.deep_supervision: + r = seg_outputs[0] + else: + r = seg_outputs + return r + + def compute_conv_feature_map_size(self, input_size): + """ + IMPORTANT: input_size is the input_size of the encoder! + :param input_size: + :return: + """ + # first we need to compute the skip sizes. 
Skip bottleneck because all output feature maps of our ops will at + # least have the size of the skip above that (therefore -1) + skip_sizes = [] + for s in range(len(self.encoder.strides) - 1): + skip_sizes.append([i // j for i, j in zip(input_size, self.encoder.strides[s])]) + input_size = skip_sizes[-1] + # print(skip_sizes) + + assert len(skip_sizes) == len(self.stages) + + # our ops are the other way around, so let's match things up + output = np.int64(0) + for s in range(len(self.stages)): + # print(skip_sizes[-(s+1)], self.encoder.output_channels[-(s+2)]) + # conv blocks + output += self.stages[s].compute_conv_feature_map_size(skip_sizes[-(s+1)]) + # trans conv + output += np.prod([self.encoder.output_channels[-(s+2)], *skip_sizes[-(s+1)]], dtype=np.int64) + # segmentation + if self.deep_supervision or (s == (len(self.stages) - 1)): + output += np.prod([self.num_classes, *skip_sizes[-(s+1)]], dtype=np.int64) + return output + +class UMambaEnc(nn.Module): + def __init__(self, + input_channels: int, + n_stages: int, + features_per_stage: Union[int, List[int], Tuple[int, ...]], + conv_op: Type[_ConvNd], + kernel_sizes: Union[int, List[int], Tuple[int, ...]], + strides: Union[int, List[int], Tuple[int, ...]], + n_conv_per_stage: Union[int, List[int], Tuple[int, ...]], + num_classes: int, + n_conv_per_stage_decoder: Union[int, Tuple[int, ...], List[int]], + conv_bias: bool = False, + norm_op: Union[None, Type[nn.Module]] = None, + norm_op_kwargs: dict = None, + dropout_op: Union[None, Type[_DropoutNd]] = None, + dropout_op_kwargs: dict = None, + nonlin: Union[None, Type[torch.nn.Module]] = None, + nonlin_kwargs: dict = None, + deep_supervision: bool = False, + block: Union[Type[BasicBlockD], Type[BottleneckD]] = BasicBlockD, + bottleneck_channels: Union[int, List[int], Tuple[int, ...]] = None, + stem_channels: int = None + ): + super().__init__() + n_blocks_per_stage = n_conv_per_stage + if isinstance(n_blocks_per_stage, int): + n_blocks_per_stage = [n_blocks_per_stage] * n_stages + if isinstance(n_conv_per_stage_decoder, int): + n_conv_per_stage_decoder = [n_conv_per_stage_decoder] * (n_stages - 1) + assert len(n_blocks_per_stage) == n_stages, "n_blocks_per_stage must have as many entries as we have " \ + f"resolution stages. here: {n_stages}. " \ + f"n_blocks_per_stage: {n_blocks_per_stage}" + assert len(n_conv_per_stage_decoder) == (n_stages - 1), "n_conv_per_stage_decoder must have one less entries " \ + f"as we have resolution stages. here: {n_stages} " \ + f"stages, so it should have {n_stages - 1} entries. " \ + f"n_conv_per_stage_decoder: {n_conv_per_stage_decoder}" + self.encoder = ResidualMambaEncoder(input_channels, n_stages, features_per_stage, conv_op, kernel_sizes, strides, + n_blocks_per_stage, conv_bias, norm_op, norm_op_kwargs, dropout_op, + dropout_op_kwargs, nonlin, nonlin_kwargs, block, bottleneck_channels, + return_skips=True, disable_default_stem=False, stem_channels=stem_channels) + self.decoder = UNetResDecoder(self.encoder, num_classes, n_conv_per_stage_decoder, deep_supervision) + + def forward(self, x): + skips = self.encoder(x) + return self.decoder(skips) + + def compute_conv_feature_map_size(self, input_size): + assert len(input_size) == convert_conv_op_to_dim(self.encoder.conv_op), "just give the image size without color/feature channels or " \ + "batch channel. Do not give input_size=(b, c, x, y(, z)). " \ + "Give input_size=(x, y(, z))!" 
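        # (added comment) the sum below counts conv feature-map elements of encoder and
        # decoder only; activations inside the Mamba layers are not included in this
        # estimate. Example call: compute_conv_feature_map_size((128, 128, 128)) for a
        # 3d patch.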
+ return self.encoder.compute_conv_feature_map_size(input_size) + self.decoder.compute_conv_feature_map_size(input_size) + + +def get_umamba_enc_from_plans(plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_input_channels: int, + deep_supervision: bool = True): + """ + we may have to change this in the future to accommodate other plans -> network mappings + + num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the + trainer rather than inferring it again from the plans here. + """ + num_stages = len(configuration_manager.conv_kernel_sizes) + + dim = len(configuration_manager.conv_kernel_sizes[0]) + conv_op = convert_dim_to_conv_op(dim) + + label_manager = plans_manager.get_label_manager(dataset_json) + + segmentation_network_class_name = 'UMambaEnc' + network_class = UMambaEnc + kwargs = { + 'UMambaEnc': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + } + } + + conv_or_blocks_per_stage = { + 'n_conv_per_stage': configuration_manager.n_conv_per_stage_encoder, + 'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder + } + + model = network_class( + input_channels=num_input_channels, + n_stages=num_stages, + features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i, + configuration_manager.unet_max_num_features) for i in range(num_stages)], + conv_op=conv_op, + kernel_sizes=configuration_manager.conv_kernel_sizes, + strides=configuration_manager.pool_op_kernel_sizes, + num_classes=label_manager.num_segmentation_heads, + deep_supervision=deep_supervision, + **conv_or_blocks_per_stage, + **kwargs[segmentation_network_class_name] + ) + model.apply(InitWeights_He(1e-2)) + if network_class == UMambaEnc: + model.apply(init_last_bn_before_add_to_0) + + return model diff --git a/docker/template/src/nnunetv2/nets/__init__.py b/docker/template/src/nnunetv2/nets/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/nets/mednextv1/MedNextV1.py b/docker/template/src/nnunetv2/nets/mednextv1/MedNextV1.py new file mode 100644 index 0000000..3f8cb83 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/mednextv1/MedNextV1.py @@ -0,0 +1,432 @@ +import torch +import torch.nn as nn +import torch.utils.checkpoint as checkpoint + +from nnunetv2.nets.mednextv1.blocks import * + +class MedNeXt(nn.Module): + + def __init__(self, + in_channels: int, + n_channels: int, + n_classes: int, + exp_r: int = 4, # Expansion ratio as in Swin Transformers + kernel_size: int = 7, # Ofcourse can test kernel_size + enc_kernel_size: int = None, + dec_kernel_size: int = None, + deep_supervision: bool = False, # Can be used to test deep supervision + do_res: bool = False, # Can be used to individually test residual connection + do_res_up_down: bool = False, # Additional 'res' connection on up and down convs + checkpoint_style: bool = None, # Either inside block or outside block + block_counts: list = [2,2,2,2,2,2,2,2,2], # Can be used to test staging ratio: + # [3,3,9,3] in Swin as opposed to [2,2,2,2,2] in nnUNet + norm_type = 'group', + dim = '3d', # 2d or 3d + grn = False + ): + + super().__init__() + + self.do_ds = deep_supervision + assert checkpoint_style in [None, 'outside_block'] + self.inside_block_checkpointing = False + self.outside_block_checkpointing 
= False + if checkpoint_style == 'outside_block': + self.outside_block_checkpointing = True + assert dim in ['2d', '3d'] + + if kernel_size is not None: + enc_kernel_size = kernel_size + dec_kernel_size = kernel_size + + if dim == '2d': + conv = nn.Conv2d + elif dim == '3d': + conv = nn.Conv3d + + self.stem = conv(in_channels, n_channels, kernel_size=1) + if type(exp_r) == int: + exp_r = [exp_r for i in range(len(block_counts))] + + self.enc_block_0 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels, + out_channels=n_channels, + exp_r=exp_r[0], + kernel_size=enc_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[0])] + ) + + self.down_0 = MedNeXtDownBlock( + in_channels=n_channels, + out_channels=2*n_channels, + exp_r=exp_r[1], + kernel_size=enc_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim + ) + + self.enc_block_1 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*2, + out_channels=n_channels*2, + exp_r=exp_r[1], + kernel_size=enc_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[1])] + ) + + self.down_1 = MedNeXtDownBlock( + in_channels=2*n_channels, + out_channels=4*n_channels, + exp_r=exp_r[2], + kernel_size=enc_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.enc_block_2 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*4, + out_channels=n_channels*4, + exp_r=exp_r[2], + kernel_size=enc_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[2])] + ) + + self.down_2 = MedNeXtDownBlock( + in_channels=4*n_channels, + out_channels=8*n_channels, + exp_r=exp_r[3], + kernel_size=enc_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.enc_block_3 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*8, + out_channels=n_channels*8, + exp_r=exp_r[3], + kernel_size=enc_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[3])] + ) + + self.down_3 = MedNeXtDownBlock( + in_channels=8*n_channels, + out_channels=16*n_channels, + exp_r=exp_r[4], + kernel_size=enc_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.bottleneck = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*16, + out_channels=n_channels*16, + exp_r=exp_r[4], + kernel_size=dec_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[4])] + ) + + self.up_3 = MedNeXtUpBlock( + in_channels=16*n_channels, + out_channels=8*n_channels, + exp_r=exp_r[5], + kernel_size=dec_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.dec_block_3 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*8, + out_channels=n_channels*8, + exp_r=exp_r[5], + kernel_size=dec_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[5])] + ) + + self.up_2 = MedNeXtUpBlock( + in_channels=8*n_channels, + out_channels=4*n_channels, + exp_r=exp_r[6], + kernel_size=dec_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.dec_block_2 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*4, + out_channels=n_channels*4, + exp_r=exp_r[6], + kernel_size=dec_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in 
range(block_counts[6])] + ) + + self.up_1 = MedNeXtUpBlock( + in_channels=4*n_channels, + out_channels=2*n_channels, + exp_r=exp_r[7], + kernel_size=dec_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.dec_block_1 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels*2, + out_channels=n_channels*2, + exp_r=exp_r[7], + kernel_size=dec_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[7])] + ) + + self.up_0 = MedNeXtUpBlock( + in_channels=2*n_channels, + out_channels=n_channels, + exp_r=exp_r[8], + kernel_size=dec_kernel_size, + do_res=do_res_up_down, + norm_type=norm_type, + dim=dim, + grn=grn + ) + + self.dec_block_0 = nn.Sequential(*[ + MedNeXtBlock( + in_channels=n_channels, + out_channels=n_channels, + exp_r=exp_r[8], + kernel_size=dec_kernel_size, + do_res=do_res, + norm_type=norm_type, + dim=dim, + grn=grn + ) + for i in range(block_counts[8])] + ) + + self.out_0 = OutBlock(in_channels=n_channels, n_classes=n_classes, dim=dim) + + # Used to fix PyTorch checkpointing bug + self.dummy_tensor = nn.Parameter(torch.tensor([1.]), requires_grad=True) + + if deep_supervision: + self.out_1 = OutBlock(in_channels=n_channels*2, n_classes=n_classes, dim=dim) + self.out_2 = OutBlock(in_channels=n_channels*4, n_classes=n_classes, dim=dim) + self.out_3 = OutBlock(in_channels=n_channels*8, n_classes=n_classes, dim=dim) + self.out_4 = OutBlock(in_channels=n_channels*16, n_classes=n_classes, dim=dim) + + self.block_counts = block_counts + + + def iterative_checkpoint(self, sequential_block, x): + """ + This simply forwards x through each block of the sequential_block while + using gradient_checkpointing. This implementation is designed to bypass + the following issue in PyTorch's gradient checkpointing: + https://discuss.pytorch.org/t/checkpoint-with-no-grad-requiring-inputs-problem/19117/9 + """ + for l in sequential_block: + x = checkpoint.checkpoint(l, x, self.dummy_tensor) + return x + + + def forward(self, x): + + x = self.stem(x) + if self.outside_block_checkpointing: + x_res_0 = self.iterative_checkpoint(self.enc_block_0, x) + x = checkpoint.checkpoint(self.down_0, x_res_0, self.dummy_tensor) + x_res_1 = self.iterative_checkpoint(self.enc_block_1, x) + x = checkpoint.checkpoint(self.down_1, x_res_1, self.dummy_tensor) + x_res_2 = self.iterative_checkpoint(self.enc_block_2, x) + x = checkpoint.checkpoint(self.down_2, x_res_2, self.dummy_tensor) + x_res_3 = self.iterative_checkpoint(self.enc_block_3, x) + x = checkpoint.checkpoint(self.down_3, x_res_3, self.dummy_tensor) + + x = self.iterative_checkpoint(self.bottleneck, x) + if self.do_ds: + x_ds_4 = checkpoint.checkpoint(self.out_4, x, self.dummy_tensor) + + x_up_3 = checkpoint.checkpoint(self.up_3, x, self.dummy_tensor) + dec_x = x_res_3 + x_up_3 + x = self.iterative_checkpoint(self.dec_block_3, dec_x) + if self.do_ds: + x_ds_3 = checkpoint.checkpoint(self.out_3, x, self.dummy_tensor) + del x_res_3, x_up_3 + + x_up_2 = checkpoint.checkpoint(self.up_2, x, self.dummy_tensor) + dec_x = x_res_2 + x_up_2 + x = self.iterative_checkpoint(self.dec_block_2, dec_x) + if self.do_ds: + x_ds_2 = checkpoint.checkpoint(self.out_2, x, self.dummy_tensor) + del x_res_2, x_up_2 + + x_up_1 = checkpoint.checkpoint(self.up_1, x, self.dummy_tensor) + dec_x = x_res_1 + x_up_1 + x = self.iterative_checkpoint(self.dec_block_1, dec_x) + if self.do_ds: + x_ds_1 = checkpoint.checkpoint(self.out_1, x, self.dummy_tensor) + del x_res_1, x_up_1 + + x_up_0 = 
checkpoint.checkpoint(self.up_0, x, self.dummy_tensor) + dec_x = x_res_0 + x_up_0 + x = self.iterative_checkpoint(self.dec_block_0, dec_x) + del x_res_0, x_up_0, dec_x + + x = checkpoint.checkpoint(self.out_0, x, self.dummy_tensor) + + else: + x_res_0 = self.enc_block_0(x) + x = self.down_0(x_res_0) + x_res_1 = self.enc_block_1(x) + x = self.down_1(x_res_1) + x_res_2 = self.enc_block_2(x) + x = self.down_2(x_res_2) + x_res_3 = self.enc_block_3(x) + x = self.down_3(x_res_3) + + x = self.bottleneck(x) + if self.do_ds: + x_ds_4 = self.out_4(x) + + x_up_3 = self.up_3(x) + dec_x = x_res_3 + x_up_3 + x = self.dec_block_3(dec_x) + + if self.do_ds: + x_ds_3 = self.out_3(x) + del x_res_3, x_up_3 + + x_up_2 = self.up_2(x) + dec_x = x_res_2 + x_up_2 + x = self.dec_block_2(dec_x) + if self.do_ds: + x_ds_2 = self.out_2(x) + del x_res_2, x_up_2 + + x_up_1 = self.up_1(x) + dec_x = x_res_1 + x_up_1 + x = self.dec_block_1(dec_x) + if self.do_ds: + x_ds_1 = self.out_1(x) + del x_res_1, x_up_1 + + x_up_0 = self.up_0(x) + dec_x = x_res_0 + x_up_0 + x = self.dec_block_0(dec_x) + del x_res_0, x_up_0, dec_x + + x = self.out_0(x) + + if self.do_ds: + return [x, x_ds_1, x_ds_2, x_ds_3, x_ds_4] + else: + return x + + +if __name__ == "__main__": + + network = MedNeXt( + in_channels = 1, + n_channels = 32, + n_classes = 13, + exp_r=[2,3,4,4,4,4,4,3,2], # Expansion ratio as in Swin Transformers + # exp_r = 2, + kernel_size=3, # Can test kernel_size + deep_supervision=True, # Can be used to test deep supervision + do_res=True, # Can be used to individually test residual connection + do_res_up_down = True, + # block_counts = [2,2,2,2,2,2,2,2,2], + block_counts = [3,4,8,8,8,8,8,4,3], + checkpoint_style = None, + dim = '2d', + grn=True + + ).cuda() + + # network = MedNeXt_RegularUpDown( + # in_channels = 1, + # n_channels = 32, + # n_classes = 13, + # exp_r=[2,3,4,4,4,4,4,3,2], # Expansion ratio as in Swin Transformers + # kernel_size=3, # Can test kernel_size + # deep_supervision=True, # Can be used to test deep supervision + # do_res=True, # Can be used to individually test residual connection + # block_counts = [2,2,2,2,2,2,2,2,2], + # + # ).cuda() + + def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + print(count_parameters(network)) + + from fvcore.nn import FlopCountAnalysis + from fvcore.nn import parameter_count_table + + # model = ResTranUnet(img_size=128, in_channels=1, num_classes=14, dummy=False).cuda() + x = torch.zeros((1,1,64,64,64), requires_grad=False).cuda() + flops = FlopCountAnalysis(network, x) + print(flops.total()) + + with torch.no_grad(): + print(network) + x = torch.zeros((1, 1, 128, 128, 128)).cuda() + print(network(x)[0].shape) diff --git a/docker/template/src/nnunetv2/nets/mednextv1/__init__.py b/docker/template/src/nnunetv2/nets/mednextv1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/nets/mednextv1/blocks.py b/docker/template/src/nnunetv2/nets/mednextv1/blocks.py new file mode 100644 index 0000000..f8fd4d7 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/mednextv1/blocks.py @@ -0,0 +1,265 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class MedNeXtBlock(nn.Module): + + def __init__(self, + in_channels:int, + out_channels:int, + exp_r:int=4, + kernel_size:int=7, + do_res:int=True, + norm_type:str = 'group', + n_groups:int or None = None, + dim = '3d', + grn = False + ): + + super().__init__() + + self.do_res = do_res + + assert dim in ['2d', '3d'] + 
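        # (added comment) ConvNeXt-style layout: depthwise k*k conv -> GroupNorm/LayerNorm
        # -> 1x1 expansion to exp_r * in_channels with GELU -> optional GRN -> 1x1
        # compression to out_channels, with an optional residual around the whole block.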
self.dim = dim + if self.dim == '2d': + conv = nn.Conv2d + elif self.dim == '3d': + conv = nn.Conv3d + + # First convolution layer with DepthWise Convolutions + self.conv1 = conv( + in_channels = in_channels, + out_channels = in_channels, + kernel_size = kernel_size, + stride = 1, + padding = kernel_size//2, + groups = in_channels if n_groups is None else n_groups, + ) + + # Normalization Layer. GroupNorm is used by default. + if norm_type=='group': + self.norm = nn.GroupNorm( + num_groups=in_channels, + num_channels=in_channels + ) + elif norm_type=='layer': + self.norm = LayerNorm( + normalized_shape=in_channels, + data_format='channels_first' + ) + + # Second convolution (Expansion) layer with Conv3D 1x1x1 + self.conv2 = conv( + in_channels = in_channels, + out_channels = exp_r*in_channels, + kernel_size = 1, + stride = 1, + padding = 0 + ) + + # GeLU activations + self.act = nn.GELU() + + # Third convolution (Compression) layer with Conv3D 1x1x1 + self.conv3 = conv( + in_channels = exp_r*in_channels, + out_channels = out_channels, + kernel_size = 1, + stride = 1, + padding = 0 + ) + + self.grn = grn + if grn: + if dim == '3d': + self.grn_beta = nn.Parameter(torch.zeros(1,exp_r*in_channels,1,1,1), requires_grad=True) + self.grn_gamma = nn.Parameter(torch.zeros(1,exp_r*in_channels,1,1,1), requires_grad=True) + elif dim == '2d': + self.grn_beta = nn.Parameter(torch.zeros(1,exp_r*in_channels,1,1), requires_grad=True) + self.grn_gamma = nn.Parameter(torch.zeros(1,exp_r*in_channels,1,1), requires_grad=True) + + + def forward(self, x, dummy_tensor=None): + + x1 = x + x1 = self.conv1(x1) + x1 = self.act(self.conv2(self.norm(x1))) + if self.grn: + # gamma, beta: learnable affine transform parameters + # X: input of shape (N,C,H,W,D) + if self.dim == '3d': + gx = torch.norm(x1, p=2, dim=(-3, -2, -1), keepdim=True) + elif self.dim == '2d': + gx = torch.norm(x1, p=2, dim=(-2, -1), keepdim=True) + nx = gx / (gx.mean(dim=1, keepdim=True)+1e-6) + x1 = self.grn_gamma * (x1 * nx) + self.grn_beta + x1 + x1 = self.conv3(x1) + if self.do_res: + x1 = x + x1 + return x1 + + +class MedNeXtDownBlock(MedNeXtBlock): + + def __init__(self, in_channels, out_channels, exp_r=4, kernel_size=7, + do_res=False, norm_type = 'group', dim='3d', grn=False): + + super().__init__(in_channels, out_channels, exp_r, kernel_size, + do_res = False, norm_type = norm_type, dim=dim, + grn=grn) + + if dim == '2d': + conv = nn.Conv2d + elif dim == '3d': + conv = nn.Conv3d + self.resample_do_res = do_res + if do_res: + self.res_conv = conv( + in_channels = in_channels, + out_channels = out_channels, + kernel_size = 1, + stride = 2 + ) + + self.conv1 = conv( + in_channels = in_channels, + out_channels = in_channels, + kernel_size = kernel_size, + stride = 2, + padding = kernel_size//2, + groups = in_channels, + ) + + def forward(self, x, dummy_tensor=None): + + x1 = super().forward(x) + + if self.resample_do_res: + res = self.res_conv(x) + x1 = x1 + res + + return x1 + + +class MedNeXtUpBlock(MedNeXtBlock): + + def __init__(self, in_channels, out_channels, exp_r=4, kernel_size=7, + do_res=False, norm_type = 'group', dim='3d', grn = False): + super().__init__(in_channels, out_channels, exp_r, kernel_size, + do_res=False, norm_type = norm_type, dim=dim, + grn=grn) + + self.resample_do_res = do_res + + self.dim = dim + if dim == '2d': + conv = nn.ConvTranspose2d + elif dim == '3d': + conv = nn.ConvTranspose3d + if do_res: + self.res_conv = conv( + in_channels = in_channels, + out_channels = out_channels, + kernel_size = 1, + stride = 2 + 
) + + self.conv1 = conv( + in_channels = in_channels, + out_channels = in_channels, + kernel_size = kernel_size, + stride = 2, + padding = kernel_size//2, + groups = in_channels, + ) + + + def forward(self, x, dummy_tensor=None): + + x1 = super().forward(x) + # Asymmetry but necessary to match shape + + if self.dim == '2d': + x1 = torch.nn.functional.pad(x1, (1,0,1,0)) + elif self.dim == '3d': + x1 = torch.nn.functional.pad(x1, (1,0,1,0,1,0)) + + if self.resample_do_res: + res = self.res_conv(x) + if self.dim == '2d': + res = torch.nn.functional.pad(res, (1,0,1,0)) + elif self.dim == '3d': + res = torch.nn.functional.pad(res, (1,0,1,0,1,0)) + x1 = x1 + res + + return x1 + + +class OutBlock(nn.Module): + + def __init__(self, in_channels, n_classes, dim): + super().__init__() + + if dim == '2d': + conv = nn.ConvTranspose2d + elif dim == '3d': + conv = nn.ConvTranspose3d + self.conv_out = conv(in_channels, n_classes, kernel_size=1) + + def forward(self, x, dummy_tensor=None): + return self.conv_out(x) + + +class LayerNorm(nn.Module): + """ LayerNorm that supports two data formats: channels_last (default) or channels_first. + The ordering of the dimensions in the inputs. channels_last corresponds to inputs with + shape (batch_size, height, width, channels) while channels_first corresponds to inputs + with shape (batch_size, channels, height, width). + """ + def __init__(self, normalized_shape, eps=1e-5, data_format="channels_last"): + super().__init__() + self.weight = nn.Parameter(torch.ones(normalized_shape)) # beta + self.bias = nn.Parameter(torch.zeros(normalized_shape)) # gamma + self.eps = eps + self.data_format = data_format + if self.data_format not in ["channels_last", "channels_first"]: + raise NotImplementedError + self.normalized_shape = (normalized_shape, ) + + def forward(self, x, dummy_tensor=False): + if self.data_format == "channels_last": + return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) + elif self.data_format == "channels_first": + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None, None] * x + self.bias[:, None, None, None] + return x + + +if __name__ == "__main__": + + + # network = nnUNeXtBlock(in_channels=12, out_channels=12, do_res=False).cuda() + + # with torch.no_grad(): + # print(network) + # x = torch.zeros((2, 12, 8, 8, 8)).cuda() + # print(network(x).shape) + + # network = DownsampleBlock(in_channels=12, out_channels=24, do_res=False) + + # with torch.no_grad(): + # print(network) + # x = torch.zeros((2, 12, 128, 128, 128)) + # print(network(x).shape) + + network = MedNeXtBlock(in_channels=12, out_channels=12, do_res=True, grn=True, norm_type='group').cuda() + # network = LayerNorm(normalized_shape=12, data_format='channels_last').cuda() + # network.eval() + with torch.no_grad(): + print(network) + x = torch.zeros((2, 12, 64, 64, 64)).cuda() + print(network(x).shape) diff --git a/docker/template/src/nnunetv2/nets/mednextv1/create_mednext_v1.py b/docker/template/src/nnunetv2/nets/mednextv1/create_mednext_v1.py new file mode 100644 index 0000000..84d619c --- /dev/null +++ b/docker/template/src/nnunetv2/nets/mednextv1/create_mednext_v1.py @@ -0,0 +1,83 @@ +from nnunetv2.nets.mednextv1.MedNextV1 import MedNeXt + +def create_mednextv1_small(num_input_channels, num_classes, kernel_size=3, ds=False): + + return MedNeXt( + in_channels = num_input_channels, + n_channels = 32, + n_classes = num_classes, + exp_r=2, + kernel_size=kernel_size, + 
deep_supervision=ds, + do_res=True, + do_res_up_down = True, + block_counts = [2,2,2,2,2,2,2,2,2] + ) + + +def create_mednextv1_base(num_input_channels, num_classes, kernel_size=3, ds=False): + + return MedNeXt( + in_channels = num_input_channels, + n_channels = 32, + n_classes = num_classes, + exp_r=[2,3,4,4,4,4,4,3,2], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down = True, + block_counts = [2,2,2,2,2,2,2,2,2] + ) + + +def create_mednextv1_medium(num_input_channels, num_classes, kernel_size=3, ds=False): + + return MedNeXt( + in_channels = num_input_channels, + n_channels = 32, + n_classes = num_classes, + exp_r=[2,3,4,4,4,4,4,3,2], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down = True, + block_counts = [3,4,4,4,4,4,4,4,3], + checkpoint_style = 'outside_block' + ) + + +def create_mednextv1_large(num_input_channels, num_classes, kernel_size=3, ds=False): + + return MedNeXt( + in_channels = num_input_channels, + n_channels = 32, + n_classes = num_classes, + exp_r=[3,4,8,8,8,8,8,4,3], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down = True, + block_counts = [3,4,8,8,8,8,8,4,3], + checkpoint_style = 'outside_block' + ) + + +def create_mednext_v1(num_input_channels, num_classes, model_id, kernel_size=3, + deep_supervision=False): + + model_dict = { + 'S': create_mednextv1_small, + 'B': create_mednextv1_base, + 'M': create_mednextv1_medium, + 'L': create_mednextv1_large, + } + + return model_dict[model_id]( + num_input_channels, num_classes, kernel_size, deep_supervision + ) + + +if __name__ == "__main__": + + model = create_mednextv1_large(1, 3, 3, False) + print(model) \ No newline at end of file diff --git a/docker/template/src/nnunetv2/nets/sam_lora_image_encoder.py b/docker/template/src/nnunetv2/nets/sam_lora_image_encoder.py new file mode 100644 index 0000000..6f1331a --- /dev/null +++ b/docker/template/src/nnunetv2/nets/sam_lora_image_encoder.py @@ -0,0 +1,206 @@ +from typing import Mapping, Any + +from nnunetv2.nets.segment_anything import sam_model_registry + +import math +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from torch.nn.parameter import Parameter +# from segment_anything.modeling import Sam +from safetensors import safe_open +from safetensors.torch import save_file +# from icecream import ic +from nnunetv2.nets.segment_anything.modeling import Sam +from torch._dynamo import OptimizedModule + +# from segment_anything import build_sam, SamPredictor + +class _LoRA_qkv(nn.Module): + """In Sam it is implemented as + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4) + q, k, v = qkv.unbind(0) + """ + + def __init__( + self, + qkv: nn.Module, + linear_a_q: nn.Module, + linear_b_q: nn.Module, + linear_a_v: nn.Module, + linear_b_v: nn.Module, + ): + super().__init__() + self.qkv = qkv + self.linear_a_q = linear_a_q + self.linear_b_q = linear_b_q + self.linear_a_v = linear_a_v + self.linear_b_v = linear_b_v + self.dim = qkv.in_features + self.w_identity = torch.eye(qkv.in_features) + + def forward(self, x): + qkv = self.qkv(x) # B,N,N,3*org_C + new_q = self.linear_b_q(self.linear_a_q(x)) + new_v = self.linear_b_v(self.linear_a_v(x)) + qkv[:, :, :, : self.dim] += new_q + qkv[:, :, :, -self.dim:] += new_v + return qkv + + +class LoRA_Sam(nn.Module): + """Applies low-rank adaptation to a Sam model's image encoder. 
+
+    Args:
+        sam_model: the Sam model whose image encoder will receive LoRA adapters
+        r: rank of the LoRA update matrices
+        lora_layer: indices of the encoder blocks to adapt; defaults to all blocks
+
+    Examples::
+        >>> sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth")
+        >>> lora_sam = LoRA_Sam(sam, r=4)
+    """
+
+    def __init__(self, sam_model: Sam, r: int, lora_layer=None):
+        super(LoRA_Sam, self).__init__()
+
+        assert r > 0
+        # base_vit_dim = sam_model.image_encoder.patch_embed.proj.out_channels
+        # dim = base_vit_dim
+        if lora_layer:
+            self.lora_layer = lora_layer
+        else:
+            # Apply LoRA to every block of the image encoder by default
+            self.lora_layer = list(range(len(sam_model.image_encoder.blocks)))
+        # Storage for the A/B matrices, so we can init them or load weights
+        self.w_As = []  # linear layers mapping dim -> r
+        self.w_Bs = []  # linear layers mapping r -> dim
+
+        # Freeze the original image encoder weights first
+        for param in sam_model.image_encoder.parameters():
+            param.requires_grad = False
+
+        # Surgery: wrap the qkv projection of each selected block
+        for t_layer_i, blk in enumerate(sam_model.image_encoder.blocks):
+            # Skip blocks that were not selected for LoRA
+            if t_layer_i not in self.lora_layer:
+                continue
+            w_qkv_linear = blk.attn.qkv
+            self.dim = w_qkv_linear.in_features
+            w_a_linear_q = nn.Linear(self.dim, r, bias=False)
+            w_b_linear_q = nn.Linear(r, self.dim, bias=False)
+            w_a_linear_v = nn.Linear(self.dim, r, bias=False)
+            w_b_linear_v = nn.Linear(r, self.dim, bias=False)
+            self.w_As.append(w_a_linear_q)
+            self.w_Bs.append(w_b_linear_q)
+            self.w_As.append(w_a_linear_v)
+            self.w_Bs.append(w_b_linear_v)
+            blk.attn.qkv = _LoRA_qkv(
+                w_qkv_linear,
+                w_a_linear_q,
+                w_b_linear_q,
+                w_a_linear_v,
+                w_b_linear_v,
+            )
+        self.reset_parameters()
+        self.sam = sam_model
+
+    def get_lora_parameters(self) -> dict:
+        r"""Collect the LoRA A/B matrices together with the prompt encoder and
+        mask decoder weights into a single dict, e.g. for saving with
+        safetensors (pip install safetensors if it is not installed yet).
+        """
+
+        num_layer = len(self.w_As)  # one A/B pair each for q and v per adapted block
+        a_tensors = {f"w_a_{i:03d}": self.w_As[i].weight for i in range(num_layer)}
+        b_tensors = {f"w_b_{i:03d}": self.w_Bs[i].weight for i in range(num_layer)}
+        prompt_encoder_tensors = {}
+        mask_decoder_tensors = {}
+
+        # Take the prompt encoder and mask decoder weights from `state_dict`
+        # (`named_parameters` is not sufficient here)
+        if isinstance(self.sam, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)):
+            state_dict = self.sam.module.state_dict()
+        else:
+            state_dict = self.sam.state_dict()
+        for key, value in state_dict.items():
+            if 'prompt_encoder' in key:
+                prompt_encoder_tensors[key] = value
+            if 'mask_decoder' in key:
+                mask_decoder_tensors[key] = value
+
+        merged_dict = {**a_tensors, **b_tensors, **prompt_encoder_tensors, **mask_decoder_tensors}
+        # torch.save(merged_dict, filename)
+        return merged_dict
+
+    def load_state_dict(self, state_dict: Mapping[str, Any],
+                        strict: bool = True, assign: bool = False):
+        self.load_lora_parameters(state_dict)
+
+    def load_lora_parameters(self, state_dict) -> None:
+        r"""Load the LoRA A/B matrices plus the prompt encoder and mask decoder
+        weights from a state dict produced by get_lora_parameters.
+ """ + + # assert filename.endswith(".pt") or filename.endswith('.pth') + # if torch.cuda.is_available(): + # state_dict = torch.load(filename, map_location='cuda') + # else: + # state_dict = torch.load(filename, map_location='cpu') + # + for i, w_A_linear in enumerate(self.w_As): + saved_key = f"w_a_{i:03d}" + saved_tensor = state_dict[saved_key] + w_A_linear.weight = Parameter(saved_tensor) + + for i, w_B_linear in enumerate(self.w_Bs): + saved_key = f"w_b_{i:03d}" + saved_tensor = state_dict[saved_key] + w_B_linear.weight = Parameter(saved_tensor) + + sam_dict = self.sam.state_dict() + sam_keys = sam_dict.keys() + + # load prompt encoder + prompt_encoder_keys = [k for k in sam_keys if 'prompt_encoder' in k] + prompt_encoder_values = [state_dict[k] for k in prompt_encoder_keys] + prompt_encoder_new_state_dict = {k: v for k, v in zip(prompt_encoder_keys, prompt_encoder_values)} + sam_dict.update(prompt_encoder_new_state_dict) + + # load mask decoder + mask_decoder_keys = [k for k in sam_keys if 'mask_decoder' in k] + mask_decoder_values = [state_dict[k] for k in mask_decoder_keys] + mask_decoder_new_state_dict = {k: v for k, v in zip(mask_decoder_keys, mask_decoder_values)} + sam_dict.update(mask_decoder_new_state_dict) + self.sam.load_state_dict(sam_dict) + + def reset_parameters(self) -> None: + for w_A in self.w_As: + nn.init.kaiming_uniform_(w_A.weight, a=math.sqrt(5)) + for w_B in self.w_Bs: + nn.init.zeros_(w_B.weight) + + def forward(self, batched_input, multimask_output, image_size): + return self.sam(batched_input, multimask_output, image_size) + + + # def forward(self, x: Tensor) -> Tensor: + # return self.lora_vit(x) + + +if __name__ == "__main__": + sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b_01ec64.pth") + lora_sam = LoRA_Sam(sam, 4) + lora_sam.sam.image_encoder(torch.rand(size=(1, 3, 1024, 1024))) diff --git a/docker/template/src/nnunetv2/nets/segment_anything/__init__.py b/docker/template/src/nnunetv2/nets/segment_anything/__init__.py new file mode 100644 index 0000000..34383d8 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .build_sam import ( + build_sam, + build_sam_vit_h, + build_sam_vit_l, + build_sam_vit_b, + sam_model_registry, +) +from .predictor import SamPredictor +from .automatic_mask_generator import SamAutomaticMaskGenerator diff --git a/docker/template/src/nnunetv2/nets/segment_anything/automatic_mask_generator.py b/docker/template/src/nnunetv2/nets/segment_anything/automatic_mask_generator.py new file mode 100644 index 0000000..2326497 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/automatic_mask_generator.py @@ -0,0 +1,372 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
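+
+# Editor's note: a minimal usage sketch for this module (illustrative, not part
+# of the upstream file). It assumes a local SAM checkpoint; note that this
+# repo's build_sam variants take (image_size, num_classes, ...) and return a
+# (sam, image_embedding_size) tuple, unlike the upstream Meta signature:
+#
+#   from nnunetv2.nets.segment_anything import sam_model_registry, SamAutomaticMaskGenerator
+#   sam, _ = sam_model_registry["vit_b"](image_size=1024, num_classes=3,
+#                                        checkpoint="sam_vit_b_01ec64.pth")
+#   generator = SamAutomaticMaskGenerator(sam, points_per_side=32)
+#   masks = generator.generate(image)  # image: HWC uint8 numpy array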
+
+import numpy as np
+import torch
+from torchvision.ops.boxes import batched_nms, box_area  # type: ignore
+
+from typing import Any, Dict, List, Optional, Tuple
+
+from .modeling import Sam
+from .predictor import SamPredictor
+from .utils.amg import (
+    MaskData,
+    area_from_rle,
+    batch_iterator,
+    batched_mask_to_box,
+    box_xyxy_to_xywh,
+    build_all_layer_point_grids,
+    calculate_stability_score,
+    coco_encode_rle,
+    generate_crop_boxes,
+    is_box_near_crop_edge,
+    mask_to_rle_pytorch,
+    remove_small_regions,
+    rle_to_mask,
+    uncrop_boxes_xyxy,
+    uncrop_masks,
+    uncrop_points,
+)
+
+
+class SamAutomaticMaskGenerator:
+    def __init__(
+        self,
+        model: Sam,
+        points_per_side: Optional[int] = 32,
+        points_per_batch: int = 64,
+        pred_iou_thresh: float = 0.88,
+        stability_score_thresh: float = 0.95,
+        stability_score_offset: float = 1.0,
+        box_nms_thresh: float = 0.7,
+        crop_n_layers: int = 0,
+        crop_nms_thresh: float = 0.7,
+        crop_overlap_ratio: float = 512 / 1500,
+        crop_n_points_downscale_factor: int = 1,
+        point_grids: Optional[List[np.ndarray]] = None,
+        min_mask_region_area: int = 0,
+        output_mode: str = "binary_mask",
+    ) -> None:
+        """
+        Using a SAM model, generates masks for the entire image.
+        Generates a grid of point prompts over the image, then filters
+        low quality and duplicate masks. The default settings are chosen
+        for SAM with a ViT-H backbone.
+
+        Arguments:
+          model (Sam): The SAM model to use for mask prediction.
+          points_per_side (int or None): The number of points to be sampled
+            along one side of the image. The total number of points is
+            points_per_side**2. If None, 'point_grids' must provide explicit
+            point sampling.
+          points_per_batch (int): Sets the number of points run simultaneously
+            by the model. Higher numbers may be faster but use more GPU memory.
+          pred_iou_thresh (float): A filtering threshold in [0,1], using the
+            model's predicted mask quality.
+          stability_score_thresh (float): A filtering threshold in [0,1], using
+            the stability of the mask under changes to the cutoff used to binarize
+            the model's mask predictions.
+          stability_score_offset (float): The amount to shift the cutoff when
+            calculating the stability score.
+          box_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks.
+          crop_n_layers (int): If >0, mask prediction will be run again on
+            crops of the image. Sets the number of layers to run, where each
+            layer has 2**i_layer number of image crops.
+          crop_nms_thresh (float): The box IoU cutoff used by non-maximal
+            suppression to filter duplicate masks between different crops.
+          crop_overlap_ratio (float): Sets the degree to which crops overlap.
+            In the first crop layer, crops will overlap by this fraction of
+            the image length. Later layers with more crops scale down this overlap.
+          crop_n_points_downscale_factor (int): The number of points-per-side
+            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
+          point_grids (list(np.ndarray) or None): A list over explicit grids
+            of points used for sampling, normalized to [0,1]. The nth grid in the
+            list is used in the nth crop layer. Exclusive with points_per_side.
+          min_mask_region_area (int): If >0, postprocessing will be applied
+            to remove disconnected regions and holes in masks with area smaller
+            than min_mask_region_area. Requires opencv.
+          output_mode (str): The form masks are returned in. Can be 'binary_mask',
+            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
+ For large resolutions, 'binary_mask' may consume large amounts of + memory. + """ + + assert (points_per_side is None) != ( + point_grids is None + ), "Exactly one of points_per_side or point_grid must be provided." + if points_per_side is not None: + self.point_grids = build_all_layer_point_grids( + points_per_side, + crop_n_layers, + crop_n_points_downscale_factor, + ) + elif point_grids is not None: + self.point_grids = point_grids + else: + raise ValueError("Can't have both points_per_side and point_grid be None.") + + assert output_mode in [ + "binary_mask", + "uncompressed_rle", + "coco_rle", + ], f"Unknown output_mode {output_mode}." + if output_mode == "coco_rle": + from pycocotools import mask as mask_utils # type: ignore # noqa: F401 + + if min_mask_region_area > 0: + import cv2 # type: ignore # noqa: F401 + + self.predictor = SamPredictor(model) + self.points_per_batch = points_per_batch + self.pred_iou_thresh = pred_iou_thresh + self.stability_score_thresh = stability_score_thresh + self.stability_score_offset = stability_score_offset + self.box_nms_thresh = box_nms_thresh + self.crop_n_layers = crop_n_layers + self.crop_nms_thresh = crop_nms_thresh + self.crop_overlap_ratio = crop_overlap_ratio + self.crop_n_points_downscale_factor = crop_n_points_downscale_factor + self.min_mask_region_area = min_mask_region_area + self.output_mode = output_mode + + @torch.no_grad() + def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: + """ + Generates masks for the given image. + + Arguments: + image (np.ndarray): The image to generate masks for, in HWC uint8 format. + + Returns: + list(dict(str, any)): A list over records for masks. Each record is + a dict containing the following keys: + segmentation (dict(str, any) or np.ndarray): The mask. If + output_mode='binary_mask', is an array of shape HW. Otherwise, + is a dictionary containing the RLE. + bbox (list(float)): The box around the mask, in XYWH format. + area (int): The area in pixels of the mask. + predicted_iou (float): The model's own prediction of the mask's + quality. This is filtered by the pred_iou_thresh parameter. + point_coords (list(list(float))): The point coordinates input + to the model to generate this mask. + stability_score (float): A measure of the mask's quality. This + is filtered on using the stability_score_thresh parameter. + crop_box (list(float)): The crop of the image used to generate + the mask, given in XYWH format. 
+ """ + + # Generate masks + mask_data = self._generate_masks(image) + + # Filter small disconnected regions and holes in masks + if self.min_mask_region_area > 0: + mask_data = self.postprocess_small_regions( + mask_data, + self.min_mask_region_area, + max(self.box_nms_thresh, self.crop_nms_thresh), + ) + + # Encode masks + if self.output_mode == "coco_rle": + mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] + elif self.output_mode == "binary_mask": + mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] + else: + mask_data["segmentations"] = mask_data["rles"] + + # Write mask records + curr_anns = [] + for idx in range(len(mask_data["segmentations"])): + ann = { + "segmentation": mask_data["segmentations"][idx], + "area": area_from_rle(mask_data["rles"][idx]), + "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), + "predicted_iou": mask_data["iou_preds"][idx].item(), + "point_coords": [mask_data["points"][idx].tolist()], + "stability_score": mask_data["stability_score"][idx].item(), + "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), + } + curr_anns.append(ann) + + return curr_anns + + def _generate_masks(self, image: np.ndarray) -> MaskData: + orig_size = image.shape[:2] + crop_boxes, layer_idxs = generate_crop_boxes( + orig_size, self.crop_n_layers, self.crop_overlap_ratio + ) + + # Iterate over image crops + data = MaskData() + for crop_box, layer_idx in zip(crop_boxes, layer_idxs): + crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) + data.cat(crop_data) + + # Remove duplicate masks between crops + if len(crop_boxes) > 1: + # Prefer masks from smaller crops + scores = 1 / box_area(data["crop_boxes"]) + scores = scores.to(data["boxes"].device) + keep_by_nms = batched_nms( + data["boxes"].float(), + scores, + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.crop_nms_thresh, + ) + data.filter(keep_by_nms) + + data.to_numpy() + return data + + def _process_crop( + self, + image: np.ndarray, + crop_box: List[int], + crop_layer_idx: int, + orig_size: Tuple[int, ...], + ) -> MaskData: + # Crop the image and calculate embeddings + x0, y0, x1, y1 = crop_box + cropped_im = image[y0:y1, x0:x1, :] + cropped_im_size = cropped_im.shape[:2] + self.predictor.set_image(cropped_im) + + # Get points for this crop + points_scale = np.array(cropped_im_size)[None, ::-1] + points_for_image = self.point_grids[crop_layer_idx] * points_scale + + # Generate masks for this crop in batches + data = MaskData() + for (points,) in batch_iterator(self.points_per_batch, points_for_image): + batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) + data.cat(batch_data) + del batch_data + self.predictor.reset_image() + + # Remove duplicates within this crop. 
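+        # Editor's note: passing a single category id (all zeros) makes
+        # torchvision's batched_nms act as plain NMS over all boxes at once,
+        # since suppression only happens within the same category.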
+ keep_by_nms = batched_nms( + data["boxes"].float(), + data["iou_preds"], + torch.zeros(len(data["boxes"])), # categories + iou_threshold=self.box_nms_thresh, + ) + data.filter(keep_by_nms) + + # Return to the original image frame + data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) + data["points"] = uncrop_points(data["points"], crop_box) + data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) + + return data + + def _process_batch( + self, + points: np.ndarray, + im_size: Tuple[int, ...], + crop_box: List[int], + orig_size: Tuple[int, ...], + ) -> MaskData: + orig_h, orig_w = orig_size + + # Run model on this batch + transformed_points = self.predictor.transform.apply_coords(points, im_size) + in_points = torch.as_tensor(transformed_points, device=self.predictor.device) + in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) + masks, iou_preds, _ = self.predictor.predict_torch( + in_points[:, None, :], + in_labels[:, None], + multimask_output=True, + return_logits=True, + ) + + # Serialize predictions and store in MaskData + data = MaskData( + masks=masks.flatten(0, 1), + iou_preds=iou_preds.flatten(0, 1), + points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), + ) + del masks + + # Filter by predicted IoU + if self.pred_iou_thresh > 0.0: + keep_mask = data["iou_preds"] > self.pred_iou_thresh + data.filter(keep_mask) + + # Calculate stability score + data["stability_score"] = calculate_stability_score( + data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset + ) + if self.stability_score_thresh > 0.0: + keep_mask = data["stability_score"] >= self.stability_score_thresh + data.filter(keep_mask) + + # Threshold masks and calculate boxes + data["masks"] = data["masks"] > self.predictor.model.mask_threshold + data["boxes"] = batched_mask_to_box(data["masks"]) + + # Filter boxes that touch crop boundaries + keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) + if not torch.all(keep_mask): + data.filter(keep_mask) + + # Compress to RLE + data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w) + data["rles"] = mask_to_rle_pytorch(data["masks"]) + del data["masks"] + + return data + + @staticmethod + def postprocess_small_regions( + mask_data: MaskData, min_area: int, nms_thresh: float + ) -> MaskData: + """ + Removes small disconnected regions and holes in masks, then reruns + box NMS to remove any new duplicates. + + Edits mask_data in place. + + Requires open-cv as a dependency. 
+ """ + if len(mask_data["rles"]) == 0: + return mask_data + + # Filter small disconnected regions and holes + new_masks = [] + scores = [] + for rle in mask_data["rles"]: + mask = rle_to_mask(rle) + + mask, changed = remove_small_regions(mask, min_area, mode="holes") + unchanged = not changed + mask, changed = remove_small_regions(mask, min_area, mode="islands") + unchanged = unchanged and not changed + + new_masks.append(torch.as_tensor(mask).unsqueeze(0)) + # Give score=0 to changed masks and score=1 to unchanged masks + # so NMS will prefer ones that didn't need postprocessing + scores.append(float(unchanged)) + + # Recalculate boxes and remove any new duplicates + masks = torch.cat(new_masks, dim=0) + boxes = batched_mask_to_box(masks) + keep_by_nms = batched_nms( + boxes.float(), + torch.as_tensor(scores), + torch.zeros(len(boxes)), # categories + iou_threshold=nms_thresh, + ) + + # Only recalculate RLEs for masks that have changed + for i_mask in keep_by_nms: + if scores[i_mask] == 0.0: + mask_torch = masks[i_mask].unsqueeze(0) + mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0] + mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly + mask_data.filter(keep_by_nms) + + return mask_data diff --git a/docker/template/src/nnunetv2/nets/segment_anything/build_sam.py b/docker/template/src/nnunetv2/nets/segment_anything/build_sam.py new file mode 100644 index 0000000..9ba5bb3 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/build_sam.py @@ -0,0 +1,168 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch.nn import functional as F +# from icecream import ic + +from functools import partial + +from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer + + +def build_sam_vit_h(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], + checkpoint=None): + return _build_sam( + encoder_embed_dim=1280, + encoder_depth=32, + encoder_num_heads=16, + encoder_global_attn_indexes=[7, 15, 23, 31], + checkpoint=checkpoint, + num_classes=num_classes, + image_size=image_size, + pixel_mean=pixel_mean, + pixel_std=pixel_std + ) + + +build_sam = build_sam_vit_h + + +def build_sam_vit_l(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], + checkpoint=None): + return _build_sam( + encoder_embed_dim=1024, + encoder_depth=24, + encoder_num_heads=16, + encoder_global_attn_indexes=[5, 11, 17, 23], + checkpoint=checkpoint, + num_classes=num_classes, + image_size=image_size, + pixel_mean=pixel_mean, + pixel_std=pixel_std + ) + + +def build_sam_vit_b(image_size, num_classes, pixel_mean=[123.675, 116.28, 103.53], pixel_std=[58.395, 57.12, 57.375], + checkpoint=None): + return _build_sam( + encoder_embed_dim=768, + encoder_depth=12, + encoder_num_heads=12, + encoder_global_attn_indexes=[2, 5, 8, 11], + # adopt global attention at [3, 6, 9, 12] transform layer, else window attention layer + checkpoint=checkpoint, + num_classes=num_classes, + image_size=image_size, + pixel_mean=pixel_mean, + pixel_std=pixel_std + ) + + +sam_model_registry = { + "default": build_sam_vit_h, + "vit_h": build_sam_vit_h, + "vit_l": build_sam_vit_l, + "vit_b": build_sam_vit_b, +} + + +def _build_sam( + encoder_embed_dim, + encoder_depth, + encoder_num_heads, + encoder_global_attn_indexes, + num_classes, + 
image_size,
+        pixel_mean,
+        pixel_std,
+        checkpoint=None,
+):
+    prompt_embed_dim = 256
+    vit_patch_size = 16
+    image_embedding_size = image_size // vit_patch_size  # ViT tokens per side
+    sam = Sam(
+        image_encoder=ImageEncoderViT(
+            depth=encoder_depth,
+            embed_dim=encoder_embed_dim,
+            img_size=image_size,
+            mlp_ratio=4,
+            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
+            num_heads=encoder_num_heads,
+            patch_size=vit_patch_size,
+            qkv_bias=True,
+            use_rel_pos=True,
+            global_attn_indexes=encoder_global_attn_indexes,
+            window_size=14,
+            out_chans=prompt_embed_dim,
+        ),
+        prompt_encoder=PromptEncoder(
+            embed_dim=prompt_embed_dim,
+            image_embedding_size=(image_embedding_size, image_embedding_size),
+            input_image_size=(image_size, image_size),
+            mask_in_chans=16,
+        ),
+        mask_decoder=MaskDecoder(
+            # num_multimask_outputs=3,
+            num_multimask_outputs=num_classes,
+            transformer=TwoWayTransformer(
+                depth=2,
+                embedding_dim=prompt_embed_dim,
+                mlp_dim=2048,
+                num_heads=8,
+            ),
+            transformer_dim=prompt_embed_dim,
+            iou_head_depth=3,
+            iou_head_hidden_dim=256,
+        ),
+        # pixel_mean=[123.675, 116.28, 103.53],
+        # pixel_std=[58.395, 57.12, 57.375],
+        pixel_mean=pixel_mean,
+        pixel_std=pixel_std
+    )
+    # sam.eval()
+    sam.train()
+    if checkpoint is not None:
+        with open(checkpoint, "rb") as f:
+            state_dict = torch.load(f)
+        try:
+            sam.load_state_dict(state_dict)
+        except Exception:
+            # Shapes do not match (e.g. different image size or num_classes):
+            # remap and resize the compatible weights instead
+            new_state_dict = load_from(sam, state_dict, image_size, vit_patch_size, encoder_global_attn_indexes)
+            sam.load_state_dict(new_state_dict)
+    return sam, image_embedding_size
+
+
+def load_from(sam, state_dict, image_size, vit_patch_size, encoder_global_attn_indexes):
+    sam_dict = sam.state_dict()
+    except_keys = ['mask_tokens', 'output_hypernetworks_mlps', 'iou_prediction_head']
+    new_state_dict = {k: v for k, v in state_dict.items() if
+                      k in sam_dict.keys() and except_keys[0] not in k and except_keys[1] not in k and except_keys[2] not in k}
+    pos_embed = new_state_dict['image_encoder.pos_embed']
+    token_size = int(image_size // vit_patch_size)
+    if pos_embed.shape[1] != token_size:
+        # Resize the pos embedding to the new token grid; bilinear
+        # interpolation may cost some accuracy
+        pos_embed = pos_embed.permute(0, 3, 1, 2)  # [b, c, h, w]
+        pos_embed = F.interpolate(pos_embed, (token_size, token_size), mode='bilinear', align_corners=False)
+        pos_embed = pos_embed.permute(0, 2, 3, 1)  # [b, h, w, c]
+        new_state_dict['image_encoder.pos_embed'] = pos_embed
+    rel_pos_keys = [k for k in sam_dict.keys() if 'rel_pos' in k]
+    global_rel_pos_keys = []
+    for rel_pos_key in rel_pos_keys:
+        num = int(rel_pos_key.split('.')[2])
+        if num in encoder_global_attn_indexes:
+            global_rel_pos_keys.append(rel_pos_key)
+    # global_rel_pos_keys = [k for k in rel_pos_keys if '2' in k or '5' in k or '8' in k or '11' in k]
+    for k in global_rel_pos_keys:
+        rel_pos_params = new_state_dict[k]
+        h, w = rel_pos_params.shape
+        rel_pos_params = rel_pos_params.unsqueeze(0).unsqueeze(0)
+        rel_pos_params = F.interpolate(rel_pos_params, (token_size * 2 - 1, w), mode='bilinear', align_corners=False)
+        new_state_dict[k] = rel_pos_params[0, 0, ...]
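+    # Editor's note: only the global-attention blocks need this interpolation;
+    # their rel_pos tables span the full token grid (2 * token_size - 1 rows),
+    # while windowed blocks keep a fixed 14x14 window whose table size does not
+    # depend on the input image size.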
+ sam_dict.update(new_state_dict) + return sam_dict diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/__init__.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/__init__.py new file mode 100644 index 0000000..38e9062 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from .sam import Sam +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder +from .transformer import TwoWayTransformer diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/common.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/common.py new file mode 100644 index 0000000..2bf1523 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/common.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn + +from typing import Type + + +class MLPBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + mlp_dim: int, + act: Type[nn.Module] = nn.GELU, + ) -> None: + super().__init__() + self.lin1 = nn.Linear(embedding_dim, mlp_dim) + self.lin2 = nn.Linear(mlp_dim, embedding_dim) + self.act = act() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return self.lin2(self.act(self.lin1(x))) + + +# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa +# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa +class LayerNorm2d(nn.Module): + def __init__(self, num_channels: int, eps: float = 1e-6) -> None: + super().__init__() + self.weight = nn.Parameter(torch.ones(num_channels)) + self.bias = nn.Parameter(torch.zeros(num_channels)) + self.eps = eps + + def forward(self, x: torch.Tensor) -> torch.Tensor: + u = x.mean(1, keepdim=True) + s = (x - u).pow(2).mean(1, keepdim=True) + x = (x - u) / torch.sqrt(s + self.eps) + x = self.weight[:, None, None] * x + self.bias[:, None, None] + return x diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/image_encoder.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/image_encoder.py new file mode 100644 index 0000000..9e382c1 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/image_encoder.py @@ -0,0 +1,396 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
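+
+# Editor's note (shape summary, assuming the default 1024x1024 input and
+# patch size 16): input (B, 3, 1024, 1024) -> patch_embed (B, 64, 64, 768)
+# -> transformer blocks (windowed, except global attention at
+# global_attn_indexes) -> neck (B, 256, 64, 64).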
+ +import torch +import torch.nn as nn +import torch.nn.functional as F +# from icecream import ic + +from typing import Optional, Tuple, Type + +from .common import LayerNorm2d, MLPBlock + + +# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa +class ImageEncoderViT(nn.Module): + def __init__( + self, + img_size: int = 1024, + patch_size: int = 16, + in_chans: int = 3, + embed_dim: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: float = 4.0, + out_chans: int = 256, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_abs_pos: bool = True, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + global_attn_indexes: Tuple[int, ...] = (), + ) -> None: + """ + Args: + img_size (int): Input image size. + patch_size (int): Patch size. + in_chans (int): Number of input image channels. + embed_dim (int): Patch embedding dimension. + depth (int): Depth of ViT. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_abs_pos (bool): If True, use absolute positional embeddings. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. + global_attn_indexes (list): Indexes for blocks using global attention. + """ + super().__init__() + self.img_size = img_size + + self.patch_embed = PatchEmbed( + kernel_size=(patch_size, patch_size), + stride=(patch_size, patch_size), + in_chans=in_chans, + embed_dim=embed_dim, + ) + + self.pos_embed: Optional[nn.Parameter] = None + if use_abs_pos: + # Initialize absolute positional embedding with pretrain image size. 
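+            # Editor's note: shape (1, img_size // patch_size,
+            # img_size // patch_size, embed_dim); when a checkpoint trained at
+            # another resolution is loaded, load_from() in build_sam.py
+            # bilinearly resizes this table to the new token grid.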
+ self.pos_embed = nn.Parameter( + torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim) + ) + + self.blocks = nn.ModuleList() + for i in range(depth): + block = Block( + dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + norm_layer=norm_layer, + act_layer=act_layer, + use_rel_pos=use_rel_pos, + rel_pos_zero_init=rel_pos_zero_init, + window_size=window_size if i not in global_attn_indexes else 0, + input_size=(img_size // patch_size, img_size // patch_size), + ) + self.blocks.append(block) + + self.neck = nn.Sequential( + nn.Conv2d( + embed_dim, + out_chans, + kernel_size=1, + bias=False, + ), + LayerNorm2d(out_chans), + nn.Conv2d( + out_chans, + out_chans, + kernel_size=3, + padding=1, + bias=False, + ), + LayerNorm2d(out_chans), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.patch_embed(x) # pre embed: [1, 3, 1024, 1024], post embed: [1, 64, 64, 768] + if self.pos_embed is not None: + x = x + self.pos_embed + + for blk in self.blocks: + x = blk(x) + + x = self.neck(x.permute(0, 3, 1, 2)) # [b, c, h, w], [1, 256, 64, 64] + + return x + + +class Block(nn.Module): + """Transformer blocks with support of window attention and residual propagation blocks""" + + def __init__( + self, + dim: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + norm_layer: Type[nn.Module] = nn.LayerNorm, + act_layer: Type[nn.Module] = nn.GELU, + use_rel_pos: bool = False, + rel_pos_zero_init: bool = True, + window_size: int = 0, + input_size: Optional[Tuple[int, int]] = None, + ) -> None: + """ + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads in each ViT block. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool): If True, add a learnable bias to query, key, value. + norm_layer (nn.Module): Normalization layer. + act_layer (nn.Module): Activation layer. + use_rel_pos (bool): If True, add relative positional embeddings to the attention map. + rel_pos_zero_init (bool): If True, zero initialize relative positional parameters. + window_size (int): Window size for window attention blocks. If it equals 0, then + use global attention. + input_size (int or None): Input resolution for calculating the relative positional + parameter size. 
+        """
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim,
+            num_heads=num_heads,
+            qkv_bias=qkv_bias,
+            use_rel_pos=use_rel_pos,
+            rel_pos_zero_init=rel_pos_zero_init,
+            input_size=input_size if window_size == 0 else (window_size, window_size),
+        )
+
+        self.norm2 = norm_layer(dim)
+        self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
+
+        self.window_size = window_size
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        shortcut = x
+        x = self.norm1(x)
+        # Window partition
+        if self.window_size > 0:
+            H, W = x.shape[1], x.shape[2]
+            x, pad_hw = window_partition(x, self.window_size)  # [B * num_windows, window_size, window_size, C]
+
+        x = self.attn(x)
+        # Reverse window partition
+        if self.window_size > 0:
+            x = window_unpartition(x, self.window_size, pad_hw, (H, W))
+
+        x = shortcut + x
+        x = x + self.mlp(self.norm2(x))
+
+        return x
+
+
+class Attention(nn.Module):
+    """Multi-head Attention block with relative position embeddings."""
+
+    def __init__(
+        self,
+        dim: int,
+        num_heads: int = 8,
+        qkv_bias: bool = True,
+        use_rel_pos: bool = False,
+        rel_pos_zero_init: bool = True,
+        input_size: Optional[Tuple[int, int]] = None,
+    ) -> None:
+        """
+        Args:
+            dim (int): Number of input channels.
+            num_heads (int): Number of attention heads.
+            qkv_bias (bool): If True, add a learnable bias to query, key, value.
+            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
+            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
+            input_size (int or None): Input resolution for calculating the relative positional
+                parameter size.
+        """
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = head_dim**-0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.proj = nn.Linear(dim, dim)
+
+        self.use_rel_pos = use_rel_pos
+        if self.use_rel_pos:
+            assert (
+                input_size is not None
+            ), "Input size must be provided if using relative positional encoding."
+            # initialize relative positional embeddings
+            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
+            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        B, H, W, _ = x.shape
+        # qkv with shape (3, B, nHead, H * W, C)
+        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+        # q, k, v with shape (B * nHead, H * W, C)
+        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
+
+        attn = (q * self.scale) @ k.transpose(-2, -1)
+
+        if self.use_rel_pos:
+            attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
+
+        attn = attn.softmax(dim=-1)
+        x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
+        x = self.proj(x)
+
+        return x
+
+
+def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
+    """
+    Partition into non-overlapping windows with padding if needed.
+    Args:
+        x (tensor): input tokens with [B, H, W, C].
+        window_size (int): window size.
+
+    Returns:
+        windows: windows after partition with [B * num_windows, window_size, window_size, C].
+
+ (Hp, Wp): padded height and width before partition + """ + B, H, W, C = x.shape + + pad_h = (window_size - H % window_size) % window_size + pad_w = (window_size - W % window_size) % window_size + if pad_h > 0 or pad_w > 0: + x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h)) + Hp, Wp = H + pad_h, W + pad_w + + x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows, (Hp, Wp) + + +def window_unpartition( + windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int] +) -> torch.Tensor: + """ + Window unpartition into original sequences and removing padding. + Args: + x (tensor): input tokens with [B * num_windows, window_size, window_size, C]. + window_size (int): window size. + pad_hw (Tuple): padded height and width (Hp, Wp). + hw (Tuple): original height and width (H, W) before padding. + + Returns: + x: unpartitioned sequences with [B, H, W, C]. + """ + Hp, Wp = pad_hw + H, W = hw + B = windows.shape[0] // (Hp * Wp // window_size // window_size) + x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1) + + if Hp > H or Wp > W: + x = x[:, :H, :W, :].contiguous() + return x + + +def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor: + """ + Get relative positional embeddings according to the relative positions of + query and key sizes. + Args: + q_size (int): size of query q. + k_size (int): size of key k. + rel_pos (Tensor): relative position embeddings (L, C). + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + rel_pos_resized = F.interpolate( + rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), + size=max_rel_dist, + mode="linear", + ) + rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) + else: + rel_pos_resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) + k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) + relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0) + + return rel_pos_resized[relative_coords.long()] + + +def add_decomposed_rel_pos( + attn: torch.Tensor, + q: torch.Tensor, + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + q_size: Tuple[int, int], + k_size: Tuple[int, int], +) -> torch.Tensor: + """ + Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`. + https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950 + Args: + attn (Tensor): attention map. + q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C). + rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis. + rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis. + q_size (Tuple): spatial sequence size of query q with (q_h, q_w). + k_size (Tuple): spatial sequence size of key k with (k_h, k_w). + + Returns: + attn (Tensor): attention map with added relative positional embeddings. 
+    """
+    q_h, q_w = q_size
+    k_h, k_w = k_size
+    Rh = get_rel_pos(q_h, k_h, rel_pos_h)
+    Rw = get_rel_pos(q_w, k_w, rel_pos_w)
+
+    B, _, dim = q.shape
+    r_q = q.reshape(B, q_h, q_w, dim)
+    rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
+    rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
+
+    attn = (
+        attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
+    ).view(B, q_h * q_w, k_h * k_w)
+
+    return attn
+
+
+class PatchEmbed(nn.Module):
+    """
+    Image to Patch Embedding.
+    """
+
+    def __init__(
+        self,
+        kernel_size: Tuple[int, int] = (16, 16),
+        stride: Tuple[int, int] = (16, 16),
+        padding: Tuple[int, int] = (0, 0),
+        in_chans: int = 3,
+        embed_dim: int = 768,
+    ) -> None:
+        """
+        Args:
+            kernel_size (Tuple): kernel size of the projection layer.
+            stride (Tuple): stride of the projection layer.
+            padding (Tuple): padding size of the projection layer.
+            in_chans (int): Number of input image channels.
+            embed_dim (int): Patch embedding dimension.
+        """
+        super().__init__()
+
+        self.proj = nn.Conv2d(
+            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
+        )
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        x = self.proj(x)
+        # B C H W -> B H W C
+        x = x.permute(0, 2, 3, 1)
+        return x
diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/mask_decoder.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/mask_decoder.py
new file mode 100644
index 0000000..2f4f184
--- /dev/null
+++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/mask_decoder.py
@@ -0,0 +1,178 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+# from icecream import ic
+
+from typing import List, Tuple, Type
+
+from .common import LayerNorm2d
+
+
+class MaskDecoder(nn.Module):
+    def __init__(
+        self,
+        *,
+        transformer_dim: int,
+        transformer: nn.Module,
+        num_multimask_outputs: int = 3,
+        activation: Type[nn.Module] = nn.GELU,
+        iou_head_depth: int = 3,
+        iou_head_hidden_dim: int = 256,
+    ) -> None:
+        """
+        Predicts masks given an image and prompt embeddings, using a
+        transformer architecture.
+ + Arguments: + transformer_dim (int): the channel dimension of the transformer + transformer (nn.Module): the transformer used to predict masks + num_multimask_outputs (int): the number of masks to predict + when disambiguating masks + activation (nn.Module): the type of activation to use when + upscaling masks + iou_head_depth (int): the depth of the MLP used to predict + mask quality + iou_head_hidden_dim (int): the hidden dimension of the MLP + used to predict mask quality + """ + super().__init__() + self.transformer_dim = transformer_dim + self.transformer = transformer + + self.num_multimask_outputs = num_multimask_outputs + + self.iou_token = nn.Embedding(1, transformer_dim) + self.num_mask_tokens = num_multimask_outputs + 1 + self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim) + + self.output_upscaling = nn.Sequential( + nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2), + LayerNorm2d(transformer_dim // 4), + activation(), + nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2), + activation(), + ) + self.output_hypernetworks_mlps = nn.ModuleList( + [ + MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3) + for i in range(self.num_mask_tokens) + ] + ) + + self.iou_prediction_head = MLP( + transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth + ) + + def forward( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + multimask_output: bool, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Predict masks given image and prompt embeddings. + + Arguments: + image_embeddings (torch.Tensor): the embeddings from the image encoder + image_pe (torch.Tensor): positional encoding with the shape of image_embeddings + sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes + dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs + multimask_output (bool): Whether to return multiple masks or a single + mask. + + Returns: + torch.Tensor: batched predicted masks + torch.Tensor: batched predictions of mask quality + """ + masks, iou_pred = self.predict_masks( + image_embeddings=image_embeddings, + image_pe=image_pe, + sparse_prompt_embeddings=sparse_prompt_embeddings, + dense_prompt_embeddings=dense_prompt_embeddings, + ) + + # Select the correct mask or masks for output + # if multimask_output: + # mask_slice = slice(1, None) + # else: + # mask_slice = slice(0, 1) + # masks = masks[:, mask_slice, :, :] + # iou_pred = iou_pred[:, mask_slice] + + # Prepare output + return masks, iou_pred + + def predict_masks( + self, + image_embeddings: torch.Tensor, + image_pe: torch.Tensor, + sparse_prompt_embeddings: torch.Tensor, + dense_prompt_embeddings: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Predicts masks. 
See 'forward' for more details."""
+        # Concatenate output tokens
+        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
+        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
+        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
+
+        # Expand per-image data in batch direction to be per-mask
+        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
+        src = src + dense_prompt_embeddings
+        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
+        b, c, h, w = src.shape
+
+        # Run the transformer
+        hs, src = self.transformer(src, pos_src, tokens)
+        iou_token_out = hs[:, 0, :]
+        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
+
+        # Upscale mask embeddings and predict masks using the mask tokens
+        src = src.transpose(1, 2).view(b, c, h, w)
+        upscaled_embedding = self.output_upscaling(src)
+        hyper_in_list: List[torch.Tensor] = []
+        for i in range(self.num_mask_tokens):
+            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
+        hyper_in = torch.stack(hyper_in_list, dim=1)  # [b, num_mask_tokens, c]
+
+        b, c, h, w = upscaled_embedding.shape  # [b, c, h, w] after 4x upscaling
+        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)  # e.g. [1, 4, 256, 256]; 256 = 4 * 64, 4x the image embedding size
+
+        # Generate mask quality predictions
+        iou_pred = self.iou_prediction_head(iou_token_out)
+
+        return masks, iou_pred
+
+
+# Lightly adapted from
+# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
+class MLP(nn.Module):
+    def __init__(
+        self,
+        input_dim: int,
+        hidden_dim: int,
+        output_dim: int,
+        num_layers: int,
+        sigmoid_output: bool = False,
+    ) -> None:
+        super().__init__()
+        self.num_layers = num_layers
+        h = [hidden_dim] * (num_layers - 1)
+        self.layers = nn.ModuleList(
+            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
+        )
+        self.sigmoid_output = sigmoid_output
+
+    def forward(self, x):
+        for i, layer in enumerate(self.layers):
+            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
+        if self.sigmoid_output:
+            x = torch.sigmoid(x)  # F.sigmoid is deprecated
+        return x
diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/prompt_encoder.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/prompt_encoder.py
new file mode 100644
index 0000000..5989635
--- /dev/null
+++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/prompt_encoder.py
@@ -0,0 +1,214 @@
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+
+import numpy as np
+import torch
+from torch import nn
+
+from typing import Any, Optional, Tuple, Type
+
+from .common import LayerNorm2d
+
+
+class PromptEncoder(nn.Module):
+    def __init__(
+        self,
+        embed_dim: int,
+        image_embedding_size: Tuple[int, int],
+        input_image_size: Tuple[int, int],
+        mask_in_chans: int,
+        activation: Type[nn.Module] = nn.GELU,
+    ) -> None:
+        """
+        Encodes prompts for input to SAM's mask decoder.
+
+        Arguments:
+          embed_dim (int): The prompts' embedding dimension
+          image_embedding_size (tuple(int, int)): The spatial size of the
+            image embedding, as (H, W).
+          input_image_size (tuple(int, int)): The padded size of the image as input
+            to the image encoder, as (H, W).
+          mask_in_chans (int): The number of hidden channels used for
+            encoding input masks.
+ activation (nn.Module): The activation to use when encoding + input masks. + """ + super().__init__() + self.embed_dim = embed_dim + self.input_image_size = input_image_size + self.image_embedding_size = image_embedding_size + self.pe_layer = PositionEmbeddingRandom(embed_dim // 2) + + self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners + point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)] + self.point_embeddings = nn.ModuleList(point_embeddings) + self.not_a_point_embed = nn.Embedding(1, embed_dim) + + self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1]) + self.mask_downscaling = nn.Sequential( + nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans // 4), + activation(), + nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2), + LayerNorm2d(mask_in_chans), + activation(), + nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1), + ) # downsample to 1/4 + self.no_mask_embed = nn.Embedding(1, embed_dim) + + def get_dense_pe(self) -> torch.Tensor: + """ + Returns the positional encoding used to encode point prompts, + applied to a dense set of points the shape of the image encoding. + + Returns: + torch.Tensor: Positional encoding with shape + 1x(embed_dim)x(embedding_h)x(embedding_w) + """ + return self.pe_layer(self.image_embedding_size).unsqueeze(0) + + def _embed_points( + self, + points: torch.Tensor, + labels: torch.Tensor, + pad: bool, + ) -> torch.Tensor: + """Embeds point prompts.""" + points = points + 0.5 # Shift to center of pixel + if pad: + padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device) + padding_label = -torch.ones((labels.shape[0], 1), device=labels.device) + points = torch.cat([points, padding_point], dim=1) + labels = torch.cat([labels, padding_label], dim=1) + point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size) + point_embedding[labels == -1] = 0.0 + point_embedding[labels == -1] += self.not_a_point_embed.weight + point_embedding[labels == 0] += self.point_embeddings[0].weight + point_embedding[labels == 1] += self.point_embeddings[1].weight + return point_embedding + + def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor: + """Embeds box prompts.""" + boxes = boxes + 0.5 # Shift to center of pixel + coords = boxes.reshape(-1, 2, 2) + corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size) + corner_embedding[:, 0, :] += self.point_embeddings[2].weight + corner_embedding[:, 1, :] += self.point_embeddings[3].weight + return corner_embedding + + def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor: + """Embeds mask inputs.""" + mask_embedding = self.mask_downscaling(masks) + return mask_embedding + + def _get_batch_size( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> int: + """ + Gets the batch size of the output given the batch size of the input prompts. 
+ """ + if points is not None: + return points[0].shape[0] + elif boxes is not None: + return boxes.shape[0] + elif masks is not None: + return masks.shape[0] + else: + return 1 + + def _get_device(self) -> torch.device: + return self.point_embeddings[0].weight.device + + def forward( + self, + points: Optional[Tuple[torch.Tensor, torch.Tensor]], + boxes: Optional[torch.Tensor], + masks: Optional[torch.Tensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Embeds different types of prompts, returning both sparse and dense + embeddings. + + Arguments: + points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates + and labels to embed. + boxes (torch.Tensor or none): boxes to embed + masks (torch.Tensor or none): masks to embed + + Returns: + torch.Tensor: sparse embeddings for the points and boxes, with shape + BxNx(embed_dim), where N is determined by the number of input points + and boxes. + torch.Tensor: dense embeddings for the masks, in the shape + Bx(embed_dim)x(embed_H)x(embed_W) + """ + bs = self._get_batch_size(points, boxes, masks) + sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device()) + if points is not None: + coords, labels = points + point_embeddings = self._embed_points(coords, labels, pad=(boxes is None)) + sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1) + if boxes is not None: + box_embeddings = self._embed_boxes(boxes) + sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1) + + if masks is not None: + dense_embeddings = self._embed_masks(masks) + else: + dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand( + bs, -1, self.image_embedding_size[0], self.image_embedding_size[1] + ) + + return sparse_embeddings, dense_embeddings + + +class PositionEmbeddingRandom(nn.Module): + """ + Positional encoding using random spatial frequencies. + """ + + def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None: + super().__init__() + if scale is None or scale <= 0.0: + scale = 1.0 + self.register_buffer( + "positional_encoding_gaussian_matrix", + scale * torch.randn((2, num_pos_feats)), + ) + + def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor: + """Positionally encode points that are normalized to [0,1].""" + # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape + coords = 2 * coords - 1 + coords = coords @ self.positional_encoding_gaussian_matrix + coords = 2 * np.pi * coords + # outputs d_1 x ... 
x d_n x C shape + return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1) + + def forward(self, size: Tuple[int, int]) -> torch.Tensor: + """Generate positional encoding for a grid of the specified size.""" + h, w = size + device: Any = self.positional_encoding_gaussian_matrix.device + grid = torch.ones((h, w), device=device, dtype=torch.float32) + y_embed = grid.cumsum(dim=0) - 0.5 + x_embed = grid.cumsum(dim=1) - 0.5 + y_embed = y_embed / h + x_embed = x_embed / w + + pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1)) + return pe.permute(2, 0, 1) # C x H x W + + def forward_with_coords( + self, coords_input: torch.Tensor, image_size: Tuple[int, int] + ) -> torch.Tensor: + """Positionally encode points that are not normalized to [0,1].""" + coords = coords_input.clone() + coords[:, :, 0] = coords[:, :, 0] / image_size[1] + coords[:, :, 1] = coords[:, :, 1] / image_size[0] + return self._pe_encoding(coords.to(torch.float)) # B x N x C diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/sam.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/sam.py new file mode 100644 index 0000000..50f5088 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/sam.py @@ -0,0 +1,208 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import nn +from torch.nn import functional as F +# from icecream import ic + +from typing import Any, Dict, List, Tuple + +from .image_encoder import ImageEncoderViT +from .mask_decoder import MaskDecoder +from .prompt_encoder import PromptEncoder + + +class Sam(nn.Module): + mask_threshold: float = 0.0 + image_format: str = "RGB" + + def __init__( + self, + image_encoder: ImageEncoderViT, + prompt_encoder: PromptEncoder, + mask_decoder: MaskDecoder, + pixel_mean: List[float] = [123.675, 116.28, 103.53], + pixel_std: List[float] = [58.395, 57.12, 57.375], + ) -> None: + """ + SAM predicts object masks from an image and input prompts. + + Arguments: + image_encoder (ImageEncoderViT): The backbone used to encode the + image into image embeddings that allow for efficient mask prediction. + prompt_encoder (PromptEncoder): Encodes various types of input prompts. + mask_decoder (MaskDecoder): Predicts masks from the image embeddings + and encoded prompts. + pixel_mean (list(float)): Mean values for normalizing pixels in the input image. + pixel_std (list(float)): Std values for normalizing pixels in the input image. 
+ """ + super().__init__() + self.image_encoder = image_encoder + self.prompt_encoder = prompt_encoder + self.mask_decoder = mask_decoder + self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) + self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) + + @property + def device(self) -> Any: + return self.pixel_mean.device + + def forward(self, batched_input, multimask_output, image_size): + if isinstance(batched_input, list): + outputs = self.forward_test(batched_input, multimask_output) + else: + outputs = self.forward_train(batched_input, multimask_output, image_size) + return outputs + + def forward_train(self, batched_input, multimask_output, image_size): + input_images = self.preprocess(batched_input) + image_embeddings = self.image_encoder(input_images) + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=None, boxes=None, masks=None + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=image_embeddings, + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=(image_size, image_size), + original_size=(image_size, image_size) + ) + outputs = { + 'masks': masks, + 'iou_predictions': iou_predictions, + 'low_res_logits': low_res_masks + } + return outputs + + @torch.no_grad() + def forward_test( + self, + batched_input: List[Dict[str, Any]], + multimask_output: bool, + ) -> List[Dict[str, torch.Tensor]]: + """ + Predicts masks end-to-end from provided images and prompts. + If prompts are not known in advance, using SamPredictor is + recommended over calling the model directly. + + Arguments: + batched_input (list(dict)): A list over input images, each a + dictionary with the following keys. A prompt key can be + excluded if it is not present. + 'image': The image as a torch tensor in 3xHxW format, + already transformed for input to the model. + 'original_size': (tuple(int, int)) The original size of + the image before transformation, as (H, W). + 'point_coords': (torch.Tensor) Batched point prompts for + this image, with shape BxNx2. Already transformed to the + input frame of the model. + 'point_labels': (torch.Tensor) Batched labels for point prompts, + with shape BxN. + 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4. + Already transformed to the input frame of the model. + 'mask_inputs': (torch.Tensor) Batched mask inputs to the model, + in the form Bx1xHxW. + multimask_output (bool): Whether the model should predict multiple + disambiguating masks, or return a single mask. + + Returns: + (list(dict)): A list over input images, where each element is + as dictionary with the following keys. + 'masks': (torch.Tensor) Batched binary mask predictions, + with shape BxCxHxW, where B is the number of input promts, + C is determiend by multimask_output, and (H, W) is the + original size of the image. + 'iou_predictions': (torch.Tensor) The model's predictions + of mask quality, in shape BxC. + 'low_res_logits': (torch.Tensor) Low resolution logits with + shape BxCxHxW, where H=W=256. Can be passed as mask input + to subsequent iterations of prediction. 
+ """ + input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0) + image_embeddings = self.image_encoder(input_images) + + outputs = [] + for image_record, curr_embedding in zip(batched_input, image_embeddings): + if "point_coords" in image_record: + points = (image_record["point_coords"], image_record["point_labels"]) + else: + points = None + sparse_embeddings, dense_embeddings = self.prompt_encoder( + points=points, + boxes=image_record.get("boxes", None), + masks=image_record.get("mask_inputs", None), + ) + low_res_masks, iou_predictions = self.mask_decoder( + image_embeddings=curr_embedding.unsqueeze(0), + image_pe=self.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + masks = self.postprocess_masks( + low_res_masks, + input_size=image_record["image"].shape[-2:], + original_size=image_record["original_size"], + ) + masks = masks > self.mask_threshold + outputs.append( + { + "masks": masks, + "iou_predictions": iou_predictions, + "low_res_logits": low_res_masks, + } + ) + return outputs + + def postprocess_masks( + self, + masks: torch.Tensor, + input_size: Tuple[int, ...], + original_size: Tuple[int, ...], + ) -> torch.Tensor: + """ + Remove padding and upscale masks to the original image size. + + Arguments: + masks (torch.Tensor): Batched masks from the mask_decoder, + in BxCxHxW format. + input_size (tuple(int, int)): The size of the image input to the + model, in (H, W) format. Used to remove padding. + original_size (tuple(int, int)): The original size of the image + before resizing for input to the model, in (H, W) format. + + Returns: + (torch.Tensor): Batched masks in BxCxHxW format, where (H, W) + is given by original_size. + """ + masks = F.interpolate( + masks, + (self.image_encoder.img_size, self.image_encoder.img_size), + mode="bilinear", + align_corners=False, + ) + masks = masks[..., : input_size[0], : input_size[1]] + masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False) + return masks + + def preprocess(self, x: torch.Tensor) -> torch.Tensor: + """Normalize pixel values and pad to a square input.""" + # Normalize colors + x = (x - self.pixel_mean) / self.pixel_std + + # Pad + h, w = x.shape[-2:] + padh = self.image_encoder.img_size - h + padw = self.image_encoder.img_size - w + x = F.pad(x, (0, padw, 0, padh)) + return x + diff --git a/docker/template/src/nnunetv2/nets/segment_anything/modeling/transformer.py b/docker/template/src/nnunetv2/nets/segment_anything/modeling/transformer.py new file mode 100644 index 0000000..f1a2812 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/modeling/transformer.py @@ -0,0 +1,240 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +from torch import Tensor, nn + +import math +from typing import Tuple, Type + +from .common import MLPBlock + + +class TwoWayTransformer(nn.Module): + def __init__( + self, + depth: int, + embedding_dim: int, + num_heads: int, + mlp_dim: int, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + ) -> None: + """ + A transformer decoder that attends to an input image using + queries whose positional embedding is supplied. 
+ + Args: + depth (int): number of layers in the transformer + embedding_dim (int): the channel dimension for the input embeddings + num_heads (int): the number of heads for multihead attention. Must + divide embedding_dim + mlp_dim (int): the channel dimension internal to the MLP block + activation (nn.Module): the activation to use in the MLP block + """ + super().__init__() + self.depth = depth + self.embedding_dim = embedding_dim + self.num_heads = num_heads + self.mlp_dim = mlp_dim + self.layers = nn.ModuleList() + + for i in range(depth): + self.layers.append( + TwoWayAttentionBlock( + embedding_dim=embedding_dim, + num_heads=num_heads, + mlp_dim=mlp_dim, + activation=activation, + attention_downsample_rate=attention_downsample_rate, + skip_first_layer_pe=(i == 0), + ) + ) + + self.final_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm_final_attn = nn.LayerNorm(embedding_dim) + + def forward( + self, + image_embedding: Tensor, + image_pe: Tensor, + point_embedding: Tensor, + ) -> Tuple[Tensor, Tensor]: + """ + Args: + image_embedding (torch.Tensor): image to attend to. Should be shape + B x embedding_dim x h x w for any h and w. + image_pe (torch.Tensor): the positional encoding to add to the image. Must + have the same shape as image_embedding. + point_embedding (torch.Tensor): the embedding to add to the query points. + Must have shape B x N_points x embedding_dim for any N_points. + + Returns: + torch.Tensor: the processed point_embedding + torch.Tensor: the processed image_embedding + """ + # BxCxHxW -> BxHWxC == B x N_image_tokens x C + bs, c, h, w = image_embedding.shape + image_embedding = image_embedding.flatten(2).permute(0, 2, 1) + image_pe = image_pe.flatten(2).permute(0, 2, 1) + + # Prepare queries + queries = point_embedding + keys = image_embedding + + # Apply transformer blocks and final layernorm + for layer in self.layers: + queries, keys = layer( + queries=queries, + keys=keys, + query_pe=point_embedding, + key_pe=image_pe, + ) + + # Apply the final attention layer from the points to the image + q = queries + point_embedding + k = keys + image_pe + attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm_final_attn(queries) + + return queries, keys + + +class TwoWayAttentionBlock(nn.Module): + def __init__( + self, + embedding_dim: int, + num_heads: int, + mlp_dim: int = 2048, + activation: Type[nn.Module] = nn.ReLU, + attention_downsample_rate: int = 2, + skip_first_layer_pe: bool = False, + ) -> None: + """ + A transformer block with four layers: (1) self-attention of sparse + inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp + block on sparse inputs, and (4) cross attention of dense inputs to sparse + inputs.
+ + Arguments: + embedding_dim (int): the channel dimension of the embeddings + num_heads (int): the number of heads in the attention layers + mlp_dim (int): the hidden dimension of the mlp block + activation (nn.Module): the activation of the mlp block + skip_first_layer_pe (bool): skip the PE on the first layer + """ + super().__init__() + self.self_attn = Attention(embedding_dim, num_heads) + self.norm1 = nn.LayerNorm(embedding_dim) + + self.cross_attn_token_to_image = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + self.norm2 = nn.LayerNorm(embedding_dim) + + self.mlp = MLPBlock(embedding_dim, mlp_dim, activation) + self.norm3 = nn.LayerNorm(embedding_dim) + + self.norm4 = nn.LayerNorm(embedding_dim) + self.cross_attn_image_to_token = Attention( + embedding_dim, num_heads, downsample_rate=attention_downsample_rate + ) + + self.skip_first_layer_pe = skip_first_layer_pe + + def forward( + self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor + ) -> Tuple[Tensor, Tensor]: + # Self attention block + if self.skip_first_layer_pe: + queries = self.self_attn(q=queries, k=queries, v=queries) + else: + q = queries + query_pe + attn_out = self.self_attn(q=q, k=q, v=queries) + queries = queries + attn_out + queries = self.norm1(queries) + + # Cross attention block, tokens attending to image embedding + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys) + queries = queries + attn_out + queries = self.norm2(queries) + + # MLP block + mlp_out = self.mlp(queries) + queries = queries + mlp_out + queries = self.norm3(queries) + + # Cross attention block, image embedding attending to tokens + q = queries + query_pe + k = keys + key_pe + attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries) + keys = keys + attn_out + keys = self.norm4(keys) + + return queries, keys + + +class Attention(nn.Module): + """ + An attention layer that allows for downscaling the size of the embedding + after projection to queries, keys, and values. + """ + + def __init__( + self, + embedding_dim: int, + num_heads: int, + downsample_rate: int = 1, + ) -> None: + super().__init__() + self.embedding_dim = embedding_dim + self.internal_dim = embedding_dim // downsample_rate + self.num_heads = num_heads + assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim." 
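+        # Worked shape example (illustrative numbers, not values fixed by this
+        # module): with embedding_dim=256, downsample_rate=2 and num_heads=8,
+        # the projections below map tokens from 256 to internal_dim=128
+        # channels, each head then attends over c_per_head = 128 // 8 = 16
+        # channels, and out_proj maps the recombined heads back to 256 channels.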
+ + self.q_proj = nn.Linear(embedding_dim, self.internal_dim) + self.k_proj = nn.Linear(embedding_dim, self.internal_dim) + self.v_proj = nn.Linear(embedding_dim, self.internal_dim) + self.out_proj = nn.Linear(self.internal_dim, embedding_dim) + + def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor: + b, n, c = x.shape + x = x.reshape(b, n, num_heads, c // num_heads) + return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head + + def _recombine_heads(self, x: Tensor) -> Tensor: + b, n_heads, n_tokens, c_per_head = x.shape + x = x.transpose(1, 2) + return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C + + def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor: + # Input projections + q = self.q_proj(q) + k = self.k_proj(k) + v = self.v_proj(v) + + # Separate into heads + q = self._separate_heads(q, self.num_heads) + k = self._separate_heads(k, self.num_heads) + v = self._separate_heads(v, self.num_heads) + + # Attention + _, _, _, c_per_head = q.shape + attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens + attn = attn / math.sqrt(c_per_head) + attn = torch.softmax(attn, dim=-1) + + # Get output + out = attn @ v + out = self._recombine_heads(out) + out = self.out_proj(out) + + return out diff --git a/docker/template/src/nnunetv2/nets/segment_anything/predictor.py b/docker/template/src/nnunetv2/nets/segment_anything/predictor.py new file mode 100644 index 0000000..5af7540 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/predictor.py @@ -0,0 +1,269 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +from nnunetv2.nets.segment_anything.modeling import Sam + +from typing import Optional, Tuple + +from .utils.transforms import ResizeLongestSide + + +class SamPredictor: + def __init__( + self, + sam_model: Sam, + ) -> None: + """ + Uses SAM to calculate the image embedding for an image, and then + allows repeated, efficient mask prediction given prompts. + + Arguments: + sam_model (Sam): The model to use for mask prediction. + """ + super().__init__() + self.model = sam_model + self.transform = ResizeLongestSide(sam_model.image_encoder.img_size) + self.reset_image() + + def set_image( + self, + image: np.ndarray, + image_format: str = "RGB", + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. + + Arguments: + image (np.ndarray): The image for calculating masks. Expects an + image in HWC uint8 format, with pixel values in [0, 255]. + image_format (str): The color format of the image, in ['RGB', 'BGR']. + """ + assert image_format in [ + "RGB", + "BGR", + ], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
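+        # Typical usage (sketch; assumes `sam` is an already-loaded Sam instance
+        # and `img` an HxWx3 uint8 numpy array):
+        #   predictor = SamPredictor(sam)
+        #   predictor.set_image(img)                  # runs the image encoder once
+        #   masks, scores, logits = predictor.predict(
+        #       point_coords=np.array([[100, 200]]),  # one (X, Y) foreground click
+        #       point_labels=np.array([1]),
+        #       multimask_output=True,
+        #   )
+        # Later predict() calls reuse the cached embedding, so prompting stays cheap.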
+ if image_format != self.model.image_format: + image = image[..., ::-1] + + # Transform the image to the form expected by the model + input_image = self.transform.apply_image(image) + input_image_torch = torch.as_tensor(input_image, device=self.device) + input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :] + + self.set_torch_image(input_image_torch, image.shape[:2]) + + @torch.no_grad() + def set_torch_image( + self, + transformed_image: torch.Tensor, + original_image_size: Tuple[int, ...], + ) -> None: + """ + Calculates the image embeddings for the provided image, allowing + masks to be predicted with the 'predict' method. Expects the input + image to be already transformed to the format expected by the model. + + Arguments: + transformed_image (torch.Tensor): The input image, with shape + 1x3xHxW, which has been transformed with ResizeLongestSide. + original_image_size (tuple(int, int)): The size of the image + before transformation, in (H, W) format. + """ + assert ( + len(transformed_image.shape) == 4 + and transformed_image.shape[1] == 3 + and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size + ), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}." + self.reset_image() + + self.original_size = original_image_size + self.input_size = tuple(transformed_image.shape[-2:]) + input_image = self.model.preprocess(transformed_image) + self.features = self.model.image_encoder(input_image) + self.is_image_set = True + + def predict( + self, + point_coords: Optional[np.ndarray] = None, + point_labels: Optional[np.ndarray] = None, + box: Optional[np.ndarray] = None, + mask_input: Optional[np.ndarray] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + + Arguments: + point_coords (np.ndarray or None): A Nx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (np.ndarray or None): A length N array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A length 4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form 1xHxW, where + for SAM, H=W=256. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (np.ndarray): The output masks in CxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (np.ndarray): An array of length C containing the model's + predictions for the quality of each mask. + (np.ndarray): An array of shape CxHxW, where C is the number + of masks and H=W=256. These low resolution logits can be passed to + a subsequent iteration as mask input. + """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) 
before mask prediction.") + + # Transform input prompts + coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None + if point_coords is not None: + assert ( + point_labels is not None + ), "point_labels must be supplied if point_coords is supplied." + point_coords = self.transform.apply_coords(point_coords, self.original_size) + coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device) + labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device) + coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :] + if box is not None: + box = self.transform.apply_boxes(box, self.original_size) + box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device) + box_torch = box_torch[None, :] + if mask_input is not None: + mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device) + mask_input_torch = mask_input_torch[None, :, :, :] + + masks, iou_predictions, low_res_masks = self.predict_torch( + coords_torch, + labels_torch, + box_torch, + mask_input_torch, + multimask_output, + return_logits=return_logits, + ) + + masks = masks[0].detach().cpu().numpy() + iou_predictions = iou_predictions[0].detach().cpu().numpy() + low_res_masks = low_res_masks[0].detach().cpu().numpy() + return masks, iou_predictions, low_res_masks + + @torch.no_grad() + def predict_torch( + self, + point_coords: Optional[torch.Tensor], + point_labels: Optional[torch.Tensor], + boxes: Optional[torch.Tensor] = None, + mask_input: Optional[torch.Tensor] = None, + multimask_output: bool = True, + return_logits: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Predict masks for the given input prompts, using the currently set image. + Input prompts are batched torch tensors and are expected to already be + transformed to the input frame using ResizeLongestSide. + + Arguments: + point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the + model. Each point is in (X,Y) in pixels. + point_labels (torch.Tensor or None): A BxN array of labels for the + point prompts. 1 indicates a foreground point and 0 indicates a + background point. + box (np.ndarray or None): A Bx4 array given a box prompt to the + model, in XYXY format. + mask_input (np.ndarray): A low resolution mask input to the model, typically + coming from a previous prediction iteration. Has form Bx1xHxW, where + for SAM, H=W=256. Masks returned by a previous iteration of the + predict method do not need further transformation. + multimask_output (bool): If true, the model will return three masks. + For ambiguous input prompts (such as a single click), this will often + produce better masks than a single prediction. If only a single + mask is needed, the model's predicted quality score can be used + to select the best mask. For non-ambiguous prompts, such as multiple + input prompts, multimask_output=False can give better results. + return_logits (bool): If true, returns un-thresholded masks logits + instead of a binary mask. + + Returns: + (torch.Tensor): The output masks in BxCxHxW format, where C is the + number of masks, and (H, W) is the original image size. + (torch.Tensor): An array of shape BxC containing the model's + predictions for the quality of each mask. + (torch.Tensor): An array of shape BxCxHxW, where C is the number + of masks and H=W=256. These low res logits can be passed to + a subsequent iteration as mask input. 
+ """ + if not self.is_image_set: + raise RuntimeError("An image must be set with .set_image(...) before mask prediction.") + + if point_coords is not None: + points = (point_coords, point_labels) + else: + points = None + + # Embed prompts + sparse_embeddings, dense_embeddings = self.model.prompt_encoder( + points=points, + boxes=boxes, + masks=mask_input, + ) + + # Predict masks + low_res_masks, iou_predictions = self.model.mask_decoder( + image_embeddings=self.features, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embeddings, + dense_prompt_embeddings=dense_embeddings, + multimask_output=multimask_output, + ) + + # Upscale the masks to the original image resolution + masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size) + + if not return_logits: + masks = masks > self.model.mask_threshold + + return masks, iou_predictions, low_res_masks + + def get_image_embedding(self) -> torch.Tensor: + """ + Returns the image embeddings for the currently set image, with + shape 1xCxHxW, where C is the embedding dimension and (H,W) are + the embedding spatial dimension of SAM (typically C=256, H=W=64). + """ + if not self.is_image_set: + raise RuntimeError( + "An image must be set with .set_image(...) to generate an embedding." + ) + assert self.features is not None, "Features must exist if an image has been set." + return self.features + + @property + def device(self) -> torch.device: + return self.model.device + + def reset_image(self) -> None: + """Resets the currently set image.""" + self.is_image_set = False + self.features = None + self.orig_h = None + self.orig_w = None + self.input_h = None + self.input_w = None diff --git a/docker/template/src/nnunetv2/nets/segment_anything/utils/__init__.py b/docker/template/src/nnunetv2/nets/segment_anything/utils/__init__.py new file mode 100644 index 0000000..5277f46 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/utils/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/docker/template/src/nnunetv2/nets/segment_anything/utils/amg.py b/docker/template/src/nnunetv2/nets/segment_anything/utils/amg.py new file mode 100644 index 0000000..3a13777 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/utils/amg.py @@ -0,0 +1,346 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch + +import math +from copy import deepcopy +from itertools import product +from typing import Any, Dict, Generator, ItemsView, List, Tuple + + +class MaskData: + """ + A structure for storing masks and their related data in batched format. + Implements basic filtering and concatenation. + """ + + def __init__(self, **kwargs) -> None: + for v in kwargs.values(): + assert isinstance( + v, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." + self._stats = dict(**kwargs) + + def __setitem__(self, key: str, item: Any) -> None: + assert isinstance( + item, (list, np.ndarray, torch.Tensor) + ), "MaskData only supports list, numpy arrays, and torch tensors." 
+ self._stats[key] = item + + def __delitem__(self, key: str) -> None: + del self._stats[key] + + def __getitem__(self, key: str) -> Any: + return self._stats[key] + + def items(self) -> ItemsView[str, Any]: + return self._stats.items() + + def filter(self, keep: torch.Tensor) -> None: + for k, v in self._stats.items(): + if v is None: + self._stats[k] = None + elif isinstance(v, torch.Tensor): + self._stats[k] = v[torch.as_tensor(keep, device=v.device)] + elif isinstance(v, np.ndarray): + self._stats[k] = v[keep.detach().cpu().numpy()] + elif isinstance(v, list) and keep.dtype == torch.bool: + self._stats[k] = [a for i, a in enumerate(v) if keep[i]] + elif isinstance(v, list): + self._stats[k] = [v[i] for i in keep] + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def cat(self, new_stats: "MaskData") -> None: + for k, v in new_stats.items(): + if k not in self._stats or self._stats[k] is None: + self._stats[k] = deepcopy(v) + elif isinstance(v, torch.Tensor): + self._stats[k] = torch.cat([self._stats[k], v], dim=0) + elif isinstance(v, np.ndarray): + self._stats[k] = np.concatenate([self._stats[k], v], axis=0) + elif isinstance(v, list): + self._stats[k] = self._stats[k] + deepcopy(v) + else: + raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.") + + def to_numpy(self) -> None: + for k, v in self._stats.items(): + if isinstance(v, torch.Tensor): + self._stats[k] = v.detach().cpu().numpy() + + +def is_box_near_crop_edge( + boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0 +) -> torch.Tensor: + """Filter masks at the edge of a crop, but not at the edge of the original image.""" + crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device) + orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device) + boxes = uncrop_boxes_xyxy(boxes, crop_box).float() + near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0) + near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0) + near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge) + return torch.any(near_crop_edge, dim=1) + + +def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor: + box_xywh = deepcopy(box_xyxy) + box_xywh[2] = box_xywh[2] - box_xywh[0] + box_xywh[3] = box_xywh[3] - box_xywh[1] + return box_xywh + + +def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]: + assert len(args) > 0 and all( + len(a) == len(args[0]) for a in args + ), "Batched iteration must have inputs of all the same size." + n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0) + for b in range(n_batches): + yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args] + + +def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]: + """ + Encodes masks to an uncompressed RLE, in the format expected by + pycoco tools. 
+ """ + # Put in fortran order and flatten h,w + b, h, w = tensor.shape + tensor = tensor.permute(0, 2, 1).flatten(1) + + # Compute change indices + diff = tensor[:, 1:] ^ tensor[:, :-1] + change_indices = diff.nonzero() + + # Encode run length + out = [] + for i in range(b): + cur_idxs = change_indices[change_indices[:, 0] == i, 1] + cur_idxs = torch.cat( + [ + torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device), + cur_idxs + 1, + torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), + ] + ) + btw_idxs = cur_idxs[1:] - cur_idxs[:-1] + counts = [] if tensor[i, 0] == 0 else [0] + counts.extend(btw_idxs.detach().cpu().tolist()) + out.append({"size": [h, w], "counts": counts}) + return out + + +def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray: + """Compute a binary mask from an uncompressed RLE.""" + h, w = rle["size"] + mask = np.empty(h * w, dtype=bool) + idx = 0 + parity = False + for count in rle["counts"]: + mask[idx : idx + count] = parity + idx += count + parity ^= True + mask = mask.reshape(w, h) + return mask.transpose() # Put in C order + + +def area_from_rle(rle: Dict[str, Any]) -> int: + return sum(rle["counts"][1::2]) + + +def calculate_stability_score( + masks: torch.Tensor, mask_threshold: float, threshold_offset: float +) -> torch.Tensor: + """ + Computes the stability score for a batch of masks. The stability + score is the IoU between the binary masks obtained by thresholding + the predicted mask logits at high and low values. + """ + # One mask is always contained inside the other. + # Save memory by preventing unnecesary cast to torch.int64 + intersections = ( + (masks > (mask_threshold + threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + unions = ( + (masks > (mask_threshold - threshold_offset)) + .sum(-1, dtype=torch.int16) + .sum(-1, dtype=torch.int32) + ) + return intersections / unions + + +def build_point_grid(n_per_side: int) -> np.ndarray: + """Generates a 2D grid of points evenly spaced in [0,1]x[0,1].""" + offset = 1 / (2 * n_per_side) + points_one_side = np.linspace(offset, 1 - offset, n_per_side) + points_x = np.tile(points_one_side[None, :], (n_per_side, 1)) + points_y = np.tile(points_one_side[:, None], (1, n_per_side)) + points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2) + return points + + +def build_all_layer_point_grids( + n_per_side: int, n_layers: int, scale_per_layer: int +) -> List[np.ndarray]: + """Generates point grids for all crop layers.""" + points_by_layer = [] + for i in range(n_layers + 1): + n_points = int(n_per_side / (scale_per_layer**i)) + points_by_layer.append(build_point_grid(n_points)) + return points_by_layer + + +def generate_crop_boxes( + im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float +) -> Tuple[List[List[int]], List[int]]: + """ + Generates a list of crop boxes of different sizes. Each layer + has (2**i)**2 boxes for the ith layer. 
+ """ + crop_boxes, layer_idxs = [], [] + im_h, im_w = im_size + short_side = min(im_h, im_w) + + # Original image + crop_boxes.append([0, 0, im_w, im_h]) + layer_idxs.append(0) + + def crop_len(orig_len, n_crops, overlap): + return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops)) + + for i_layer in range(n_layers): + n_crops_per_side = 2 ** (i_layer + 1) + overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side)) + + crop_w = crop_len(im_w, n_crops_per_side, overlap) + crop_h = crop_len(im_h, n_crops_per_side, overlap) + + crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)] + crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)] + + # Crops in XYWH format + for x0, y0 in product(crop_box_x0, crop_box_y0): + box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)] + crop_boxes.append(box) + layer_idxs.append(i_layer + 1) + + return crop_boxes, layer_idxs + + +def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device) + # Check if boxes has a channel dimension + if len(boxes.shape) == 3: + offset = offset.unsqueeze(1) + return boxes + offset + + +def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor: + x0, y0, _, _ = crop_box + offset = torch.tensor([[x0, y0]], device=points.device) + # Check if points has a channel dimension + if len(points.shape) == 3: + offset = offset.unsqueeze(1) + return points + offset + + +def uncrop_masks( + masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int +) -> torch.Tensor: + x0, y0, x1, y1 = crop_box + if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h: + return masks + # Coordinate transform masks + pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0) + pad = (x0, pad_x - x0, y0, pad_y - y0) + return torch.nn.functional.pad(masks, pad, value=0) + + +def remove_small_regions( + mask: np.ndarray, area_thresh: float, mode: str +) -> Tuple[np.ndarray, bool]: + """ + Removes small disconnected regions and holes in a mask. Returns the + mask and an indicator of if the mask has been modified. + """ + import cv2 # type: ignore + + assert mode in ["holes", "islands"] + correct_holes = mode == "holes" + working_mask = (correct_holes ^ mask).astype(np.uint8) + n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8) + sizes = stats[:, -1][1:] # Row 0 is background label + small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh] + if len(small_regions) == 0: + return mask, False + fill_labels = [0] + small_regions + if not correct_holes: + fill_labels = [i for i in range(n_labels) if i not in fill_labels] + # If every region is below threshold, keep largest + if len(fill_labels) == 0: + fill_labels = [int(np.argmax(sizes)) + 1] + mask = np.isin(regions, fill_labels) + return mask, True + + +def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]: + from pycocotools import mask as mask_utils # type: ignore + + h, w = uncompressed_rle["size"] + rle = mask_utils.frPyObjects(uncompressed_rle, h, w) + rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json + return rle + + +def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor: + """ + Calculates boxes in XYXY format around masks. Return [0,0,0,0] for + an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4. 
+ """ + # torch.max below raises an error on empty inputs, just skip in this case + if torch.numel(masks) == 0: + return torch.zeros(*masks.shape[:-2], 4, device=masks.device) + + # Normalize shape to CxHxW + shape = masks.shape + h, w = shape[-2:] + if len(shape) > 2: + masks = masks.flatten(0, -3) + else: + masks = masks.unsqueeze(0) + + # Get top and bottom edges + in_height, _ = torch.max(masks, dim=-1) + in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :] + bottom_edges, _ = torch.max(in_height_coords, dim=-1) + in_height_coords = in_height_coords + h * (~in_height) + top_edges, _ = torch.min(in_height_coords, dim=-1) + + # Get left and right edges + in_width, _ = torch.max(masks, dim=-2) + in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :] + right_edges, _ = torch.max(in_width_coords, dim=-1) + in_width_coords = in_width_coords + w * (~in_width) + left_edges, _ = torch.min(in_width_coords, dim=-1) + + # If the mask is empty the right edge will be to the left of the left edge. + # Replace these boxes with [0, 0, 0, 0] + empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges) + out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1) + out = out * (~empty_filter).unsqueeze(-1) + + # Return to original shape + if len(shape) > 2: + out = out.reshape(*shape[:-2], 4) + else: + out = out[0] + + return out diff --git a/docker/template/src/nnunetv2/nets/segment_anything/utils/onnx.py b/docker/template/src/nnunetv2/nets/segment_anything/utils/onnx.py new file mode 100644 index 0000000..4297b31 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/utils/onnx.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import torch +import torch.nn as nn +from torch.nn import functional as F + +from typing import Tuple + +from ..modeling import Sam +from .amg import calculate_stability_score + + +class SamOnnxModel(nn.Module): + """ + This model should not be called directly, but is used in ONNX export. + It combines the prompt encoder, mask decoder, and mask postprocessing of Sam, + with some functions modified to enable model tracing. Also supports extra + options controlling what information. See the ONNX export script for details. 
+ """ + + def __init__( + self, + model: Sam, + return_single_mask: bool, + use_stability_score: bool = False, + return_extra_metrics: bool = False, + ) -> None: + super().__init__() + self.mask_decoder = model.mask_decoder + self.model = model + self.img_size = model.image_encoder.img_size + self.return_single_mask = return_single_mask + self.use_stability_score = use_stability_score + self.stability_score_offset = 1.0 + self.return_extra_metrics = return_extra_metrics + + @staticmethod + def resize_longest_image_size( + input_image_size: torch.Tensor, longest_side: int + ) -> torch.Tensor: + input_image_size = input_image_size.to(torch.float32) + scale = longest_side / torch.max(input_image_size) + transformed_size = scale * input_image_size + transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64) + return transformed_size + + def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor: + point_coords = point_coords + 0.5 + point_coords = point_coords / self.img_size + point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords) + point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding) + + point_embedding = point_embedding * (point_labels != -1) + point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * ( + point_labels == -1 + ) + + for i in range(self.model.prompt_encoder.num_point_embeddings): + point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[ + i + ].weight * (point_labels == i) + + return point_embedding + + def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor: + mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask) + mask_embedding = mask_embedding + ( + 1 - has_mask_input + ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1) + return mask_embedding + + def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor: + masks = F.interpolate( + masks, + size=(self.img_size, self.img_size), + mode="bilinear", + align_corners=False, + ) + + prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size) + masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])] + + orig_im_size = orig_im_size.to(torch.int64) + h, w = orig_im_size[0], orig_im_size[1] + masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False) + return masks + + def select_masks( + self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int + ) -> Tuple[torch.Tensor, torch.Tensor]: + # Determine if we should return the multiclick mask or not from the number of points. + # The reweighting is used to avoid control flow. 
+ score_reweight = torch.tensor( + [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)] + ).to(iou_preds.device) + score = iou_preds + (num_points - 2.5) * score_reweight + best_idx = torch.argmax(score, dim=1) + masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1) + iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1) + + return masks, iou_preds + + @torch.no_grad() + def forward( + self, + image_embeddings: torch.Tensor, + point_coords: torch.Tensor, + point_labels: torch.Tensor, + mask_input: torch.Tensor, + has_mask_input: torch.Tensor, + orig_im_size: torch.Tensor, + ): + sparse_embedding = self._embed_points(point_coords, point_labels) + dense_embedding = self._embed_masks(mask_input, has_mask_input) + + masks, scores = self.model.mask_decoder.predict_masks( + image_embeddings=image_embeddings, + image_pe=self.model.prompt_encoder.get_dense_pe(), + sparse_prompt_embeddings=sparse_embedding, + dense_prompt_embeddings=dense_embedding, + ) + + if self.use_stability_score: + scores = calculate_stability_score( + masks, self.model.mask_threshold, self.stability_score_offset + ) + + if self.return_single_mask: + masks, scores = self.select_masks(masks, scores, point_coords.shape[1]) + + upscaled_masks = self.mask_postprocessing(masks, orig_im_size) + + if self.return_extra_metrics: + stability_scores = calculate_stability_score( + upscaled_masks, self.model.mask_threshold, self.stability_score_offset + ) + areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1) + return upscaled_masks, scores, stability_scores, areas, masks + + return upscaled_masks, scores, masks diff --git a/docker/template/src/nnunetv2/nets/segment_anything/utils/transforms.py b/docker/template/src/nnunetv2/nets/segment_anything/utils/transforms.py new file mode 100644 index 0000000..3ad3466 --- /dev/null +++ b/docker/template/src/nnunetv2/nets/segment_anything/utils/transforms.py @@ -0,0 +1,102 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import numpy as np +import torch +from torch.nn import functional as F +from torchvision.transforms.functional import resize, to_pil_image # type: ignore + +from copy import deepcopy +from typing import Tuple + + +class ResizeLongestSide: + """ + Resizes images to longest side 'target_length', as well as provides + methods for resizing coordinates and boxes. Provides methods for + transforming both numpy array and batched torch tensors. + """ + + def __init__(self, target_length: int) -> None: + self.target_length = target_length + + def apply_image(self, image: np.ndarray) -> np.ndarray: + """ + Expects a numpy array with shape HxWxC in uint8 format. + """ + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return np.array(resize(to_pil_image(image), target_size)) + + def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array of length 2 in the final dimension. Requires the + original image size in (H, W) format. 
+ """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).astype(float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray: + """ + Expects a numpy array shape Bx4. Requires the original image size + in (H, W) format. + """ + boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor: + """ + Expects batched images with shape BxCxHxW and float format. This + transformation may not exactly match apply_image. apply_image is + the transformation expected by the model. + """ + # Expects an image in BCHW format. May not exactly match apply_image. + target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length) + return F.interpolate( + image, target_size, mode="bilinear", align_corners=False, antialias=True + ) + + def apply_coords_torch( + self, coords: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with length 2 in the last dimension. Requires the + original image size in (H, W) format. + """ + old_h, old_w = original_size + new_h, new_w = self.get_preprocess_shape( + original_size[0], original_size[1], self.target_length + ) + coords = deepcopy(coords).to(torch.float) + coords[..., 0] = coords[..., 0] * (new_w / old_w) + coords[..., 1] = coords[..., 1] * (new_h / old_h) + return coords + + def apply_boxes_torch( + self, boxes: torch.Tensor, original_size: Tuple[int, ...] + ) -> torch.Tensor: + """ + Expects a torch tensor with shape Bx4. Requires the original image + size in (H, W) format. + """ + boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size) + return boxes.reshape(-1, 4) + + @staticmethod + def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]: + """ + Compute the output size given input size and target long side length. + """ + scale = long_side_length * 1.0 / max(oldh, oldw) + newh, neww = oldh * scale, oldw * scale + neww = int(neww + 0.5) + newh = int(newh + 0.5) + return (newh, neww) diff --git a/docker/template/src/nnunetv2/paths.py b/docker/template/src/nnunetv2/paths.py new file mode 100644 index 0000000..f2b65bc --- /dev/null +++ b/docker/template/src/nnunetv2/paths.py @@ -0,0 +1,63 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +join = os.path.join +""" +Please make sure your data is organized as follows: + +data/ +├── nnUNet_raw/ +│ ├── Dataset701_AbdomenCT/ +│ │ ├── imagesTr +│ │ │ ├── FLARE22_Tr_0001_0000.nii.gz +│ │ │ ├── FLARE22_Tr_0002_0000.nii.gz +│ │ │ ├── ... 
+│ │ ├── labelsTr +│ │ │ ├── FLARE22_Tr_0001.nii.gz +│ │ │ ├── FLARE22_Tr_0002.nii.gz +│ │ │ ├── ... +│ │ ├── dataset.json +│ ├── Dataset702_AbdomenMR/ +│ │ ├── imagesTr +│ │ │ ├── amos_0507_0000.nii.gz +│ │ │ ├── amos_0508_0000.nii.gz +│ │ │ ├── ... +│ │ ├── labelsTr +│ │ │ ├── amos_0507.nii.gz +│ │ │ ├── amos_0508.nii.gz +│ │ │ ├── ... +│ │ ├── dataset.json +│ ├── ... +""" +base = join(os.sep.join(__file__.split(os.sep)[:-3]), 'data') +nnUNet_raw = os.environ.get('nnUNet_raw') +nnUNet_preprocessed = os.environ.get('nnUNet_preprocessed') +nnUNet_results = os.environ.get('nnUNet_results') + +if nnUNet_raw is None: + print("nnUNet_raw is not defined and nnU-Net can only be used on data for which preprocessed files " + "are already present on your system. nnU-Net cannot be used for experiment planning and preprocessing like " + "this. If this is not intended, please read documentation/setting_up_paths.md for information on how to set " + "this up properly.") + +if nnUNet_preprocessed is None: + print("nnUNet_preprocessed is not defined and nnU-Net can not be used for preprocessing " + "or training. If this is not intended, please read documentation/setting_up_paths.md for information on how " + "to set this up.") + +if nnUNet_results is None: + print("nnUNet_results is not defined and nnU-Net cannot be used for training or " + "inference. If this is not intended behavior, please read documentation/setting_up_paths.md for information " + "on how to set this up.") diff --git a/docker/template/src/nnunetv2/postprocessing/__init__.py b/docker/template/src/nnunetv2/postprocessing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/postprocessing/remove_connected_components.py b/docker/template/src/nnunetv2/postprocessing/remove_connected_components.py new file mode 100644 index 0000000..c8021ac --- /dev/null +++ b/docker/template/src/nnunetv2/postprocessing/remove_connected_components.py @@ -0,0 +1,361 @@ +import argparse +import multiprocessing +import shutil +from typing import Union, Tuple, List, Callable + +import numpy as np +from acvl_utils.morphology.morphology_helper import remove_all_but_largest_component +from batchgenerators.utilities.file_and_folder_operations import load_json, subfiles, maybe_mkdir_p, join, isfile, \ + isdir, save_pickle, load_pickle, save_json +from nnunetv2.configuration import default_num_processes +from nnunetv2.evaluation.accumulate_cv_results import accumulate_cv_results +from nnunetv2.evaluation.evaluate_predictions import region_or_label_to_mask, compute_metrics_on_folder, \ + load_summary_json, label_or_region_to_key +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.paths import nnUNet_raw +from nnunetv2.utilities.file_path_utilities import folds_tuple_to_string +from nnunetv2.utilities.json_export import recursive_fix_for_json_export +from nnunetv2.utilities.plans_handling.plans_handler import PlansManager + + +def remove_all_but_largest_component_from_segmentation(segmentation: np.ndarray, + labels_or_regions: Union[int, Tuple[int, ...], + List[Union[int, Tuple[int, ...]]]], + background_label: int = 0) -> np.ndarray: + mask = np.zeros_like(segmentation, dtype=bool) + if not isinstance(labels_or_regions, list): + labels_or_regions = [labels_or_regions] + for l_or_r in labels_or_regions: + mask |= region_or_label_to_mask(segmentation, l_or_r) + mask_keep = remove_all_but_largest_component(mask) + ret = np.copy(segmentation) # do not modify the input! 
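+    # `mask` is the union of all requested labels/regions and `mask_keep` its
+    # largest connected component; every voxel that is in the union but outside
+    # that component is reset to the background label below.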
+ ret[mask & ~mask_keep] = background_label + return ret + + +def apply_postprocessing(segmentation: np.ndarray, pp_fns: List[Callable], pp_fn_kwargs: List[dict]): + for fn, kwargs in zip(pp_fns, pp_fn_kwargs): + segmentation = fn(segmentation, **kwargs) + return segmentation + + +def load_postprocess_save(segmentation_file: str, + output_fname: str, + image_reader_writer: BaseReaderWriter, + pp_fns: List[Callable], + pp_fn_kwargs: List[dict]): + seg, props = image_reader_writer.read_seg(segmentation_file) + seg = apply_postprocessing(seg[0], pp_fns, pp_fn_kwargs) + image_reader_writer.write_seg(seg, output_fname, props) + + +def determine_postprocessing(folder_predictions: str, + folder_ref: str, + plans_file_or_dict: Union[str, dict], + dataset_json_file_or_dict: Union[str, dict], + num_processes: int = default_num_processes, + keep_postprocessed_files: bool = True): + """ + Determines nnUNet postprocessing. Its output is a postprocessing.pkl file in folder_predictions which can be + used with apply_postprocessing_to_folder. + + Postprocessed files are saved in folder_predictions/postprocessed. Set + keep_postprocessed_files=False to delete these files after this function is done (temp files will be created + and deleted regardless). + + If plans_file_or_dict or dataset_json_file_or_dict are None, we will look for them in input_folder + """ + output_folder = join(folder_predictions, 'postprocessed') + + if plans_file_or_dict is None: + expected_plans_file = join(folder_predictions, 'plans.json') + if not isfile(expected_plans_file): + raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans file should have been " + f"created while running nnUNetv2_predict.") + plans_file_or_dict = load_json(expected_plans_file) + plans_manager = PlansManager(plans_file_or_dict) + + if dataset_json_file_or_dict is None: + expected_dataset_json_file = join(folder_predictions, 'dataset.json') + if not isfile(expected_dataset_json_file): + raise RuntimeError( + f"Expected dataset.json file missing: {expected_dataset_json_file}. The dataset.json should have been " + f"created while running nnUNetv2_predict.") + dataset_json_file_or_dict = load_json(expected_dataset_json_file) + + if not isinstance(dataset_json_file_or_dict, dict): + dataset_json = load_json(dataset_json_file_or_dict) + else: + dataset_json = dataset_json_file_or_dict + + rw = plans_manager.image_reader_writer_class() + label_manager = plans_manager.get_label_manager(dataset_json) + labels_or_regions = label_manager.foreground_regions if label_manager.has_regions else label_manager.foreground_labels + + predicted_files = subfiles(folder_predictions, suffix=dataset_json['file_ending'], join=False) + ref_files = subfiles(folder_ref, suffix=dataset_json['file_ending'], join=False) + # we should print a warning if not all files from folder_ref are present in folder_predictions + if not all([i in predicted_files for i in ref_files]): + print(f'WARNING: Not all files in folder_ref were found in folder_predictions.
Determining postprocessing ' + f'should always be done on the entire dataset!') + + # before we start we should evaluate the images in the source folder + if not isfile(join(folder_predictions, 'summary.json')): + compute_metrics_on_folder(folder_ref, + folder_predictions, + join(folder_predictions, 'summary.json'), + rw, + dataset_json['file_ending'], + labels_or_regions, + label_manager.ignore_label, + num_processes) + + # we save the postprocessing functions in here + pp_fns = [] + pp_fn_kwargs = [] + + # pool party! + with multiprocessing.get_context("spawn").Pool(num_processes) as pool: + # now let's see whether removing all but the largest foreground region improves the scores + output_here = join(output_folder, 'temp', 'keep_largest_fg') + maybe_mkdir_p(output_here) + pp_fn = remove_all_but_largest_component_from_segmentation + kwargs = { + 'labels_or_regions': label_manager.foreground_labels, + } + + pool.starmap( + load_postprocess_save, + zip( + [join(folder_predictions, i) for i in predicted_files], + [join(output_here, i) for i in predicted_files], + [rw] * len(predicted_files), + [[pp_fn]] * len(predicted_files), + [[kwargs]] * len(predicted_files) + ) + ) + compute_metrics_on_folder(folder_ref, + output_here, + join(output_here, 'summary.json'), + rw, + dataset_json['file_ending'], + labels_or_regions, + label_manager.ignore_label, + num_processes) + # now we need to figure out if doing this improved the dice scores. We implement this defensively: if a + # single class got worse as a result, we won't do it. We can change this in the future but right now we + # prefer to do it this way + baseline_results = load_summary_json(join(folder_predictions, 'summary.json')) + pp_results = load_summary_json(join(output_here, 'summary.json')) + do_this = pp_results['foreground_mean']['Dice'] > baseline_results['foreground_mean']['Dice'] + if do_this: + for class_id in pp_results['mean'].keys(): + if pp_results['mean'][class_id]['Dice'] < baseline_results['mean'][class_id]['Dice']: + do_this = False + break + if do_this: + print(f'Results were improved by removing all but the largest foreground region. ' + f'Mean dice before: {round(baseline_results["foreground_mean"]["Dice"], 5)} ' + f'after: {round(pp_results["foreground_mean"]["Dice"], 5)}') + source = output_here + pp_fns.append(pp_fn) + pp_fn_kwargs.append(kwargs) + else: + print(f'Removing all but the largest foreground region did not improve results!') + source = folder_predictions + + # in the old nnU-Net we could just apply all-but-largest component removal to all classes at the same time and + # then evaluate for each class whether this improved results. This is no longer possible because we now support + # region-based predictions and regions can overlap, causing interactions. + # in principle the order in which the postprocessing is applied to the regions matters as well and should be + # investigated, but sticking to the order in which the regions are declared in dataset.json is expected to be + # fine in practice (if you want to think about it, think about region_class_order)
+ if len(labels_or_regions) > 1: + for label_or_region in labels_or_regions: + pp_fn = remove_all_but_largest_component_from_segmentation + kwargs = { + 'labels_or_regions': label_or_region, + } + + output_here = join(output_folder, 'temp', 'keep_largest_perClassOrRegion') + maybe_mkdir_p(output_here) + + pool.starmap( + load_postprocess_save, + zip( + [join(source, i) for i in predicted_files], + [join(output_here, i) for i in predicted_files], + [rw] * len(predicted_files), + [[pp_fn]] * len(predicted_files), + [[kwargs]] * len(predicted_files) + ) + ) + compute_metrics_on_folder(folder_ref, + output_here, + join(output_here, 'summary.json'), + rw, + dataset_json['file_ending'], + labels_or_regions, + label_manager.ignore_label, + num_processes) + baseline_results = load_summary_json(join(source, 'summary.json')) + pp_results = load_summary_json(join(output_here, 'summary.json')) + do_this = pp_results['mean'][label_or_region]['Dice'] > baseline_results['mean'][label_or_region]['Dice'] + if do_this: + print(f'Results were improved by removing all but the largest component for {label_or_region}. ' + f'Dice before: {round(baseline_results["mean"][label_or_region]["Dice"], 5)} ' + f'after: {round(pp_results["mean"][label_or_region]["Dice"], 5)}') + if isdir(join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest')): + shutil.rmtree(join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest')) + shutil.move(output_here, join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest'), ) + source = join(output_folder, 'temp', 'keep_largest_perClassOrRegion_currentBest') + pp_fns.append(pp_fn) + pp_fn_kwargs.append(kwargs) + else: + print(f'Removing all but the largest component for {label_or_region} did not improve results! ' + f'Dice before: {round(baseline_results["mean"][label_or_region]["Dice"], 5)} ' + f'after: {round(pp_results["mean"][label_or_region]["Dice"], 5)}') + [shutil.copy(join(source, i), join(output_folder, i)) for i in subfiles(source, join=False)] + save_pickle((pp_fns, pp_fn_kwargs), join(folder_predictions, 'postprocessing.pkl')) + + baseline_results = load_summary_json(join(folder_predictions, 'summary.json')) + final_results = load_summary_json(join(output_folder, 'summary.json')) + tmp = { + 'input_folder': {i: baseline_results[i] for i in ['foreground_mean', 'mean']}, + 'postprocessed': {i: final_results[i] for i in ['foreground_mean', 'mean']}, + 'postprocessing_fns': [i.__name__ for i in pp_fns], + 'postprocessing_kwargs': pp_fn_kwargs, + } + # json can't handle tuples as dict keys, so convert them to string keys first. + tmp['input_folder']['mean'] = {label_or_region_to_key(k): tmp['input_folder']['mean'][k] for k in + tmp['input_folder']['mean'].keys()} + tmp['postprocessed']['mean'] = {label_or_region_to_key(k): tmp['postprocessed']['mean'][k] for k in + tmp['postprocessed']['mean'].keys()} + # numpy scalar types (e.g. int64) are not JSON serializable either; recursive_fix_for_json_export below takes care of that.
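+    # The saved postprocessing.json therefore has this shape (sketch):
+    #   {
+    #     "input_folder":  {"foreground_mean": {...}, "mean": {...}},
+    #     "postprocessed": {"foreground_mean": {...}, "mean": {...}},
+    #     "postprocessing_fns": ["remove_all_but_largest_component_from_segmentation", ...],
+    #     "postprocessing_kwargs": [{"labels_or_regions": ...}, ...]
+    #   }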
+ recursive_fix_for_json_export(tmp)
+ save_json(tmp, join(folder_predictions, 'postprocessing.json'))
+
+ shutil.rmtree(join(output_folder, 'temp'))
+
+ if not keep_postprocessed_files:
+ shutil.rmtree(output_folder)
+ return pp_fns, pp_fn_kwargs
+
+
+def apply_postprocessing_to_folder(input_folder: str,
+ output_folder: str,
+ pp_fns: List[Callable],
+ pp_fn_kwargs: List[dict],
+ plans_file_or_dict: Union[str, dict] = None,
+ dataset_json_file_or_dict: Union[str, dict] = None,
+ num_processes=8) -> None:
+ """
+ If plans_file_or_dict or dataset_json_file_or_dict is None, we will look for it in input_folder
+ """
+ if plans_file_or_dict is None:
+ expected_plans_file = join(input_folder, 'plans.json')
+ if not isfile(expected_plans_file):
+ raise RuntimeError(f"Expected plans file missing: {expected_plans_file}. The plans file should have been "
+ f"created while running nnUNetv2_predict. Sadge. If the folder you want to apply "
+ f"postprocessing to was created from an ensemble then just specify one of the "
+ f"plans files of the ensemble members in plans_file_or_dict")
+ plans_file_or_dict = load_json(expected_plans_file)
+ plans_manager = PlansManager(plans_file_or_dict)
+
+ if dataset_json_file_or_dict is None:
+ expected_dataset_json_file = join(input_folder, 'dataset.json')
+ if not isfile(expected_dataset_json_file):
+ raise RuntimeError(
+ f"Expected dataset.json file missing: {expected_dataset_json_file}. The dataset.json should have been "
+ f"copied while running nnUNetv2_predict/nnUNetv2_ensemble. Sadge.")
+ dataset_json_file_or_dict = load_json(expected_dataset_json_file)
+
+ if not isinstance(dataset_json_file_or_dict, dict):
+ dataset_json = load_json(dataset_json_file_or_dict)
+ else:
+ dataset_json = dataset_json_file_or_dict
+
+ rw = plans_manager.image_reader_writer_class()
+
+ maybe_mkdir_p(output_folder)
+ with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+ files = subfiles(input_folder, suffix=dataset_json['file_ending'], join=False)
+
+ _ = p.starmap(load_postprocess_save,
+ zip(
+ [join(input_folder, i) for i in files],
+ [join(output_folder, i) for i in files],
+ [rw] * len(files),
+ [pp_fns] * len(files),
+ [pp_fn_kwargs] * len(files)
+ )
+ )
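+
+# Minimal usage sketch (hypothetical paths; assumes a postprocessing.pkl written by determine_postprocessing
+# above):
+#
+# pp_fns, pp_fn_kwargs = load_pickle('/path/to/crossval_results/postprocessing.pkl')
+# apply_postprocessing_to_folder('/path/to/predictions', '/path/to/predictions_pp',
+#                                pp_fns, pp_fn_kwargs, num_processes=8)
+#
+# The two entry points below expose the same functionality on the command line.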
Default: {default_num_processes}") + parser.add_argument('--remove_postprocessed', action='store_true', required=False, + help='set this is you don\'t want to keep the postprocessed files') + + args = parser.parse_args() + determine_postprocessing(args.i, args.ref, args.plans_json, args.dataset_json, args.np, + not args.remove_postprocessed) + + +def entry_point_apply_postprocessing(): + parser = argparse.ArgumentParser('Apples postprocessing specified in pp_pkl_file to input folder.') + parser.add_argument('-i', type=str, required=True, help='Input folder') + parser.add_argument('-o', type=str, required=True, help='Output folder') + parser.add_argument('-pp_pkl_file', type=str, required=True, help='postprocessing.pkl file') + parser.add_argument('-np', type=int, required=False, default=default_num_processes, + help=f"number of processes to use. Default: {default_num_processes}") + parser.add_argument('-plans_json', type=str, required=False, default=None, + help="plans file to use. If not specified we will look for the plans.json file in the " + "input folder (input_folder/plans.json)") + parser.add_argument('-dataset_json', type=str, required=False, default=None, + help="dataset.json file to use. If not specified we will look for the dataset.json file in the " + "input folder (input_folder/dataset.json)") + args = parser.parse_args() + pp_fns, pp_fn_kwargs = load_pickle(args.pp_pkl_file) + apply_postprocessing_to_folder(args.i, args.o, pp_fns, pp_fn_kwargs, args.plans_json, args.dataset_json, args.np) + + +if __name__ == '__main__': + trained_model_folder = '/home/fabian/results/nnUNet_remake/Dataset004_Hippocampus/nnUNetTrainer__nnUNetPlans__3d_fullres' + labelstr = join(nnUNet_raw, 'Dataset004_Hippocampus', 'labelsTr') + plans_manager = PlansManager(join(trained_model_folder, 'plans.json')) + dataset_json = load_json(join(trained_model_folder, 'dataset.json')) + folds = (0, 1, 2, 3, 4) + label_manager = plans_manager.get_label_manager(dataset_json) + + merged_output_folder = join(trained_model_folder, f'crossval_results_folds_{folds_tuple_to_string(folds)}') + accumulate_cv_results(trained_model_folder, merged_output_folder, folds, 8, False) + + fns, kwargs = determine_postprocessing(merged_output_folder, labelstr, plans_manager.plans, + dataset_json, 8, keep_postprocessed_files=True) + save_pickle((fns, kwargs), join(trained_model_folder, 'postprocessing.pkl')) + fns, kwargs = load_pickle(join(trained_model_folder, 'postprocessing.pkl')) + + apply_postprocessing_to_folder(merged_output_folder, merged_output_folder + '_pp', fns, kwargs, + plans_manager.plans, dataset_json, + 8) + compute_metrics_on_folder(labelstr, + merged_output_folder + '_pp', + join(merged_output_folder + '_pp', 'summary.json'), + plans_manager.image_reader_writer_class(), + dataset_json['file_ending'], + label_manager.foreground_regions if label_manager.has_regions else label_manager.foreground_labels, + label_manager.ignore_label, + 8) diff --git a/docker/template/src/nnunetv2/preprocessing/__init__.py b/docker/template/src/nnunetv2/preprocessing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/preprocessing/cropping/__init__.py b/docker/template/src/nnunetv2/preprocessing/cropping/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/preprocessing/cropping/cropping.py b/docker/template/src/nnunetv2/preprocessing/cropping/cropping.py new file mode 100644 index 0000000..96fe7b7 --- /dev/null +++ 
b/docker/template/src/nnunetv2/preprocessing/cropping/cropping.py @@ -0,0 +1,51 @@ +import numpy as np + + +# Hello! crop_to_nonzero is the function you are looking for. Ignore the rest. +from acvl_utils.cropping_and_padding.bounding_boxes import get_bbox_from_mask, crop_to_bbox, bounding_box_to_slice + + +def create_nonzero_mask(data): + """ + + :param data: + :return: the mask is True where the data is nonzero + """ + from scipy.ndimage import binary_fill_holes + assert data.ndim in (3, 4), "data must have shape (C, X, Y, Z) or shape (C, X, Y)" + nonzero_mask = np.zeros(data.shape[1:], dtype=bool) + for c in range(data.shape[0]): + this_mask = data[c] != 0 + nonzero_mask = nonzero_mask | this_mask + nonzero_mask = binary_fill_holes(nonzero_mask) + return nonzero_mask + + +def crop_to_nonzero(data, seg=None, nonzero_label=-1): + """ + + :param data: + :param seg: + :param nonzero_label: this will be written into the segmentation map + :return: + """ + nonzero_mask = create_nonzero_mask(data) + bbox = get_bbox_from_mask(nonzero_mask) + + slicer = bounding_box_to_slice(bbox) + data = data[tuple([slice(None), *slicer])] + + if seg is not None: + seg = seg[tuple([slice(None), *slicer])] + + nonzero_mask = nonzero_mask[slicer][None] + if seg is not None: + seg[(seg == 0) & (~nonzero_mask)] = nonzero_label + else: + nonzero_mask = nonzero_mask.astype(np.int8) + nonzero_mask[nonzero_mask == 0] = nonzero_label + nonzero_mask[nonzero_mask > 0] = 0 + seg = nonzero_mask + return data, seg, bbox + + diff --git a/docker/template/src/nnunetv2/preprocessing/normalization/__init__.py b/docker/template/src/nnunetv2/preprocessing/normalization/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/preprocessing/normalization/default_normalization_schemes.py b/docker/template/src/nnunetv2/preprocessing/normalization/default_normalization_schemes.py new file mode 100644 index 0000000..3c90a91 --- /dev/null +++ b/docker/template/src/nnunetv2/preprocessing/normalization/default_normalization_schemes.py @@ -0,0 +1,95 @@ +from abc import ABC, abstractmethod +from typing import Type + +import numpy as np +from numpy import number + + +class ImageNormalization(ABC): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = None + + def __init__(self, use_mask_for_norm: bool = None, intensityproperties: dict = None, + target_dtype: Type[number] = np.float32): + assert use_mask_for_norm is None or isinstance(use_mask_for_norm, bool) + self.use_mask_for_norm = use_mask_for_norm + assert isinstance(intensityproperties, dict) + self.intensityproperties = intensityproperties + self.target_dtype = target_dtype + + @abstractmethod + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + """ + Image and seg must have the same shape. Seg is not always used + """ + pass + + +class ZScoreNormalization(ImageNormalization): + leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = True + + def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray: + """ + here seg is used to store the zero valued region. The value for that region in the segmentation is -1 by + default. + """ + image = image.astype(self.target_dtype) + if self.use_mask_for_norm is not None and self.use_mask_for_norm: + # negative values in the segmentation encode the 'outside' region (think zero values around the brain as + # in BraTS). We want to run the normalization only in the brain region, so we need to mask the image. 
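+ # (seg is set to -1 outside the nonzero crop by crop_to_nonzero, so 'seg >= 0' selects exactly the valid voxels)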
+ # The default nnU-Net sets use_mask_for_norm to True if cropping to the nonzero region substantially
+ # reduced the image size.
+ mask = seg >= 0
+ mean = image[mask].mean()
+ std = image[mask].std()
+ image[mask] = (image[mask] - mean) / (max(std, 1e-8))
+ else:
+ mean = image.mean()
+ std = image.std()
+ image = (image - mean) / (max(std, 1e-8))
+ return image
+
+
+class CTNormalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ assert self.intensityproperties is not None, "CTNormalization requires intensity properties"
+ image = image.astype(self.target_dtype)
+ mean_intensity = self.intensityproperties['mean']
+ std_intensity = self.intensityproperties['std']
+ lower_bound = self.intensityproperties['percentile_00_5']
+ upper_bound = self.intensityproperties['percentile_99_5']
+ image = np.clip(image, lower_bound, upper_bound)
+ image = (image - mean_intensity) / max(std_intensity, 1e-8)
+ return image
+
+
+class NoNormalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ return image.astype(self.target_dtype)
+
+
+class RescaleTo01Normalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ image = image.astype(self.target_dtype)
+ image = image - image.min()
+ image = image / np.clip(image.max(), a_min=1e-8, a_max=None)
+ return image
+
+
+class RGBTo01Normalization(ImageNormalization):
+ leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False
+
+ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
+ assert image.min() >= 0, "RGB images are uint8, for whatever reason I found pixel values smaller than 0. " \
+ "Your images do not seem to be RGB images"
+ assert image.max() <= 255, "RGB images are uint8, for whatever reason I found pixel values greater than 255" \
+ ". Your images do not seem to be RGB images"
+ image = image.astype(self.target_dtype)
+ image = image / 255.
+ return image
+
diff --git a/docker/template/src/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py b/docker/template/src/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py
new file mode 100644
index 0000000..18f027b
--- /dev/null
+++ b/docker/template/src/nnunetv2/preprocessing/normalization/map_channel_name_to_normalization.py
@@ -0,0 +1,24 @@
+from typing import Type
+
+from nnunetv2.preprocessing.normalization.default_normalization_schemes import CTNormalization, NoNormalization, \
+ ZScoreNormalization, RescaleTo01Normalization, RGBTo01Normalization, ImageNormalization
+
+channel_name_to_normalization_mapping = {
+ 'CT': CTNormalization,
+ 'noNorm': NoNormalization,
+ 'zscore': ZScoreNormalization,
+ 'rescale_to_0_1': RescaleTo01Normalization,
+ 'rgb_to_0_1': RGBTo01Normalization
+}
+
+
+def get_normalization_scheme(channel_name: str) -> Type[ImageNormalization]:
+ """
+ If we find the channel_name in channel_name_to_normalization_mapping return the corresponding normalization. If it is
+ not found, use the default (ZScoreNormalization)
+ """
+ norm_scheme = channel_name_to_normalization_mapping.get(channel_name)
+ if norm_scheme is None:
+ norm_scheme = ZScoreNormalization
+ # print('Using %s for image normalization' % norm_scheme.__name__)
+ return norm_scheme
diff --git a/docker/template/src/nnunetv2/preprocessing/normalization/readme.md b/docker/template/src/nnunetv2/preprocessing/normalization/readme.md
new file mode 100644
index 0000000..7b54396
--- /dev/null
+++ b/docker/template/src/nnunetv2/preprocessing/normalization/readme.md
@@ -0,0 +1,5 @@
+The channel_names entry in dataset.json only determines the normalization scheme. So if you want to use something different
+then you can just
+- create a new subclass of ImageNormalization
+- map your custom channel identifier to that subclass in channel_name_to_normalization_mapping
+- run plan and preprocess again with your custom normalization scheme (see the sketch below)
\ No newline at end of file
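For illustration, here is a minimal sketch of such a custom scheme. The class name `ClipTo01Normalization` and the channel identifier `clip_0_1` are invented for this example; the intensity property keys are the ones used by `CTNormalization` above. Note that the preprocessor later looks normalization classes up by name inside `nnunetv2.preprocessing.normalization`, so the subclass should live in that package (e.g. in `default_normalization_schemes.py`):

```python
import numpy as np

from nnunetv2.preprocessing.normalization.default_normalization_schemes import ImageNormalization
from nnunetv2.preprocessing.normalization.map_channel_name_to_normalization import \
    channel_name_to_normalization_mapping


class ClipTo01Normalization(ImageNormalization):
    """Hypothetical example: clip to the foreground percentiles, then rescale to [0, 1]."""
    leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

    def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
        assert self.intensityproperties is not None, "ClipTo01Normalization requires intensity properties"
        image = image.astype(self.target_dtype)
        lower = self.intensityproperties['percentile_00_5']
        upper = self.intensityproperties['percentile_99_5']
        image = np.clip(image, lower, upper)
        # rescale to [0, 1]; the epsilon guards against a degenerate intensity range
        return (image - lower) / max(upper - lower, 1e-8)


# map the custom channel identifier (as used in dataset.json's channel_names) to the new scheme
channel_name_to_normalization_mapping['clip_0_1'] = ClipTo01Normalization
```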
diff --git a/docker/template/src/nnunetv2/preprocessing/preprocessors/__init__.py b/docker/template/src/nnunetv2/preprocessing/preprocessors/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/preprocessing/preprocessors/default_preprocessor.py b/docker/template/src/nnunetv2/preprocessing/preprocessors/default_preprocessor.py
new file mode 100644
index 0000000..ae71059
--- /dev/null
+++ b/docker/template/src/nnunetv2/preprocessing/preprocessors/default_preprocessor.py
@@ -0,0 +1,295 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import multiprocessing
+import shutil
+from time import sleep
+from typing import Union, Tuple
+
+import nnunetv2
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import *
+from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw
+from nnunetv2.preprocessing.cropping.cropping import crop_to_nonzero
+from nnunetv2.preprocessing.resampling.default_resampling import compute_new_shape
+from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager
+from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets
+from tqdm import tqdm
+
+
+class DefaultPreprocessor(object):
+ def __init__(self, verbose: bool = True):
+ self.verbose = verbose
+ """
+ Everything we need is in the plans. Those are given when run() is called
+ """
+
+ def run_case_npy(self, data: np.ndarray, seg: Union[np.ndarray, None], properties: dict,
+ plans_manager: PlansManager, configuration_manager: ConfigurationManager,
+ dataset_json: Union[dict, str]):
+ # let's not mess up the inputs!
+ data = np.copy(data)
+ if seg is not None:
+ assert data.shape[1:] == seg.shape[1:], "Shape mismatch between image and segmentation.
Please fix your dataset and make use of the --verify_dataset_integrity flag to ensure everything is correct"
+ seg = np.copy(seg)
+
+ has_seg = seg is not None
+
+ # apply transpose_forward, this also needs to be applied to the spacing!
+ data = data.transpose([0, *[i + 1 for i in plans_manager.transpose_forward]])
+ if seg is not None:
+ seg = seg.transpose([0, *[i + 1 for i in plans_manager.transpose_forward]])
+ original_spacing = [properties['spacing'][i] for i in plans_manager.transpose_forward]
+
+ # crop, remember to store size before cropping!
+ shape_before_cropping = data.shape[1:]
+ properties['shape_before_cropping'] = shape_before_cropping
+ # this command will generate a segmentation. This is important because of the nonzero mask which we may need
+ data, seg, bbox = crop_to_nonzero(data, seg)
+ properties['bbox_used_for_cropping'] = bbox
+ # print(data.shape, seg.shape)
+ properties['shape_after_cropping_and_before_resampling'] = data.shape[1:]
+
+ # resample
+ target_spacing = configuration_manager.spacing # this should already be transposed
+
+ if len(target_spacing) < len(data.shape[1:]):
+ # target spacing for 2d has 2 entries but the data and original_spacing have three because everything is 3d
+ # in 2d configuration we do not change the spacing between slices
+ target_spacing = [original_spacing[0]] + target_spacing
+ new_shape = compute_new_shape(data.shape[1:], original_spacing, target_spacing)
+
+ # normalize
+ # normalization MUST happen before resampling or we get huge problems with resampled nonzero masks no
+ # longer fitting the images perfectly!
+ data = self._normalize(data, seg, configuration_manager,
+ plans_manager.foreground_intensity_properties_per_channel)
+
+ # print('current shape', data.shape[1:], 'current_spacing', original_spacing,
+ # '\ntarget shape', new_shape, 'target_spacing', target_spacing)
+ old_shape = data.shape[1:]
+ data = configuration_manager.resampling_fn_data(data, new_shape, original_spacing, target_spacing)
+ seg = configuration_manager.resampling_fn_seg(seg, new_shape, original_spacing, target_spacing)
+ if self.verbose:
+ print(f'old shape: {old_shape}, new_shape: {new_shape}, old_spacing: {original_spacing}, '
+ f'new_spacing: {target_spacing}, fn_data: {configuration_manager.resampling_fn_data}')
+
+ # if we have a segmentation, sample foreground locations for oversampling and add those to properties
+ if has_seg:
+ # reinstantiating LabelManager for each case is not ideal. We could replace the dataset_json argument
+ # with a LabelManager instance in this function because that's all it's used for. Dunno what's better.
+ # LabelManager is pretty light computation-wise.
+ label_manager = plans_manager.get_label_manager(dataset_json)
+ collect_for_this = label_manager.foreground_regions if label_manager.has_regions \
+ else label_manager.foreground_labels
+
+ # when using the ignore label we want to sample only from annotated regions.
Therefore we also need to + # collect samples uniformly from all classes (incl background) + if label_manager.has_ignore_label: + collect_for_this.append(label_manager.all_labels) + + # no need to filter background in regions because it is already filtered in handle_labels + # print(all_labels, regions) + properties['class_locations'] = self._sample_foreground_locations(seg, collect_for_this, + verbose=self.verbose) + seg = self.modify_seg_fn(seg, plans_manager, dataset_json, configuration_manager) + if np.max(seg) > 127: + seg = seg.astype(np.int16) + else: + seg = seg.astype(np.int8) + return data, seg + + def run_case(self, image_files: List[str], seg_file: Union[str, None], plans_manager: PlansManager, + configuration_manager: ConfigurationManager, + dataset_json: Union[dict, str]): + """ + seg file can be none (test cases) + + order of operations is: transpose -> crop -> resample + so when we export we need to run the following order: resample -> crop -> transpose (we could also run + transpose at a different place, but reverting the order of operations done during preprocessing seems cleaner) + """ + if isinstance(dataset_json, str): + dataset_json = load_json(dataset_json) + + rw = plans_manager.image_reader_writer_class() + + # load image(s) + data, data_properties = rw.read_images(image_files) + + # if possible, load seg + if seg_file is not None: + seg, _ = rw.read_seg(seg_file) + else: + seg = None + + data, seg = self.run_case_npy(data, seg, data_properties, plans_manager, configuration_manager, + dataset_json) + return data, seg, data_properties + + def run_case_save(self, output_filename_truncated: str, image_files: List[str], seg_file: str, + plans_manager: PlansManager, configuration_manager: ConfigurationManager, + dataset_json: Union[dict, str]): + data, seg, properties = self.run_case(image_files, seg_file, plans_manager, configuration_manager, dataset_json) + # print('dtypes', data.dtype, seg.dtype) + np.savez_compressed(output_filename_truncated + '.npz', data=data, seg=seg) + write_pickle(properties, output_filename_truncated + '.pkl') + + @staticmethod + def _sample_foreground_locations(seg: np.ndarray, classes_or_regions: Union[List[int], List[Tuple[int, ...]]], + seed: int = 1234, verbose: bool = False): + num_samples = 10000 + min_percent_coverage = 0.01 # at least 1% of the class voxels need to be selected, otherwise it may be too + # sparse + rndst = np.random.RandomState(seed) + class_locs = {} + for c in classes_or_regions: + k = c if not isinstance(c, list) else tuple(c) + if isinstance(c, (tuple, list)): + mask = seg == c[0] + for cc in c[1:]: + mask = mask | (seg == cc) + all_locs = np.argwhere(mask) + else: + all_locs = np.argwhere(seg == c) + if len(all_locs) == 0: + class_locs[k] = [] + continue + target_num_samples = min(num_samples, len(all_locs)) + target_num_samples = max(target_num_samples, int(np.ceil(len(all_locs) * min_percent_coverage))) + + selected = all_locs[rndst.choice(len(all_locs), target_num_samples, replace=False)] + class_locs[k] = selected + if verbose: + print(c, target_num_samples) + return class_locs + + def _normalize(self, data: np.ndarray, seg: np.ndarray, configuration_manager: ConfigurationManager, + foreground_intensity_properties_per_channel: dict) -> np.ndarray: + for c in range(data.shape[0]): + scheme = configuration_manager.normalization_schemes[c] + normalizer_class = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing", "normalization"), + scheme, + 'nnunetv2.preprocessing.normalization') + if 
normalizer_class is None: + raise RuntimeError(f'Unable to locate class \'{scheme}\' for normalization') + normalizer = normalizer_class(use_mask_for_norm=configuration_manager.use_mask_for_norm[c], + intensityproperties=foreground_intensity_properties_per_channel[str(c)]) + data[c] = normalizer.run(data[c], seg[0]) + return data + + def run(self, dataset_name_or_id: Union[int, str], configuration_name: str, plans_identifier: str, + num_processes: int): + """ + data identifier = configuration name in plans. EZ. + """ + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + + assert isdir(join(nnUNet_raw, dataset_name)), "The requested dataset could not be found in nnUNet_raw" + + plans_file = join(nnUNet_preprocessed, dataset_name, plans_identifier + '.json') + assert isfile(plans_file), "Expected plans file (%s) not found. Run corresponding nnUNet_plan_experiment " \ + "first." % plans_file + plans = load_json(plans_file) + plans_manager = PlansManager(plans) + configuration_manager = plans_manager.get_configuration(configuration_name) + + if self.verbose: + print(f'Preprocessing the following configuration: {configuration_name}') + if self.verbose: + print(configuration_manager) + + dataset_json_file = join(nnUNet_preprocessed, dataset_name, 'dataset.json') + dataset_json = load_json(dataset_json_file) + + output_directory = join(nnUNet_preprocessed, dataset_name, configuration_manager.data_identifier) + + if isdir(output_directory): + shutil.rmtree(output_directory) + + maybe_mkdir_p(output_directory) + + dataset = get_filenames_of_train_images_and_targets(join(nnUNet_raw, dataset_name), dataset_json) + + # identifiers = [os.path.basename(i[:-len(dataset_json['file_ending'])]) for i in seg_fnames] + # output_filenames_truncated = [join(output_directory, i) for i in identifiers] + + # multiprocessing magic. + r = [] + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + for k in dataset.keys(): + r.append(p.starmap_async(self.run_case_save, + ((join(output_directory, k), dataset[k]['images'], dataset[k]['label'], + plans_manager, configuration_manager, + dataset_json),))) + remaining = list(range(len(dataset))) + # p is pretty nifti. If we kill workers they just respawn but don't do any work. + # So we need to store the original pool of workers. + workers = [j for j in p._pool] + with tqdm(desc=None, total=len(dataset), disable=self.verbose) as pbar: + while len(remaining) > 0: + all_alive = all([j.is_alive() for j in workers]) + if not all_alive: + raise RuntimeError('Some background worker is 6 feet under. Yuck. \n' + 'OK jokes aside.\n' + 'One of your background processes is missing. This could be because of ' + 'an error (look for an error message) or because it was killed ' + 'by your OS due to running out of RAM. If you don\'t see ' + 'an error message, out of RAM is likely the problem. In that case ' + 'reducing the number of workers might help') + done = [i for i in remaining if r[i].ready()] + for _ in done: + pbar.update() + remaining = [i for i in remaining if i not in done] + sleep(0.1) + + def modify_seg_fn(self, seg: np.ndarray, plans_manager: PlansManager, dataset_json: dict, + configuration_manager: ConfigurationManager) -> np.ndarray: + # this function will be called at the end of self.run_case. Can be used to change the segmentation + # after resampling. 
Useful for experimenting with sparse annotations: I can introduce sparsity after resampling
+ # and don't have to create a new dataset each time I modify my experiments
+ return seg
+
+
+def example_test_case_preprocessing():
+ # (paths to files may need adaptations)
+ plans_file = '/home/isensee/drives/gpu_data/nnUNet_preprocessed/Dataset219_AMOS2022_postChallenge_task2/nnUNetPlans.json'
+ dataset_json_file = '/home/isensee/drives/gpu_data/nnUNet_preprocessed/Dataset219_AMOS2022_postChallenge_task2/dataset.json'
+ input_images = ['/home/isensee/drives/e132-rohdaten/nnUNetv2/Dataset219_AMOS2022_postChallenge_task2/imagesTr/amos_0600_0000.nii.gz', ]  # if you only have one channel, you still need a list: ['case000_0000.nii.gz']
+
+ configuration = '3d_fullres'
+ pp = DefaultPreprocessor()
+
+ # _ because this position would be the segmentation if seg_file was not None (training case)
+ # even if you have the segmentation, don't put the file there! You should always evaluate in the original
+ # resolution. What comes out of the preprocessor might have been resampled to some other image resolution (as
+ # specified by plans)
+ plans_manager = PlansManager(plans_file)
+ data, _, properties = pp.run_case(input_images, seg_file=None, plans_manager=plans_manager,
+ configuration_manager=plans_manager.get_configuration(configuration),
+ dataset_json=dataset_json_file)
+
+ # voila. Now plug data into your prediction function of choice. We of course recommend nnU-Net's default (TODO)
+ return data
+
+
+if __name__ == '__main__':
+ example_test_case_preprocessing()
+ # pp = DefaultPreprocessor()
+ # pp.run(2, '2d', 'nnUNetPlans', 8)
+
+ ###########################################################################################################
+ # how to process a test case?
This is an example: + # example_test_case_preprocessing() diff --git a/docker/template/src/nnunetv2/preprocessing/resampling/__init__.py b/docker/template/src/nnunetv2/preprocessing/resampling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/preprocessing/resampling/default_resampling.py b/docker/template/src/nnunetv2/preprocessing/resampling/default_resampling.py new file mode 100644 index 0000000..e83f614 --- /dev/null +++ b/docker/template/src/nnunetv2/preprocessing/resampling/default_resampling.py @@ -0,0 +1,216 @@ +from collections import OrderedDict +from typing import Union, Tuple, List + +import numpy as np +import pandas as pd +import torch +from batchgenerators.augmentations.utils import resize_segmentation +from scipy.ndimage.interpolation import map_coordinates +from skimage.transform import resize +from nnunetv2.configuration import ANISO_THRESHOLD + + +def get_do_separate_z(spacing: Union[Tuple[float, ...], List[float], np.ndarray], anisotropy_threshold=ANISO_THRESHOLD): + do_separate_z = (np.max(spacing) / np.min(spacing)) > anisotropy_threshold + return do_separate_z + + +def get_lowres_axis(new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]): + axis = np.where(max(new_spacing) / np.array(new_spacing) == 1)[0] # find which axis is anisotropic + return axis + + +def compute_new_shape(old_shape: Union[Tuple[int, ...], List[int], np.ndarray], + old_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray]) -> np.ndarray: + assert len(old_spacing) == len(old_shape) + assert len(old_shape) == len(new_spacing) + new_shape = np.array([int(round(i / j * k)) for i, j, k in zip(old_spacing, new_spacing, old_shape)]) + return new_shape + + +def resample_data_or_seg_to_spacing(data: np.ndarray, + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert data.ndim == 4, "data must be c x y z" + + shape = np.array(data[0].shape) + new_shape = compute_new_shape(shape[1:], current_spacing, new_spacing) + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg_to_shape(data: Union[torch.Tensor, np.ndarray], + new_shape: Union[Tuple[int, ...], List[int], np.ndarray], + current_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + new_spacing: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, + order: int = 3, order_z: int = 0, + force_separate_z: Union[bool, None] = False, + separate_z_anisotropy_threshold: float = ANISO_THRESHOLD): + """ + needed for segmentation export. Stupid, I know. Maybe we can fix that with Leos new resampling functions + """ + if isinstance(data, torch.Tensor): + data = data.cpu().numpy() + if force_separate_z is not None: + do_separate_z = force_separate_z + if force_separate_z: + axis = get_lowres_axis(current_spacing) + else: + axis = None + else: + if get_do_separate_z(current_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(current_spacing) + elif get_do_separate_z(new_spacing, separate_z_anisotropy_threshold): + do_separate_z = True + axis = get_lowres_axis(new_spacing) + else: + do_separate_z = False + axis = None + + if axis is not None: + if len(axis) == 3: + # every axis has the same spacing, this should never happen, why is this code here? + do_separate_z = False + elif len(axis) == 2: + # this happens for spacings like (0.24, 1.25, 1.25) for example. 
In that case we do not want to resample + # separately in the out of plane axis + do_separate_z = False + else: + pass + + if data is not None: + assert data.ndim == 4, "data must be c x y z" + + data_reshaped = resample_data_or_seg(data, new_shape, is_seg, axis, order, do_separate_z, order_z=order_z) + return data_reshaped + + +def resample_data_or_seg(data: np.ndarray, new_shape: Union[Tuple[float, ...], List[float], np.ndarray], + is_seg: bool = False, axis: Union[None, int] = None, order: int = 3, + do_separate_z: bool = False, order_z: int = 0): + """ + separate_z=True will resample with order 0 along z + :param data: + :param new_shape: + :param is_seg: + :param axis: + :param order: + :param do_separate_z: + :param order_z: only applies if do_separate_z is True + :return: + """ + assert data.ndim == 4, "data must be (c, x, y, z)" + assert len(new_shape) == data.ndim - 1 + + if is_seg: + resize_fn = resize_segmentation + kwargs = OrderedDict() + else: + resize_fn = resize + kwargs = {'mode': 'edge', 'anti_aliasing': False} + dtype_data = data.dtype + shape = np.array(data[0].shape) + new_shape = np.array(new_shape) + if np.any(shape != new_shape): + data = data.astype(float) + if do_separate_z: + # print("separate z, order in z is", order_z, "order inplane is", order) + assert len(axis) == 1, "only one anisotropic axis supported" + axis = axis[0] + if axis == 0: + new_shape_2d = new_shape[1:] + elif axis == 1: + new_shape_2d = new_shape[[0, 2]] + else: + new_shape_2d = new_shape[:-1] + + reshaped_final_data = [] + for c in range(data.shape[0]): + reshaped_data = [] + for slice_id in range(shape[axis]): + if axis == 0: + reshaped_data.append(resize_fn(data[c, slice_id], new_shape_2d, order, **kwargs)) + elif axis == 1: + reshaped_data.append(resize_fn(data[c, :, slice_id], new_shape_2d, order, **kwargs)) + else: + reshaped_data.append(resize_fn(data[c, :, :, slice_id], new_shape_2d, order, **kwargs)) + reshaped_data = np.stack(reshaped_data, axis) + if shape[axis] != new_shape[axis]: + + # The following few lines are blatantly copied and modified from sklearn's resize() + rows, cols, dim = new_shape[0], new_shape[1], new_shape[2] + orig_rows, orig_cols, orig_dim = reshaped_data.shape + + row_scale = float(orig_rows) / rows + col_scale = float(orig_cols) / cols + dim_scale = float(orig_dim) / dim + + map_rows, map_cols, map_dims = np.mgrid[:rows, :cols, :dim] + map_rows = row_scale * (map_rows + 0.5) - 0.5 + map_cols = col_scale * (map_cols + 0.5) - 0.5 + map_dims = dim_scale * (map_dims + 0.5) - 0.5 + + coord_map = np.array([map_rows, map_cols, map_dims]) + if not is_seg or order_z == 0: + reshaped_final_data.append(map_coordinates(reshaped_data, coord_map, order=order_z, + mode='nearest')[None]) + else: + unique_labels = np.sort(pd.unique(reshaped_data.ravel())) # np.unique(reshaped_data) + reshaped = np.zeros(new_shape, dtype=dtype_data) + + for i, cl in enumerate(unique_labels): + reshaped_multihot = np.round( + map_coordinates((reshaped_data == cl).astype(float), coord_map, order=order_z, + mode='nearest')) + reshaped[reshaped_multihot > 0.5] = cl + reshaped_final_data.append(reshaped[None]) + else: + reshaped_final_data.append(reshaped_data[None]) + reshaped_final_data = np.vstack(reshaped_final_data) + else: + # print("no separate z, order", order) + reshaped = [] + for c in range(data.shape[0]): + reshaped.append(resize_fn(data[c], new_shape, order, **kwargs)[None]) + reshaped_final_data = np.vstack(reshaped) + return reshaped_final_data.astype(dtype_data) + else: + # print("no 
resampling necessary")
+ return data
diff --git a/docker/template/src/nnunetv2/preprocessing/resampling/utils.py b/docker/template/src/nnunetv2/preprocessing/resampling/utils.py
new file mode 100644
index 0000000..0bff719
--- /dev/null
+++ b/docker/template/src/nnunetv2/preprocessing/resampling/utils.py
@@ -0,0 +1,15 @@
+from typing import Callable
+
+import nnunetv2
+from batchgenerators.utilities.file_and_folder_operations import join
+from nnunetv2.utilities.find_class_by_name import recursive_find_python_class
+
+
+def recursive_find_resampling_fn_by_name(resampling_fn: str) -> Callable:
+ ret = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing", "resampling"), resampling_fn,
+ 'nnunetv2.preprocessing.resampling')
+ if ret is None:
+ raise RuntimeError("Unable to find resampling function named '%s'. Please make sure this fn is located in the "
+ "nnunetv2.preprocessing.resampling module." % resampling_fn)
+ else:
+ return ret
diff --git a/docker/template/src/nnunetv2/run/__init__.py b/docker/template/src/nnunetv2/run/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/run/load_pretrained_weights.py b/docker/template/src/nnunetv2/run/load_pretrained_weights.py
new file mode 100644
index 0000000..bb26e41
--- /dev/null
+++ b/docker/template/src/nnunetv2/run/load_pretrained_weights.py
@@ -0,0 +1,66 @@
+import torch
+from torch._dynamo import OptimizedModule
+from torch.nn.parallel import DistributedDataParallel as DDP
+
+
+def load_pretrained_weights(network, fname, verbose=False):
+ """
+ Transfers all weights between matching keys in state_dicts. Matching is done by name and we only transfer if the
+ shape is also the same. Segmentation layers (the 1x1(x1) layers that produce the segmentation maps,
+ identified by keys ending with '.seg_layers') are not transferred!
+
+ If the pretrained weights were obtained with a training outside nnU-Net and DDP or torch.compile was used,
+ you need to change the keys of the pretrained state_dict. DDP adds a 'module.' prefix and torch.compile adds
+ an '_orig_mod.' prefix. You DO NOT need to worry about this if pretraining was done with nnU-Net as
+ nnUNetTrainer.save_checkpoint takes care of that!
+
+ """
+ saved_model = torch.load(fname)
+ pretrained_dict = saved_model['network_weights']
+
+ skip_strings_in_pretrained = [
+ '.seg_layers.',
+ ]
+
+ if isinstance(network, DDP):
+ mod = network.module
+ else:
+ mod = network
+ if isinstance(mod, OptimizedModule):
+ mod = mod._orig_mod
+
+ model_dict = mod.state_dict()
+ # verify that all but the segmentation layers have the same shape
+ for key, _ in model_dict.items():
+ if all([i not in key for i in skip_strings_in_pretrained]):
+ assert key in pretrained_dict, \
+ f"Key {key} is missing in the pretrained model weights. The pretrained weights do not seem to be " \
+ f"compatible with your network."
+ assert model_dict[key].shape == pretrained_dict[key].shape, \
+ f"The shape of the parameters of key {key} is not the same. Pretrained model: " \
+ f"{pretrained_dict[key].shape}; your network: {model_dict[key].shape}. The pretrained model " \
+ f"does not seem to be compatible with your network."
+
+ # fun fact: in principle this allows loading from parameters that do not cover the entire network. For example pretrained
+ # encoders. Not supported by this function though (see assertions above)
+
+ # commenting out this abomination of a dict comprehension for preservation in the archives of 'what not to do'
+ # pretrained_dict = {'module.'
+ k if is_ddp else k: v + # for k, v in pretrained_dict.items() + # if (('module.' + k if is_ddp else k) in model_dict) and + # all([i not in k for i in skip_strings_in_pretrained])} + + pretrained_dict = {k: v for k, v in pretrained_dict.items() + if k in model_dict.keys() and all([i not in k for i in skip_strings_in_pretrained])} + + model_dict.update(pretrained_dict) + + print("################### Loading pretrained weights from file ", fname, '###################') + if verbose: + print("Below is the list of overlapping blocks in pretrained model and nnUNet architecture:") + for key, value in pretrained_dict.items(): + print(key, 'shape', value.shape) + print("################### Done ###################") + mod.load_state_dict(model_dict) + + diff --git a/docker/template/src/nnunetv2/run/run_training.py b/docker/template/src/nnunetv2/run/run_training.py new file mode 100644 index 0000000..93dd759 --- /dev/null +++ b/docker/template/src/nnunetv2/run/run_training.py @@ -0,0 +1,274 @@ +import os +import socket +from typing import Union, Optional + +import nnunetv2 +import torch.cuda +import torch.distributed as dist +import torch.multiprocessing as mp +from batchgenerators.utilities.file_and_folder_operations import join, isfile, load_json +from nnunetv2.paths import nnUNet_preprocessed +from nnunetv2.run.load_pretrained_weights import load_pretrained_weights +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from torch.backends import cudnn + + +def find_free_network_port() -> int: + """Finds a free port on localhost. + + It is useful in single-node training when we don't want to connect to a real main node but have to set the + `MASTER_PORT` environment variable. + """ + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.bind(("", 0)) + port = s.getsockname()[1] + s.close() + return port + + +def get_trainer_from_args(dataset_name_or_id: Union[int, str], + configuration: str, + fold: int, + trainer_name: str = 'nnUNetTrainer', + plans_identifier: str = 'nnUNetPlans', + use_compressed: bool = False, + device: torch.device = torch.device('cuda')): + # load nnunet class and do sanity checks + nnunet_trainer = recursive_find_python_class(join(nnunetv2.__path__[0], "training", "nnUNetTrainer"), + trainer_name, 'nnunetv2.training.nnUNetTrainer') + if nnunet_trainer is None: + raise RuntimeError(f'Could not find requested nnunet trainer {trainer_name} in ' + f'nnunetv2.training.nnUNetTrainer (' + f'{join(nnunetv2.__path__[0], "training", "nnUNetTrainer")}). If it is located somewhere ' + f'else, please move it there.') + assert issubclass(nnunet_trainer, nnUNetTrainer), 'The requested nnunet trainer class must inherit from ' \ + 'nnUNetTrainer' + + # handle dataset input. If it's an ID we need to convert to int from string + if dataset_name_or_id.startswith('Dataset'): + pass + else: + try: + dataset_name_or_id = int(dataset_name_or_id) + except ValueError: + raise ValueError(f'dataset_name_or_id must either be an integer or a valid dataset name with the pattern ' + f'DatasetXXX_YYY where XXX are the three(!) task ID digits. 
Your ' + f'input: {dataset_name_or_id}') + + # initialize nnunet trainer + preprocessed_dataset_folder_base = join(nnUNet_preprocessed, maybe_convert_to_dataset_name(dataset_name_or_id)) + plans_file = join(preprocessed_dataset_folder_base, plans_identifier + '.json') + plans = load_json(plans_file) + dataset_json = load_json(join(preprocessed_dataset_folder_base, 'dataset.json')) + nnunet_trainer = nnunet_trainer(plans=plans, configuration=configuration, fold=fold, + dataset_json=dataset_json, unpack_dataset=not use_compressed, device=device) + return nnunet_trainer + + +def maybe_load_checkpoint(nnunet_trainer: nnUNetTrainer, continue_training: bool, validation_only: bool, + pretrained_weights_file: str = None): + if continue_training and pretrained_weights_file is not None: + raise RuntimeError('Cannot both continue a training AND load pretrained weights. Pretrained weights can only ' + 'be used at the beginning of the training.') + if continue_training: + expected_checkpoint_file = join(nnunet_trainer.output_folder, 'checkpoint_final.pth') + if not isfile(expected_checkpoint_file): + expected_checkpoint_file = join(nnunet_trainer.output_folder, 'checkpoint_latest.pth') + # special case where --c is used to run a previously aborted validation + if not isfile(expected_checkpoint_file): + expected_checkpoint_file = join(nnunet_trainer.output_folder, 'checkpoint_best.pth') + if not isfile(expected_checkpoint_file): + print(f"WARNING: Cannot continue training because there seems to be no checkpoint available to " + f"continue from. Starting a new training...") + expected_checkpoint_file = None + elif validation_only: + expected_checkpoint_file = join(nnunet_trainer.output_folder, 'checkpoint_final.pth') + if not isfile(expected_checkpoint_file): + raise RuntimeError(f"Cannot run validation because the training is not finished yet!") + else: + if pretrained_weights_file is not None: + if not nnunet_trainer.was_initialized: + nnunet_trainer.initialize() + load_pretrained_weights(nnunet_trainer.network, pretrained_weights_file, verbose=True) + expected_checkpoint_file = None + + if expected_checkpoint_file is not None: + nnunet_trainer.load_checkpoint(expected_checkpoint_file) + + +def setup_ddp(rank, world_size): + # initialize the process group + dist.init_process_group("nccl", rank=rank, world_size=world_size) + + +def cleanup_ddp(): + dist.destroy_process_group() + + +def run_ddp(rank, dataset_name_or_id, configuration, fold, tr, p, use_compressed, disable_checkpointing, c, val, + pretrained_weights, npz, val_with_best, world_size): + setup_ddp(rank, world_size) + torch.cuda.set_device(torch.device('cuda', dist.get_rank())) + + nnunet_trainer = get_trainer_from_args(dataset_name_or_id, configuration, fold, tr, p, + use_compressed) + + if disable_checkpointing: + nnunet_trainer.disable_checkpointing = disable_checkpointing + + assert not (c and val), f'Cannot set --c and --val flag at the same time. Dummy.' 
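+ # resolve --c / --val / pretrained weights into the checkpoint to load (if any) and load it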
+ maybe_load_checkpoint(nnunet_trainer, c, val, pretrained_weights)
+
+ if torch.cuda.is_available():
+ cudnn.deterministic = False
+ cudnn.benchmark = True
+
+ if not val:
+ nnunet_trainer.run_training()
+
+ if val_with_best:
+ nnunet_trainer.load_checkpoint(join(nnunet_trainer.output_folder, 'checkpoint_best.pth'))
+ nnunet_trainer.perform_actual_validation(npz)
+ cleanup_ddp()
+
+
+def run_training(dataset_name_or_id: Union[str, int],
+ configuration: str, fold: Union[int, str],
+ trainer_class_name: str = 'nnUNetTrainer',
+ plans_identifier: str = 'nnUNetPlans',
+ pretrained_weights: Optional[str] = None,
+ num_gpus: int = 1,
+ use_compressed_data: bool = False,
+ export_validation_probabilities: bool = False,
+ continue_training: bool = False,
+ only_run_validation: bool = False,
+ disable_checkpointing: bool = False,
+ val_with_best: bool = False,
+ device: torch.device = torch.device('cuda')):
+ if isinstance(fold, str):
+ if fold != 'all':
+ try:
+ fold = int(fold)
+ except ValueError as e:
+ print(f'Unable to convert given value for fold to int: {fold}. fold must be either "all" or an integer!')
+ raise e
+
+ if val_with_best:
+ assert not disable_checkpointing, '--val_best is not compatible with --disable_checkpointing'
+
+ if num_gpus > 1:
+ assert device.type == 'cuda', f"DDP training (triggered by num_gpus > 1) is only implemented for cuda devices. Your device: {device}"
+
+ os.environ['MASTER_ADDR'] = 'localhost'
+ if 'MASTER_PORT' not in os.environ.keys():
+ port = str(find_free_network_port())
+ print(f"using port {port}")
+ os.environ['MASTER_PORT'] = port  # str(port)
+
+ mp.spawn(run_ddp,
+ args=(
+ dataset_name_or_id,
+ configuration,
+ fold,
+ trainer_class_name,
+ plans_identifier,
+ use_compressed_data,
+ disable_checkpointing,
+ continue_training,
+ only_run_validation,
+ pretrained_weights,
+ export_validation_probabilities,
+ val_with_best,
+ num_gpus),
+ nprocs=num_gpus,
+ join=True)
+ else:
+ nnunet_trainer = get_trainer_from_args(dataset_name_or_id, configuration, fold, trainer_class_name,
+ plans_identifier, use_compressed_data, device=device)
+
+ if disable_checkpointing:
+ nnunet_trainer.disable_checkpointing = disable_checkpointing
+
+ assert not (continue_training and only_run_validation), f'Cannot set --c and --val flag at the same time. Dummy.'
+
+ maybe_load_checkpoint(nnunet_trainer, continue_training, only_run_validation, pretrained_weights)
+
+ if torch.cuda.is_available():
+ cudnn.deterministic = False
+ cudnn.benchmark = True
+
+ if not only_run_validation:
+ nnunet_trainer.run_training()
+
+ if val_with_best:
+ nnunet_trainer.load_checkpoint(join(nnunet_trainer.output_folder, 'checkpoint_best.pth'))
+ nnunet_trainer.perform_actual_validation(export_validation_probabilities)
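+
+
+# Minimal programmatic usage sketch (hypothetical dataset/fold; assumes nnUNet_preprocessed is set up and
+# plans + dataset.json exist for the dataset):
+#
+# import torch
+# run_training('Dataset004_Hippocampus', '3d_fullres', fold=0,
+#              export_validation_probabilities=True, device=torch.device('cuda'))
+#
+# setting num_gpus > 1 switches to the DDP branch above (cuda devices only)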
+
+
+def run_training_entry():
+ import argparse
+ parser = argparse.ArgumentParser()
+ parser.add_argument('dataset_name_or_id', type=str,
+ help="Dataset name or ID to train with")
+ parser.add_argument('configuration', type=str,
+ help="Configuration that should be trained")
+ parser.add_argument('fold', type=str,
+ help='Fold of the 5-fold cross-validation. Should be an int between 0 and 4.')
+ parser.add_argument('-tr', type=str, required=False, default='nnUNetTrainer',
+ help='[OPTIONAL] Use this flag to specify a custom trainer. Default: nnUNetTrainer')
+ parser.add_argument('-p', type=str, required=False, default='nnUNetPlans',
+ help='[OPTIONAL] Use this flag to specify a custom plans identifier. Default: nnUNetPlans')
+ parser.add_argument('-pretrained_weights', type=str, required=False, default=None,
+ help='[OPTIONAL] path to nnU-Net checkpoint file to be used as pretrained model. Will only '
+ 'be used when actually training. Beta. Use with caution.')
+ parser.add_argument('-num_gpus', type=int, default=1, required=False,
+ help='Specify the number of GPUs to use for training')
+ parser.add_argument("--use_compressed", default=False, action="store_true", required=False,
+ help="[OPTIONAL] If you set this flag the training cases will not be decompressed. Reading compressed "
+ "data is much more CPU and (potentially) RAM intensive and should only be used if you "
+ "know what you are doing")
+ parser.add_argument('--npz', action='store_true', required=False,
+ help='[OPTIONAL] Save softmax predictions from final validation as npz files (in addition to predicted '
+ 'segmentations). Needed for finding the best ensemble.')
+ parser.add_argument('--c', action='store_true', required=False,
+ help='[OPTIONAL] Continue training from latest checkpoint')
+ parser.add_argument('--val', action='store_true', required=False,
+ help='[OPTIONAL] Set this flag to only run the validation. Requires training to have finished.')
+ parser.add_argument('--val_best', action='store_true', required=False,
+ help='[OPTIONAL] If set, the validation will be performed with the checkpoint_best instead '
+ 'of checkpoint_final. NOT COMPATIBLE with --disable_checkpointing! '
+ 'WARNING: This will use the same \'validation\' folder as the regular validation '
+ 'with no way of distinguishing the two!')
+ parser.add_argument('--disable_checkpointing', action='store_true', required=False,
+ help='[OPTIONAL] Set this flag to disable checkpointing. Ideal for testing things out when '
+ 'you don\'t want to flood your hard drive with checkpoints.')
+ parser.add_argument('-device', type=str, default='cuda', required=False,
+ help="Use this to set the device the training should run with. Available options are 'cuda' "
+ "(GPU), 'cpu' (CPU) and 'mps' (Apple M1/M2). Do NOT use this to set which GPU ID! "
+ "Use CUDA_VISIBLE_DEVICES=X nnUNetv2_train [...] instead!")
+ args = parser.parse_args()
+
+ assert args.device in ['cpu', 'cuda', 'mps'], f'-device must be either cpu, mps or cuda. Other devices are not tested/supported. Got: {args.device}.'
+ if args.device == 'cpu': + # let's allow torch to use hella threads + import multiprocessing + torch.set_num_threads(multiprocessing.cpu_count()) + device = torch.device('cpu') + elif args.device == 'cuda': + # multithreading in torch doesn't help nnU-Net if run on GPU + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + device = torch.device('cuda') + else: + device = torch.device('mps') + + run_training(args.dataset_name_or_id, args.configuration, args.fold, args.tr, args.p, args.pretrained_weights, + args.num_gpus, args.use_compressed, args.npz, args.c, args.val, args.disable_checkpointing, args.val_best, + device=device) + + +if __name__ == '__main__': + run_training_entry() diff --git a/docker/template/src/nnunetv2/tests/__init__.py b/docker/template/src/nnunetv2/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/tests/integration_tests/__init__.py b/docker/template/src/nnunetv2/tests/integration_tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py b/docker/template/src/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py new file mode 100644 index 0000000..a1b4df1 --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/add_lowres_and_cascade.py @@ -0,0 +1,33 @@ +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.paths import nnUNet_preprocessed +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + +if __name__ == '__main__': + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-d', nargs='+', type=int, help='List of dataset ids') + args = parser.parse_args() + + for d in args.d: + dataset_name = maybe_convert_to_dataset_name(d) + plans = load_json(join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json')) + plans['configurations']['3d_lowres'] = { + "data_identifier": "nnUNetPlans_3d_lowres", # do not be a dumbo and forget this. I was a dumbo. And I paid dearly with ~10 min debugging time + 'inherits_from': '3d_fullres', + "patch_size": [20, 28, 20], + "median_image_size_in_voxels": [18.0, 25.0, 18.0], + "spacing": [2.0, 2.0, 2.0], + "n_conv_per_stage_encoder": [2, 2, 2], + "n_conv_per_stage_decoder": [2, 2], + "num_pool_per_axis": [2, 2, 2], + "pool_op_kernel_sizes": [[1, 1, 1], [2, 2, 2], [2, 2, 2]], + "conv_kernel_sizes": [[3, 3, 3], [3, 3, 3], [3, 3, 3]], + "next_stage": "3d_cascade_fullres" + } + plans['configurations']['3d_cascade_fullres'] = { + 'inherits_from': '3d_fullres', + "previous_stage": "3d_lowres" + } + save_json(plans, join(nnUNet_preprocessed, dataset_name, 'nnUNetPlans.json'), sort_keys=False) \ No newline at end of file diff --git a/docker/template/src/nnunetv2/tests/integration_tests/cleanup_integration_test.py b/docker/template/src/nnunetv2/tests/integration_tests/cleanup_integration_test.py new file mode 100644 index 0000000..c9fca95 --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/cleanup_integration_test.py @@ -0,0 +1,19 @@ +import shutil + +from batchgenerators.utilities.file_and_folder_operations import isdir, join + +from nnunetv2.paths import nnUNet_raw, nnUNet_results, nnUNet_preprocessed + +if __name__ == '__main__': + # deletes everything! 
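+ # removes the four integration-test datasets from nnUNet_raw, nnUNet_preprocessed and nnUNet_results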
+ dataset_names = [ + 'Dataset996_IntegrationTest_Hippocampus_regions_ignore', + 'Dataset997_IntegrationTest_Hippocampus_regions', + 'Dataset998_IntegrationTest_Hippocampus_ignore', + 'Dataset999_IntegrationTest_Hippocampus', + ] + for fld in [nnUNet_raw, nnUNet_preprocessed, nnUNet_results]: + for d in dataset_names: + if isdir(join(fld, d)): + shutil.rmtree(join(fld, d)) + diff --git a/docker/template/src/nnunetv2/tests/integration_tests/lsf_commands.sh b/docker/template/src/nnunetv2/tests/integration_tests/lsf_commands.sh new file mode 100644 index 0000000..3888c1a --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/lsf_commands.sh @@ -0,0 +1,10 @@ +bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 996" +bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 997" +bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 998" +bsub -q gpu.legacy -gpu num=1:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test.sh 999" + + +bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 996" +bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 997" +bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 998" +bsub -q gpu.legacy -gpu num=2:j_exclusive=yes:gmem=1G -L /bin/bash ". /home/isensee/load_env_cluster4.sh && cd /home/isensee/git_repos/nnunet_remake && export nnUNet_keep_files_open=True && . nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh 999" diff --git a/docker/template/src/nnunetv2/tests/integration_tests/prepare_integration_tests.sh b/docker/template/src/nnunetv2/tests/integration_tests/prepare_integration_tests.sh new file mode 100644 index 0000000..b5dda42 --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/prepare_integration_tests.sh @@ -0,0 +1,18 @@ +# assumes you are in the nnunet repo! 
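+# (i.e. the folder that contains the nnunetv2 package and setup.py)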
+
+# prepare raw datasets
+python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset999_IntegrationTest_Hippocampus.py
+python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset998_IntegrationTest_Hippocampus_ignore.py
+python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset997_IntegrationTest_Hippocampus_regions.py
+python nnunetv2/dataset_conversion/datasets_for_integration_tests/Dataset996_IntegrationTest_Hippocampus_regions_ignore.py
+
+# now run experiment planning without preprocessing
+nnUNetv2_plan_and_preprocess -d 996 997 998 999 --no_pp
+
+# now add 3d lowres and cascade
+python nnunetv2/tests/integration_tests/add_lowres_and_cascade.py -d 996 997 998 999
+
+# now preprocess everything
+nnUNetv2_preprocess -d 996 997 998 999 -c 2d 3d_lowres 3d_fullres -np 8 8 8 # no need to preprocess the cascade as it's the same data as 3d_fullres
+
+# done
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/tests/integration_tests/readme.md b/docker/template/src/nnunetv2/tests/integration_tests/readme.md
new file mode 100644
index 0000000..2a44f13
--- /dev/null
+++ b/docker/template/src/nnunetv2/tests/integration_tests/readme.md
@@ -0,0 +1,58 @@
+# Preface
+
+I am just a mortal with many tasks and limited time. Ain't nobody got time for unit tests.
+
+HOWEVER, at least some integration tests should be performed testing nnU-Net from start to finish.
+
+# Introduction - What the heck is happening?
+This test covers all possible labeling scenarios (standard labels, regions, ignore labels and regions with
+ignore labels). It runs the entire nnU-Net pipeline from start to finish:
+
+- fingerprint extraction
+- experiment planning
+- preprocessing
+- train all 4 configurations (2d, 3d_lowres, 3d_fullres, 3d_cascade_fullres) as 5-fold CV
+- automatically find the best model or ensemble
+- determine the postprocessing used for this
+- predict some test set
+- apply postprocessing to the test set
+
+To speed things up, we do the following:
+- pick Dataset004_Hippocampus because it is quadratisch, praktisch, gut ("square, practical, good") - the MNIST of medical image segmentation
+- by default this dataset does not have 3d_lowres or cascade. We just manually add them (cool new feature, eh?). See `add_lowres_and_cascade.py` to learn more!
+- we use nnUNetTrainer_5epochs for a short training
+
+# How to run it?
+
+Set your pwd to be the nnunet repo folder (the one where the `nnunetv2` folder and the `setup.py` are located!)
+
+Now generate the 4 dummy datasets (ids 996, 997, 998, 999) from dataset 4. This will crash if you don't have Dataset004!
+```commandline
+bash nnunetv2/tests/integration_tests/prepare_integration_tests.sh
+```
+
+Now you can run the integration test for each of the datasets:
+```commandline
+bash nnunetv2/tests/integration_tests/run_integration_test.sh DATASET_ID
+```
+use DATASET_ID 996, 997, 998 and 999. You can run these independently on different GPUs/systems to speed things up.
+This will take, I dunno, like 10-30 minutes!?
+
+Also run
+```commandline
+bash nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh DATASET_ID
+```
+to verify DDP is working (needs 2 GPUs!)
+
+# How to check if the test was successful?
+If I were not as lazy as I am, I would have programmed some automatism that checks whether Dice scores etc. are in an acceptable range.
+So you need to do the following:
+1) check that none of your runs crashed (duh)
+2) for each run, navigate to `nnUNet_results/DATASET_NAME` and take a look at the `inference_information.json` file.
+Does it make sense? If so: NICE! + +Once the integration test is completed you can delete all the temporary files associated with it by running: + +```commandline +python nnunetv2/tests/integration_tests/cleanup_integration_test.py +``` \ No newline at end of file diff --git a/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test.sh b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test.sh new file mode 100644 index 0000000..ff0426c --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test.sh @@ -0,0 +1,27 @@ + + +nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_fullres 1 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_fullres 2 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_fullres 3 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_fullres 4 -tr nnUNetTrainer_5epochs --npz + +nnUNetv2_train $1 2d 0 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 2d 1 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 2d 2 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 2d 3 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 2d 4 -tr nnUNetTrainer_5epochs --npz + +nnUNetv2_train $1 3d_lowres 0 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_lowres 1 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_lowres 2 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_lowres 3 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_lowres 4 -tr nnUNetTrainer_5epochs --npz + +nnUNetv2_train $1 3d_cascade_fullres 0 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_cascade_fullres 1 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_cascade_fullres 2 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_cascade_fullres 3 -tr nnUNetTrainer_5epochs --npz +nnUNetv2_train $1 3d_cascade_fullres 4 -tr nnUNetTrainer_5epochs --npz + +python nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py -d $1 \ No newline at end of file diff --git a/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py new file mode 100644 index 0000000..89e783e --- /dev/null +++ b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_bestconfig_inference.py @@ -0,0 +1,75 @@ +import argparse + +import torch +from batchgenerators.utilities.file_and_folder_operations import join, load_pickle + +from nnunetv2.ensembling.ensemble import ensemble_folders +from nnunetv2.evaluation.find_best_configuration import find_best_configuration, \ + dumb_trainer_config_plans_to_trained_models_dict +from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor +from nnunetv2.paths import nnUNet_raw, nnUNet_results +from nnunetv2.postprocessing.remove_connected_components import apply_postprocessing_to_folder +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.file_path_utilities import get_output_folder + + +if __name__ == '__main__': + """ + Predicts the imagesTs folder with the best configuration and applies postprocessing + """ + torch.set_num_threads(1) + torch.set_num_interop_threads(1) + + parser = argparse.ArgumentParser() + parser.add_argument('-d', type=int, help='dataset id') + args = parser.parse_args() + d = args.d + + dataset_name = maybe_convert_to_dataset_name(d) + source_dir = join(nnUNet_raw, dataset_name, 'imagesTs') + target_dir_base = 
join(nnUNet_results, dataset_name)
+
+    models = dumb_trainer_config_plans_to_trained_models_dict(['nnUNetTrainer_5epochs'],
+                                                              ['2d',
+                                                               '3d_lowres',
+                                                               '3d_cascade_fullres',
+                                                               '3d_fullres'],
+                                                              ['nnUNetPlans'])
+    ret = find_best_configuration(d, models, allow_ensembling=True, num_processes=8, overwrite=True,
+                                  folds=(0, 1, 2, 3, 4), strict=True)
+
+    has_ensemble = len(ret['best_model_or_ensemble']['selected_model_or_models']) > 1
+
+    # we don't use all folds to speed stuff up
+    used_folds = (0, 3)
+    output_folders = []
+    for im in ret['best_model_or_ensemble']['selected_model_or_models']:
+        output_dir = join(target_dir_base, f"pred_{im['configuration']}")
+        model_folder = get_output_folder(d, im['trainer'], im['plans_identifier'], im['configuration'])
+        # note that if the best model is the ensemble of 3d_lowres and the 3d cascade then 3d_lowres will be predicted
+        # twice (once standalone and once to generate the predictions for the cascade) because we don't reuse the
+        # prediction here. The proper way would be to check for that and
+        # then give the output of 3d_lowres inference to the folder_with_segs_from_prev_stage kwarg in
+        # predict_from_raw_data. Since we allow for
+        # dynamically setting 'previous_stage' in the plans I am too lazy to implement this here. This is just an
+        # integration test after all. Take a closer look at how this is handled in predict_from_raw_data
+        predictor = nnUNetPredictor(verbose=False, allow_tqdm=False)
+        predictor.initialize_from_trained_model_folder(model_folder, used_folds)
+        predictor.predict_from_files(source_dir, output_dir, has_ensemble, overwrite=True)
+        # predict_from_raw_data(list_of_lists_or_source_folder=source_dir, output_folder=output_dir,
+        #                       model_training_output_dir=model_folder, use_folds=used_folds,
+        #                       save_probabilities=has_ensemble, verbose=False, overwrite=True)
+        output_folders.append(output_dir)
+
+    # if we have an ensemble, we need to ensemble the results
+    if has_ensemble:
+        ensemble_folders(output_folders, join(target_dir_base, 'ensemble_predictions'), save_merged_probabilities=False)
+        folder_for_pp = join(target_dir_base, 'ensemble_predictions')
+    else:
+        folder_for_pp = output_folders[0]
+
+    # apply postprocessing
+    pp_fns, pp_fn_kwargs = load_pickle(ret['best_model_or_ensemble']['postprocessing_file'])
+    apply_postprocessing_to_folder(folder_for_pp, join(target_dir_base, 'ensemble_predictions_postprocessed'),
+                                   pp_fns,
+                                   pp_fn_kwargs, plans_file_or_dict=ret['best_model_or_ensemble']['some_plans_file'])
diff --git a/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh
new file mode 100644
index 0000000..5199247
--- /dev/null
+++ b/docker/template/src/nnunetv2/tests/integration_tests/run_integration_test_trainingOnly_DDP.sh
@@ -0,0 +1 @@
+nnUNetv2_train $1 3d_fullres 0 -tr nnUNetTrainer_10epochs -num_gpus 2
diff --git a/docker/template/src/nnunetv2/training/__init__.py b/docker/template/src/nnunetv2/training/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/data_augmentation/__init__.py b/docker/template/src/nnunetv2/training/data_augmentation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/data_augmentation/compute_initial_patch_size.py b/docker/template/src/nnunetv2/training/data_augmentation/compute_initial_patch_size.py
new file mode 100644
index 0000000..a772bc2
--- /dev/null
+++
b/docker/template/src/nnunetv2/training/data_augmentation/compute_initial_patch_size.py @@ -0,0 +1,24 @@ +import numpy as np + + +def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range): + if isinstance(rot_x, (tuple, list)): + rot_x = max(np.abs(rot_x)) + if isinstance(rot_y, (tuple, list)): + rot_y = max(np.abs(rot_y)) + if isinstance(rot_z, (tuple, list)): + rot_z = max(np.abs(rot_z)) + rot_x = min(90 / 360 * 2. * np.pi, rot_x) + rot_y = min(90 / 360 * 2. * np.pi, rot_y) + rot_z = min(90 / 360 * 2. * np.pi, rot_z) + from batchgenerators.augmentations.utils import rotate_coords_3d, rotate_coords_2d + coords = np.array(final_patch_size) + final_shape = np.copy(coords) + if len(coords) == 3: + final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, rot_x, 0, 0)), final_shape)), 0) + final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, rot_y, 0)), final_shape)), 0) + final_shape = np.max(np.vstack((np.abs(rotate_coords_3d(coords, 0, 0, rot_z)), final_shape)), 0) + elif len(coords) == 2: + final_shape = np.max(np.vstack((np.abs(rotate_coords_2d(coords, rot_x)), final_shape)), 0) + final_shape /= min(scale_range) + return final_shape.astype(int) diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/__init__.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/cascade_transforms.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/cascade_transforms.py new file mode 100644 index 0000000..378bab2 --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/cascade_transforms.py @@ -0,0 +1,136 @@ +from typing import Union, List, Tuple, Callable + +import numpy as np +from acvl_utils.morphology.morphology_helper import label_with_component_sizes +from batchgenerators.transforms.abstract_transforms import AbstractTransform +from skimage.morphology import ball +from skimage.morphology.binary import binary_erosion, binary_dilation, binary_closing, binary_opening + + +class MoveSegAsOneHotToData(AbstractTransform): + def __init__(self, index_in_origin: int, all_labels: Union[Tuple[int, ...], List[int]], + key_origin="seg", key_target="data", remove_from_origin=True): + """ + Takes data_dict[seg][:, index_in_origin], converts it to one hot encoding and appends it to + data_dict[key_target]. Optionally removes index_in_origin from data_dict[seg]. 
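+
+        Editor's example (illustrative, not part of the original docstring): with index_in_origin=0 and
+        all_labels=(0, 1, 2), a seg channel of shape (b, 1, x, y, z) becomes len(all_labels) one-hot channels
+        that are concatenated to data_dict[key_target] along dim 1.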
+ """ + self.remove_from_origin = remove_from_origin + self.all_labels = all_labels + self.key_target = key_target + self.key_origin = key_origin + self.index_in_origin = index_in_origin + + def __call__(self, **data_dict): + seg = data_dict[self.key_origin][:, self.index_in_origin:self.index_in_origin+1] + + seg_onehot = np.zeros((seg.shape[0], len(self.all_labels), *seg.shape[2:]), + dtype=data_dict[self.key_target].dtype) + for i, l in enumerate(self.all_labels): + seg_onehot[:, i][seg[:, 0] == l] = 1 + + data_dict[self.key_target] = np.concatenate((data_dict[self.key_target], seg_onehot), 1) + + if self.remove_from_origin: + remaining_channels = [i for i in range(data_dict[self.key_origin].shape[1]) if i != self.index_in_origin] + data_dict[self.key_origin] = data_dict[self.key_origin][:, remaining_channels] + + return data_dict + + +class RemoveRandomConnectedComponentFromOneHotEncodingTransform(AbstractTransform): + def __init__(self, channel_idx: Union[int, List[int]], key: str = "data", p_per_sample: float = 0.2, + fill_with_other_class_p: float = 0.25, + dont_do_if_covers_more_than_x_percent: float = 0.25, p_per_label: float = 1): + """ + Randomly removes connected components in the specified channel_idx of data_dict[key]. Only considers components + smaller than dont_do_if_covers_more_than_X_percent of the sample. Also has the option of simulating + misclassification as another class (fill_with_other_class_p) + """ + self.p_per_label = p_per_label + self.dont_do_if_covers_more_than_x_percent = dont_do_if_covers_more_than_x_percent + self.fill_with_other_class_p = fill_with_other_class_p + self.p_per_sample = p_per_sample + self.key = key + if not isinstance(channel_idx, (list, tuple)): + channel_idx = [channel_idx] + self.channel_idx = channel_idx + + def __call__(self, **data_dict): + data = data_dict.get(self.key) + for b in range(data.shape[0]): + if np.random.uniform() < self.p_per_sample: + for c in self.channel_idx: + if np.random.uniform() < self.p_per_label: + # print(np.unique(data[b, c])) ## should be [0, 1] + workon = data[b, c].astype(bool) + if not np.any(workon): + continue + num_voxels = np.prod(workon.shape, dtype=np.uint64) + lab, component_sizes = label_with_component_sizes(workon.astype(bool)) + if len(component_sizes) > 0: + valid_component_ids = [i for i, j in component_sizes.items() if j < + num_voxels*self.dont_do_if_covers_more_than_x_percent] + # print('RemoveRandomConnectedComponentFromOneHotEncodingTransform', c, + # np.unique(data[b, c]), len(component_sizes), valid_component_ids, + # len(valid_component_ids)) + if len(valid_component_ids) > 0: + random_component = np.random.choice(valid_component_ids) + data[b, c][lab == random_component] = 0 + if np.random.uniform() < self.fill_with_other_class_p: + other_ch = [i for i in self.channel_idx if i != c] + if len(other_ch) > 0: + other_class = np.random.choice(other_ch) + data[b, other_class][lab == random_component] = 1 + data_dict[self.key] = data + return data_dict + + +class ApplyRandomBinaryOperatorTransform(AbstractTransform): + def __init__(self, + channel_idx: Union[int, List[int], Tuple[int, ...]], + p_per_sample: float = 0.3, + any_of_these: Tuple[Callable] = (binary_dilation, binary_erosion, binary_closing, binary_opening), + key: str = "data", + strel_size: Tuple[int, int] = (1, 10), + p_per_label: float = 1): + """ + Applies random binary operations (specified by any_of_these) with random ball size (radius is uniformly sampled + from interval strel_size) to specified channels. 
Expects the channel_idx to correspond to a one hot encoded
+        segmentation (see for example MoveSegAsOneHotToData)
+        """
+        self.p_per_label = p_per_label
+        self.strel_size = strel_size
+        self.key = key
+        self.any_of_these = any_of_these
+        self.p_per_sample = p_per_sample
+
+        if not isinstance(channel_idx, (list, tuple)):
+            channel_idx = [channel_idx]
+        self.channel_idx = channel_idx
+
+    def __call__(self, **data_dict):
+        for b in range(data_dict[self.key].shape[0]):
+            if np.random.uniform() < self.p_per_sample:
+                # this needs to be applied in random order to the channels
+                np.random.shuffle(self.channel_idx)
+                for c in self.channel_idx:
+                    if np.random.uniform() < self.p_per_label:
+                        operation = np.random.choice(self.any_of_these)
+                        selem = ball(np.random.uniform(*self.strel_size))
+                        workon = data_dict[self.key][b, c].astype(bool)
+                        if not np.any(workon):
+                            continue
+                        # print(np.unique(workon))
+                        res = operation(workon, selem).astype(data_dict[self.key].dtype)
+                        # print('ApplyRandomBinaryOperatorTransform', c, operation, np.sum(workon), np.sum(res))
+                        data_dict[self.key][b, c] = res
+
+                        # if class was added, we need to remove it in ALL other channels to keep one hot encoding
+                        # properties
+                        other_ch = [i for i in self.channel_idx if i != c]
+                        if len(other_ch) > 0:
+                            was_added_mask = (res - workon) > 0
+                            for oc in other_ch:
+                                data_dict[self.key][b, oc][was_added_mask] = 0
+                        # if class was removed, leave it at background
+        return data_dict
diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py
new file mode 100644
index 0000000..d31881f
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/deep_supervision_donwsampling.py
@@ -0,0 +1,55 @@
+from typing import Tuple, Union, List
+
+from batchgenerators.augmentations.utils import resize_segmentation
+from batchgenerators.transforms.abstract_transforms import AbstractTransform
+import numpy as np
+
+
+class DownsampleSegForDSTransform2(AbstractTransform):
+    '''
+    data_dict['output_key'] will be a list of segmentations scaled according to ds_scales
+    '''
+    def __init__(self, ds_scales: Union[List, Tuple],
+                 order: int = 0, input_key: str = "seg",
+                 output_key: str = "seg", axes: Tuple[int] = None):
+        """
+        Downscales data_dict[input_key] according to ds_scales. Each entry in ds_scales specifies one deep supervision
+        output and its resolution relative to the original data, for example 0.25 specifies 1/4 of the original shape.
+        ds_scales can also be a tuple of tuples, for example ((1, 1, 1), (0.5, 0.5, 0.5)) to specify the downsampling
+        for each axis independently
+        """
+        self.axes = axes
+        self.output_key = output_key
+        self.input_key = input_key
+        self.order = order
+        self.ds_scales = ds_scales
+
+    def __call__(self, **data_dict):
+        if self.axes is None:
+            axes = list(range(2, data_dict[self.input_key].ndim))
+        else:
+            axes = self.axes
+
+        output = []
+        for s in self.ds_scales:
+            if not isinstance(s, (tuple, list)):
+                s = [s] * len(axes)
+            else:
+                assert len(s) == len(axes), f'If ds_scales is a tuple for each resolution (one downsampling factor ' \
+                                            f'for each axis) then the number of entries in that tuple (here ' \
+                                            f'{len(s)}) must be the same as the number of axes (here {len(axes)}).'
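+            # editor's note (added comment): e.g. s = 0.5 on a (b, c, 64, 64, 64) seg yields a (b, c, 32, 32, 32)
+            # entry below; axes 0 and 1 are never scaled and the default order=0 (nearest neighbor) keeps label
+            # values intact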
+ + if all([i == 1 for i in s]): + output.append(data_dict[self.input_key]) + else: + new_shape = np.array(data_dict[self.input_key].shape).astype(float) + for i, a in enumerate(axes): + new_shape[a] *= s[i] + new_shape = np.round(new_shape).astype(int) + out_seg = np.zeros(new_shape, dtype=data_dict[self.input_key].dtype) + for b in range(data_dict[self.input_key].shape[0]): + for c in range(data_dict[self.input_key].shape[1]): + out_seg[b, c] = resize_segmentation(data_dict[self.input_key][b, c], new_shape[2:], self.order) + output.append(out_seg) + data_dict[self.output_key] = output + return data_dict diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py new file mode 100644 index 0000000..dd8368c --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/limited_length_multithreaded_augmenter.py @@ -0,0 +1,10 @@ +from batchgenerators.dataloading.nondet_multi_threaded_augmenter import NonDetMultiThreadedAugmenter + + +class LimitedLenWrapper(NonDetMultiThreadedAugmenter): + def __init__(self, my_imaginary_length, *args, **kwargs): + super().__init__(*args, **kwargs) + self.len = my_imaginary_length + + def __len__(self): + return self.len diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py new file mode 100644 index 0000000..587acd7 --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/manipulating_data_dict.py @@ -0,0 +1,10 @@ +from batchgenerators.transforms.abstract_transforms import AbstractTransform + + +class RemoveKeyTransform(AbstractTransform): + def __init__(self, key_to_remove: str): + self.key_to_remove = key_to_remove + + def __call__(self, **data_dict): + _ = data_dict.pop(self.key_to_remove, None) + return data_dict diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/masking.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/masking.py new file mode 100644 index 0000000..b009993 --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/masking.py @@ -0,0 +1,22 @@ +from typing import List + +from batchgenerators.transforms.abstract_transforms import AbstractTransform + + +class MaskTransform(AbstractTransform): + def __init__(self, apply_to_channels: List[int], mask_idx_in_seg: int = 0, set_outside_to: int = 0, + data_key: str = "data", seg_key: str = "seg"): + """ + Sets everything outside the mask to 0. CAREFUL! outside is defined as < 0, not =0 (in the Mask)!!! 
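+        Editor's note (added): the fill value is actually set_outside_to (0 by default) and the mask is read from
+        seg channel mask_idx_in_seg; nnU-Net's cropping marks voxels outside the nonzero region with -1 in that
+        channel, which is why 'outside' is tested as < 0.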
+ """ + self.apply_to_channels = apply_to_channels + self.seg_key = seg_key + self.data_key = data_key + self.set_outside_to = set_outside_to + self.mask_idx_in_seg = mask_idx_in_seg + + def __call__(self, **data_dict): + mask = data_dict[self.seg_key][:, self.mask_idx_in_seg] < 0 + for c in self.apply_to_channels: + data_dict[self.data_key][:, c][mask] = self.set_outside_to + return data_dict diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py new file mode 100644 index 0000000..52d2fc0 --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/region_based_training.py @@ -0,0 +1,38 @@ +from typing import List, Tuple, Union + +from batchgenerators.transforms.abstract_transforms import AbstractTransform +import numpy as np + + +class ConvertSegmentationToRegionsTransform(AbstractTransform): + def __init__(self, regions: Union[List, Tuple], + seg_key: str = "seg", output_key: str = "seg", seg_channel: int = 0): + """ + regions are tuple of tuples where each inner tuple holds the class indices that are merged into one region, + example: + regions= ((1, 2), (2, )) will result in 2 regions: one covering the region of labels 1&2 and the other just 2 + :param regions: + :param seg_key: + :param output_key: + """ + self.seg_channel = seg_channel + self.output_key = output_key + self.seg_key = seg_key + self.regions = regions + + def __call__(self, **data_dict): + seg = data_dict.get(self.seg_key) + num_regions = len(self.regions) + if seg is not None: + seg_shp = seg.shape + output_shape = list(seg_shp) + output_shape[1] = num_regions + region_output = np.zeros(output_shape, dtype=seg.dtype) + for b in range(seg_shp[0]): + for region_id, region_source_labels in enumerate(self.regions): + if not isinstance(region_source_labels, (list, tuple)): + region_source_labels = (region_source_labels, ) + for label_value in region_source_labels: + region_output[b, region_id][seg[b, self.seg_channel] == label_value] = 1 + data_dict[self.output_key] = region_output + return data_dict diff --git a/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py new file mode 100644 index 0000000..340fce7 --- /dev/null +++ b/docker/template/src/nnunetv2/training/data_augmentation/custom_transforms/transforms_for_dummy_2d.py @@ -0,0 +1,45 @@ +from typing import Tuple, Union, List + +from batchgenerators.transforms.abstract_transforms import AbstractTransform + + +class Convert3DTo2DTransform(AbstractTransform): + def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')): + """ + Transforms a 5D array (b, c, x, y, z) to a 4D array (b, c * x, y, z) by overloading the color channel + """ + self.apply_to_keys = apply_to_keys + + def __call__(self, **data_dict): + for k in self.apply_to_keys: + shp = data_dict[k].shape + assert len(shp) == 5, 'This transform only works on 3D data, so expects 5D tensor (b, c, x, y, z) as input.' + data_dict[k] = data_dict[k].reshape((shp[0], shp[1] * shp[2], shp[3], shp[4])) + shape_key = f'orig_shape_{k}' + assert shape_key not in data_dict.keys(), f'Convert3DTo2DTransform needs to store the original shape. ' \ + f'It does that using the {shape_key} key. That key is ' \ + f'already taken. Bummer.' 
+ data_dict[shape_key] = shp + return data_dict + + +class Convert2DTo3DTransform(AbstractTransform): + def __init__(self, apply_to_keys: Union[List[str], Tuple[str]] = ('data', 'seg')): + """ + Reverts Convert3DTo2DTransform by transforming a 4D array (b, c * x, y, z) back to 5D (b, c, x, y, z) + """ + self.apply_to_keys = apply_to_keys + + def __call__(self, **data_dict): + for k in self.apply_to_keys: + shape_key = f'orig_shape_{k}' + assert shape_key in data_dict.keys(), f'Did not find key {shape_key} in data_dict. Shitty. ' \ + f'Convert2DTo3DTransform only works in tandem with ' \ + f'Convert3DTo2DTransform and you probably forgot to add ' \ + f'Convert3DTo2DTransform to your pipeline. (Convert3DTo2DTransform ' \ + f'is where the missing key is generated)' + original_shape = data_dict[shape_key] + current_shape = data_dict[k].shape + data_dict[k] = data_dict[k].reshape((original_shape[0], original_shape[1], original_shape[2], + current_shape[-2], current_shape[-1])) + return data_dict diff --git a/docker/template/src/nnunetv2/training/dataloading/__init__.py b/docker/template/src/nnunetv2/training/dataloading/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/dataloading/base_data_loader.py b/docker/template/src/nnunetv2/training/dataloading/base_data_loader.py new file mode 100644 index 0000000..6a6a49f --- /dev/null +++ b/docker/template/src/nnunetv2/training/dataloading/base_data_loader.py @@ -0,0 +1,139 @@ +from typing import Union, Tuple + +from batchgenerators.dataloading.data_loader import DataLoader +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * +from nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset +from nnunetv2.utilities.label_handling.label_handling import LabelManager + + +class nnUNetDataLoaderBase(DataLoader): + def __init__(self, + data: nnUNetDataset, + batch_size: int, + patch_size: Union[List[int], Tuple[int, ...], np.ndarray], + final_patch_size: Union[List[int], Tuple[int, ...], np.ndarray], + label_manager: LabelManager, + oversample_foreground_percent: float = 0.0, + sampling_probabilities: Union[List[int], Tuple[int, ...], np.ndarray] = None, + pad_sides: Union[List[int], Tuple[int, ...], np.ndarray] = None, + probabilistic_oversampling: bool = False): + super().__init__(data, batch_size, 1, None, True, False, True, sampling_probabilities) + assert isinstance(data, nnUNetDataset), 'nnUNetDataLoaderBase only supports dictionaries as data' + self.indices = list(data.keys()) + + self.oversample_foreground_percent = oversample_foreground_percent + self.final_patch_size = final_patch_size + self.patch_size = patch_size + self.list_of_keys = list(self._data.keys()) + # need_to_pad denotes by how much we need to pad the data so that if we sample a patch of size final_patch_size + # (which is what the network will get) these patches will also cover the border of the images + self.need_to_pad = (np.array(patch_size) - np.array(final_patch_size)).astype(int) + if pad_sides is not None: + if not isinstance(pad_sides, np.ndarray): + pad_sides = np.array(pad_sides) + self.need_to_pad += pad_sides + self.num_channels = None + self.pad_sides = pad_sides + self.data_shape, self.seg_shape = self.determine_shapes() + self.sampling_probabilities = sampling_probabilities + self.annotated_classes_key = tuple(label_manager.all_labels) + self.has_ignore = label_manager.has_ignore_label + self.get_do_oversample = self._oversample_last_XX_percent if not 
probabilistic_oversampling \
+            else self._probabilistic_oversampling
+
+    def _oversample_last_XX_percent(self, sample_idx: int) -> bool:
+        """
+        determines whether sample sample_idx in a minibatch needs to be guaranteed foreground
+        """
+        return not sample_idx < round(self.batch_size * (1 - self.oversample_foreground_percent))
+
+    def _probabilistic_oversampling(self, sample_idx: int) -> bool:
+        # print('YEAH BOIIIIII')
+        return np.random.uniform() < self.oversample_foreground_percent
+
+    def determine_shapes(self):
+        # load one case
+        data, seg, properties = self._data.load_case(self.indices[0])
+        num_color_channels = data.shape[0]
+
+        data_shape = (self.batch_size, num_color_channels, *self.patch_size)
+        seg_shape = (self.batch_size, seg.shape[0], *self.patch_size)
+        return data_shape, seg_shape
+
+    def get_bbox(self, data_shape: np.ndarray, force_fg: bool, class_locations: Union[dict, None],
+                 overwrite_class: Union[int, Tuple[int, ...]] = None, verbose: bool = False):
+        # in dataloader 2d we need to select the slice prior to this and also modify the class_locations to only have
+        # locations for the given slice
+        need_to_pad = self.need_to_pad.copy()
+        dim = len(data_shape)
+
+        for d in range(dim):
+            # if case_all_data.shape + need_to_pad is still < patch size we need to pad more! We pad on both sides
+            # always
+            if need_to_pad[d] + data_shape[d] < self.patch_size[d]:
+                need_to_pad[d] = self.patch_size[d] - data_shape[d]
+
+        # we can now choose the bbox from -need_to_pad // 2 to shape - patch_size + need_to_pad // 2. Here we
+        # define what the upper and lower bound can be to then sample from them with np.random.randint
+        lbs = [- need_to_pad[i] // 2 for i in range(dim)]
+        ubs = [data_shape[i] + need_to_pad[i] // 2 + need_to_pad[i] % 2 - self.patch_size[i] for i in range(dim)]
+
+        # if not force_fg then we can just sample the bbox randomly from lb and ub. Else we need to make sure we get
+        # at least one of the foreground classes in the patch
+        if not force_fg and not self.has_ignore:
+            bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)]
+            # print('I want a random location')
+        else:
+            if not force_fg and self.has_ignore:
+                selected_class = self.annotated_classes_key
+                if len(class_locations[selected_class]) == 0:
+                    # no annotated pixels in this case. Not good. But we can hardly skip it here
+                    print('Warning! No annotated pixels in image!')
+                    selected_class = None
+                # print(f'I have ignore labels and want to pick a labeled area. annotated_classes_key: {self.annotated_classes_key}')
+            elif force_fg:
+                assert class_locations is not None, 'if force_fg is set class_locations cannot be None'
+                if overwrite_class is not None:
+                    assert overwrite_class in class_locations.keys(), 'desired class ("overwrite_class") does not ' \
+                                                                      'have class_locations (missing key)'
+                # this saves us a np.unique. Preprocessing already did that for all cases. Neat.
+                # class_locations keys can also be tuples
+                eligible_classes_or_regions = [i for i in class_locations.keys() if len(class_locations[i]) > 0]
+
+                # if we have annotated_classes_key locations and other classes are present, remove the annotated_classes_key from the list
+                # strange formulation needed to circumvent
+                # ValueError: The truth value of an array with more than one element is ambiguous.
Use a.any() or a.all() + tmp = [i == self.annotated_classes_key if isinstance(i, tuple) else False for i in eligible_classes_or_regions] + if any(tmp): + if len(eligible_classes_or_regions) > 1: + eligible_classes_or_regions.pop(np.where(tmp)[0][0]) + + if len(eligible_classes_or_regions) == 0: + # this only happens if some image does not contain foreground voxels at all + selected_class = None + if verbose: + print('case does not contain any foreground classes') + else: + # I hate myself. Future me aint gonna be happy to read this + # 2022_11_25: had to read it today. Wasn't too bad + selected_class = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \ + (overwrite_class is None or (overwrite_class not in eligible_classes_or_regions)) else overwrite_class + # print(f'I want to have foreground, selected class: {selected_class}') + else: + raise RuntimeError('lol what!?') + voxels_of_that_class = class_locations[selected_class] if selected_class is not None else None + + if voxels_of_that_class is not None and len(voxels_of_that_class) > 0: + selected_voxel = voxels_of_that_class[np.random.choice(len(voxels_of_that_class))] + # selected voxel is center voxel. Subtract half the patch size to get lower bbox voxel. + # Make sure it is within the bounds of lb and ub + # i + 1 because we have first dimension 0! + bbox_lbs = [max(lbs[i], selected_voxel[i + 1] - self.patch_size[i] // 2) for i in range(dim)] + else: + # If the image does not contain any foreground classes, we fall back to random cropping + bbox_lbs = [np.random.randint(lbs[i], ubs[i] + 1) for i in range(dim)] + + bbox_ubs = [bbox_lbs[i] + self.patch_size[i] for i in range(dim)] + + return bbox_lbs, bbox_ubs diff --git a/docker/template/src/nnunetv2/training/dataloading/data_loader_2d.py b/docker/template/src/nnunetv2/training/dataloading/data_loader_2d.py new file mode 100644 index 0000000..aab8438 --- /dev/null +++ b/docker/template/src/nnunetv2/training/dataloading/data_loader_2d.py @@ -0,0 +1,94 @@ +import numpy as np +from nnunetv2.training.dataloading.base_data_loader import nnUNetDataLoaderBase +from nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset + + +class nnUNetDataLoader2D(nnUNetDataLoaderBase): + def generate_train_batch(self): + selected_keys = self.get_indices() + # preallocate memory for data and seg + data_all = np.zeros(self.data_shape, dtype=np.float32) + seg_all = np.zeros(self.seg_shape, dtype=np.int16) + case_properties = [] + + for j, current_key in enumerate(selected_keys): + # oversampling foreground will improve stability of model training, especially if many patches are empty + # (Lung for example) + force_fg = self.get_do_oversample(j) + data, seg, properties = self._data.load_case(current_key) + case_properties.append(properties) + + # select a class/region first, then a slice where this class is present, then crop to that area + if not force_fg: + if self.has_ignore: + selected_class_or_region = self.annotated_classes_key + else: + selected_class_or_region = None + else: + # filter out all classes that are not present here + eligible_classes_or_regions = [i for i in properties['class_locations'].keys() if len(properties['class_locations'][i]) > 0] + + # if we have annotated_classes_key locations and other classes are present, remove the annotated_classes_key from the list + # strange formulation needed to circumvent + # ValueError: The truth value of an array with more than one element is ambiguous. 
Use a.any() or a.all()
+                tmp = [i == self.annotated_classes_key if isinstance(i, tuple) else False for i in eligible_classes_or_regions]
+                if any(tmp):
+                    if len(eligible_classes_or_regions) > 1:
+                        eligible_classes_or_regions.pop(np.where(tmp)[0][0])
+
+                selected_class_or_region = eligible_classes_or_regions[np.random.choice(len(eligible_classes_or_regions))] if \
+                    len(eligible_classes_or_regions) > 0 else None
+            if selected_class_or_region is not None:
+                selected_slice = np.random.choice(properties['class_locations'][selected_class_or_region][:, 1])
+            else:
+                selected_slice = np.random.choice(len(data[0]))
+
+            data = data[:, selected_slice]
+            seg = seg[:, selected_slice]
+
+            # the line of death lol
+            # this needs to be a separate variable because we could otherwise permanently overwrite
+            # properties['class_locations']
+            # selected_class_or_region is:
+            # - None if we do not have an ignore label and force_fg is False OR if force_fg is True but there is no foreground in the image
+            # - A tuple of all (non-ignore) labels if there is an ignore label and force_fg is False
+            # - a class or region if force_fg is True
+            class_locations = {
+                selected_class_or_region: properties['class_locations'][selected_class_or_region][properties['class_locations'][selected_class_or_region][:, 1] == selected_slice][:, (0, 2, 3)]
+            } if (selected_class_or_region is not None) else None
+
+            # print(properties)
+            shape = data.shape[1:]
+            dim = len(shape)
+            bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg if selected_class_or_region is not None else None,
+                                               class_locations, overwrite_class=selected_class_or_region)
+
+            # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the
+            # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.
+            # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size
+            # later
+            valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
+            valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]
+
+            # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.
+            # Why not just concatenate them here and forget about the if statements? Well that's because seg needs to
+            # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also
+            # remove label -1 in the data augmentation but this way it is less error prone)
+            this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
+            data = data[this_slice]
+
+            this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
+            seg = seg[this_slice]
+
+            padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
+            data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
+            seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=-1)
+
+        return {'data': data_all, 'seg': seg_all, 'properties': case_properties, 'keys': selected_keys}
+
+
+if __name__ == '__main__':
+    folder = '/media/fabian/data/nnUNet_preprocessed/Dataset004_Hippocampus/2d'
+    ds = nnUNetDataset(folder, None, 1000)  # note: with a threshold of 1000 the properties WILL be preloaded here
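+    # editor's note (added comment): this smoke test predates the nnUNetDataLoaderBase signature defined above,
+    # which expects a LabelManager as the fifth argument (here 0.33 would land in that slot); the same caveat
+    # applies to the 3D smoke test further below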
+    dl = nnUNetDataLoader2D(ds, 366, (65, 65), (56, 40), 0.33, None, None)
+    a = next(dl)
diff --git a/docker/template/src/nnunetv2/training/dataloading/data_loader_3d.py b/docker/template/src/nnunetv2/training/dataloading/data_loader_3d.py
new file mode 100644
index 0000000..e8345f8
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/dataloading/data_loader_3d.py
@@ -0,0 +1,56 @@
+import numpy as np
+from nnunetv2.training.dataloading.base_data_loader import nnUNetDataLoaderBase
+from nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset
+
+
+class nnUNetDataLoader3D(nnUNetDataLoaderBase):
+    def generate_train_batch(self):
+        selected_keys = self.get_indices()
+        # preallocate memory for data and seg
+        data_all = np.zeros(self.data_shape, dtype=np.float32)
+        seg_all = np.zeros(self.seg_shape, dtype=np.int16)
+        case_properties = []
+
+        for j, i in enumerate(selected_keys):
+            # oversampling foreground will improve stability of model training, especially if many patches are empty
+            # (Lung for example)
+            force_fg = self.get_do_oversample(j)
+
+            data, seg, properties = self._data.load_case(i)
+            case_properties.append(properties)
+
+            # If we are doing the cascade then the segmentation from the previous stage will already have been loaded by
+            # self._data.load_case(i) (see nnUNetDataset.load_case)
+            shape = data.shape[1:]
+            dim = len(shape)
+            bbox_lbs, bbox_ubs = self.get_bbox(shape, force_fg, properties['class_locations'])
+
+            # whoever wrote this knew what he was doing (hint: it was me). We first crop the data to the region of the
+            # bbox that actually lies within the data. This will result in a smaller array which is then faster to pad.
+            # valid_bbox is just the coord that lied within the data cube. It will be padded to match the patch size
+            # later
+            valid_bbox_lbs = [max(0, bbox_lbs[i]) for i in range(dim)]
+            valid_bbox_ubs = [min(shape[i], bbox_ubs[i]) for i in range(dim)]
+
+            # At this point you might ask yourself why we would treat seg differently from seg_from_previous_stage.
+            # Why not just concatenate them here and forget about the if statements? Well that's because seg needs to
+            # be padded with -1 constant whereas seg_from_previous_stage needs to be padded with 0s (we could also
+            # remove label -1 in the data augmentation but this way it is less error prone)
+            this_slice = tuple([slice(0, data.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
+            data = data[this_slice]
+
+            this_slice = tuple([slice(0, seg.shape[0])] + [slice(i, j) for i, j in zip(valid_bbox_lbs, valid_bbox_ubs)])
+            seg = seg[this_slice]
+
+            padding = [(-min(0, bbox_lbs[i]), max(bbox_ubs[i] - shape[i], 0)) for i in range(dim)]
+            data_all[j] = np.pad(data, ((0, 0), *padding), 'constant', constant_values=0)
+            seg_all[j] = np.pad(seg, ((0, 0), *padding), 'constant', constant_values=-1)
+
+        return {'data': data_all, 'seg': seg_all, 'properties': case_properties, 'keys': selected_keys}
+
+
+if __name__ == '__main__':
+    folder = '/media/fabian/data/nnUNet_preprocessed/Dataset002_Heart/3d_fullres'
+    ds = nnUNetDataset(folder, num_images_properties_loading_threshold=0)  # this should not load the properties!
+    dl = nnUNetDataLoader3D(ds, 5, (16, 16, 16), (16, 16, 16), 0.33, None, None)
+    a = next(dl)
diff --git a/docker/template/src/nnunetv2/training/dataloading/nnunet_dataset.py b/docker/template/src/nnunetv2/training/dataloading/nnunet_dataset.py
new file mode 100644
index 0000000..153a005
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/dataloading/nnunet_dataset.py
@@ -0,0 +1,146 @@
+import os
+from typing import List
+
+import numpy as np
+import shutil
+
+from batchgenerators.utilities.file_and_folder_operations import join, load_pickle, isfile
+from nnunetv2.training.dataloading.utils import get_case_identifiers
+
+
+class nnUNetDataset(object):
+    def __init__(self, folder: str, case_identifiers: List[str] = None,
+                 num_images_properties_loading_threshold: int = 0,
+                 folder_with_segs_from_previous_stage: str = None):
+        """
+        This does not actually load the dataset. It merely creates a dictionary where the keys are training case names and
+        the values are dictionaries containing the relevant information for that case.
+        dataset[training_case] -> info
+        Info has the following key:value pairs:
+        - dataset[case_identifier]['data_file'] -> the full path to the npz file associated with the training case
+        - dataset[case_identifier]['properties_file'] -> the pkl file containing the case properties
+
+        In addition, if the total number of cases is <= num_images_properties_loading_threshold we load all the pickle files
+        (containing auxiliary information). This is done for small datasets so that we don't spend too much CPU time on
+        reading pkl files on the fly during training. However, for large datasets storing all the aux info (which also
+        contains locations of foreground voxels in the images) can cause too much RAM utilization. In that
+        case it is better to load on the fly.
+
+        If properties are loaded into the RAM, the info dicts each will have an additional entry:
+        - dataset[case_identifier]['properties'] -> pkl file content
+
+        IMPORTANT! THIS CLASS ITSELF IS READ-ONLY. YOU CANNOT ADD KEY:VALUE PAIRS WITH nnUNetDataset[key] = value
+        USE THIS INSTEAD:
+        nnUNetDataset.dataset[key] = value
+        (not sure why you'd want to do that though.
So don't do it) + """ + super().__init__() + # print('loading dataset') + if case_identifiers is None: + case_identifiers = get_case_identifiers(folder) + case_identifiers.sort() + + self.dataset = {} + for c in case_identifiers: + self.dataset[c] = {} + self.dataset[c]['data_file'] = join(folder, f"{c}.npz") + self.dataset[c]['properties_file'] = join(folder, f"{c}.pkl") + if folder_with_segs_from_previous_stage is not None: + self.dataset[c]['seg_from_prev_stage_file'] = join(folder_with_segs_from_previous_stage, f"{c}.npz") + + if len(case_identifiers) <= num_images_properties_loading_threshold: + for i in self.dataset.keys(): + self.dataset[i]['properties'] = load_pickle(self.dataset[i]['properties_file']) + + self.keep_files_open = ('nnUNet_keep_files_open' in os.environ.keys()) and \ + (os.environ['nnUNet_keep_files_open'].lower() in ('true', '1', 't')) + # print(f'nnUNetDataset.keep_files_open: {self.keep_files_open}') + + def __getitem__(self, key): + ret = {**self.dataset[key]} + if 'properties' not in ret.keys(): + ret['properties'] = load_pickle(ret['properties_file']) + return ret + + def __setitem__(self, key, value): + return self.dataset.__setitem__(key, value) + + def keys(self): + return self.dataset.keys() + + def __len__(self): + return self.dataset.__len__() + + def items(self): + return self.dataset.items() + + def values(self): + return self.dataset.values() + + def load_case(self, key): + entry = self[key] + if 'open_data_file' in entry.keys(): + data = entry['open_data_file'] + # print('using open data file') + elif isfile(entry['data_file'][:-4] + ".npy"): + data = np.load(entry['data_file'][:-4] + ".npy", 'r') + if self.keep_files_open: + self.dataset[key]['open_data_file'] = data + # print('saving open data file') + else: + data = np.load(entry['data_file'])['data'] + + if 'open_seg_file' in entry.keys(): + seg = entry['open_seg_file'] + # print('using open data file') + elif isfile(entry['data_file'][:-4] + "_seg.npy"): + seg = np.load(entry['data_file'][:-4] + "_seg.npy", 'r') + if self.keep_files_open: + self.dataset[key]['open_seg_file'] = seg + # print('saving open seg file') + else: + seg = np.load(entry['data_file'])['seg'] + + if 'seg_from_prev_stage_file' in entry.keys(): + if isfile(entry['seg_from_prev_stage_file'][:-4] + ".npy"): + seg_prev = np.load(entry['seg_from_prev_stage_file'][:-4] + ".npy", 'r') + else: + seg_prev = np.load(entry['seg_from_prev_stage_file'])['seg'] + seg = np.vstack((seg, seg_prev[None])) + + return data, seg, entry['properties'] + + +if __name__ == '__main__': + # this is a mini test. Todo: We can move this to tests in the future (requires simulated dataset) + + folder = '/media/fabian/data/nnUNet_preprocessed/Dataset003_Liver/3d_lowres' + ds = nnUNetDataset(folder, num_images_properties_loading_threshold=0) # this should not load the properties! + # this SHOULD HAVE the properties + ks = ds['liver_0'].keys() + assert 'properties' in ks + # amazing. I am the best. 
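+    # editor's note (added comment): the two stanzas below exercise both caching modes: with threshold=1000 the
+    # properties are preloaded into RAM and survive the pkl file being moved away, while with threshold=0 they
+    # are loaded lazily and the same access must raise FileNotFoundError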
+
+    # this should have the properties
+    ds = nnUNetDataset(folder, num_images_properties_loading_threshold=1000)
+    # now rename the properties file so that it does not exist anymore
+    shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
+    # now we should still be able to access the properties because they have already been loaded
+    ks = ds['liver_0'].keys()
+    assert 'properties' in ks
+    # move file back
+    shutil.move(join(folder, 'liver_XXX.pkl'), join(folder, 'liver_0.pkl'))
+
+    # this should not have the properties
+    ds = nnUNetDataset(folder, num_images_properties_loading_threshold=0)
+    # now rename the properties file so that it does not exist anymore
+    shutil.move(join(folder, 'liver_0.pkl'), join(folder, 'liver_XXX.pkl'))
+    # now this should crash
+    try:
+        ks = ds['liver_0'].keys()
+        raise RuntimeError('we should not have come here')
+    except FileNotFoundError:
+        print('all good')
+    # move file back
+    shutil.move(join(folder, 'liver_XXX.pkl'), join(folder, 'liver_0.pkl'))
+
diff --git a/docker/template/src/nnunetv2/training/dataloading/utils.py b/docker/template/src/nnunetv2/training/dataloading/utils.py
new file mode 100644
index 0000000..352d182
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/dataloading/utils.py
@@ -0,0 +1,128 @@
+from __future__ import annotations
+import multiprocessing
+import os
+from typing import List
+from pathlib import Path
+from warnings import warn
+
+import numpy as np
+from batchgenerators.utilities.file_and_folder_operations import isfile, subfiles
+from nnunetv2.configuration import default_num_processes
+
+
+def find_broken_image_and_labels(
+    path_to_data_dir: str | Path,
+) -> tuple[set[str], set[str]]:
+    """
+    Iterates through all numpys and tries to read them once to see if a ValueError is raised.
+    If so, the case id is added to the respective set and returned for potential fixing.
+
+    :param path_to_data_dir: Path/str to the preprocessed directory containing the npys and npzs.
+    :returns: Tuple of a set containing the case ids of the broken npy images and a set of the case ids of broken npy segmentations.
+    """
+    content = os.listdir(path_to_data_dir)
+    unique_ids = [c[:-4] for c in content if c.endswith(".npz")]
+    failed_data_ids = set()
+    failed_seg_ids = set()
+    for unique_id in unique_ids:
+        # Try reading data
+        try:
+            np.load(path_to_data_dir / (unique_id + ".npy"), "r")
+        except ValueError:
+            failed_data_ids.add(unique_id)
+        # Try reading seg
+        try:
+            np.load(path_to_data_dir / (unique_id + "_seg.npy"), "r")
+        except ValueError:
+            failed_seg_ids.add(unique_id)
+
+    return failed_data_ids, failed_seg_ids
+
+
+def try_fix_broken_npy(path_to_data_dir: Path, case_ids: set[str], fix_image: bool):
+    """
+    Receives broken case ids and tries to fix them by re-extracting the npz file (up to 5 times).
+
+    :param case_ids: Set of case ids that are broken.
+    :param path_to_data_dir: Path to the preprocessed directory containing the npys and npzs.
+    :raises ValueError: If the npy file could not be unpacked after 5 tries.
+    """
+    for case_id in case_ids:
+        for i in range(5):
+            try:
+                key = "data" if fix_image else "seg"
+                suffix = ".npy" if fix_image else "_seg.npy"
+                read_npz = np.load(path_to_data_dir / (case_id + ".npz"), "r")[key]
+                np.save(path_to_data_dir / (case_id + suffix), read_npz)
+                # Try loading the just saved image.
+                np.load(path_to_data_dir / (case_id + suffix), "r")
+                break
+            except ValueError:
+                if i == 4:
+                    raise ValueError(
+                        f"Could not unpack {case_id + suffix} after 5 tries!"
+                    )
+                continue
+
+
+def verify_or_stratify_npys(path_to_data_dir: str | Path) -> None:
+    """
+    This re-reads the npy files after unpacking. Should there be a loading issue with any, it will try to unpack that
+    file again and overwrite the existing one.
+    If the new file does not get saved correctly 5 times, it will raise an error with the file name to the user. Does the same for images and segmentations.
+    :param path_to_data_dir: Path to the preprocessed directory containing the npys and npzs.
+    :raises ValueError: If the npy file could not be unpacked after 5 tries.
+        Otherwise an obscure error would be raised later during training (depending on when the broken file is sampled)
+    """
+    path_to_data_dir = Path(path_to_data_dir)
+    # Check for broken image and segmentation npys
+    failed_data_ids, failed_seg_ids = find_broken_image_and_labels(path_to_data_dir)
+
+    if len(failed_data_ids) != 0 or len(failed_seg_ids) != 0:
+        warn(
+            f"Found {len(failed_data_ids)} faulty data npys and {len(failed_seg_ids)} faulty segmentation npys!\n"
+            + f"Faulty images: {failed_data_ids}; Faulty segmentations: {failed_seg_ids}\n"
+            + "Trying to fix them now."
+        )
+        # Try to fix the broken npys by re-extracting the npz. If that fails, raise error
+        try_fix_broken_npy(path_to_data_dir, failed_data_ids, fix_image=True)
+        try_fix_broken_npy(path_to_data_dir, failed_seg_ids, fix_image=False)
+
+
+def _convert_to_npy(npz_file: str, unpack_segmentation: bool = True, overwrite_existing: bool = False) -> None:
+    try:
+        a = np.load(npz_file)  # inexpensive, no compression is done here. This just reads metadata
+        if overwrite_existing or not isfile(npz_file[:-3] + "npy"):
+            np.save(npz_file[:-3] + "npy", a['data'])
+        if unpack_segmentation and (overwrite_existing or not isfile(npz_file[:-4] + "_seg.npy")):
+            np.save(npz_file[:-4] + "_seg.npy", a['seg'])
+    except KeyboardInterrupt:
+        if isfile(npz_file[:-3] + "npy"):
+            os.remove(npz_file[:-3] + "npy")
+        if isfile(npz_file[:-4] + "_seg.npy"):
+            os.remove(npz_file[:-4] + "_seg.npy")
+        raise KeyboardInterrupt
+
+
+def unpack_dataset(folder: str, unpack_segmentation: bool = True, overwrite_existing: bool = False,
+                   num_processes: int = default_num_processes):
+    """
+    all npz files in this folder belong to the dataset, unpack them all
+    """
+    with multiprocessing.get_context("spawn").Pool(num_processes) as p:
+        npz_files = subfiles(folder, True, None, ".npz", True)
+        p.starmap(_convert_to_npy, zip(npz_files,
+                                       [unpack_segmentation] * len(npz_files),
+                                       [overwrite_existing] * len(npz_files))
+                  )
+
+
+def get_case_identifiers(folder: str) -> List[str]:
+    """
+    finds all npz files in the given folder and reconstructs the training case names from them
+    """
+    case_identifiers = [i[:-4] for i in os.listdir(folder) if i.endswith("npz") and (i.find("segFromPrevStage") == -1)]
+    return case_identifiers
+
+
+if __name__ == '__main__':
+    unpack_dataset('/media/fabian/data/nnUNet_preprocessed/Dataset002_Heart/2d')
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/training/logging/__init__.py b/docker/template/src/nnunetv2/training/logging/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/logging/nnunet_logger.py b/docker/template/src/nnunetv2/training/logging/nnunet_logger.py
new file mode 100644
index 0000000..8409738
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/logging/nnunet_logger.py
@@ -0,0 +1,103 @@
+import matplotlib
+from batchgenerators.utilities.file_and_folder_operations import join
+
+matplotlib.use('agg')
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+
+class nnUNetLogger(object):
+    """
+    This class is really trivial. Don't expect cool functionality here. This is my makeshift solution to problems
+    arising from out-of-sync epoch numbers and numbers of logged loss values. It also simplifies the trainer class a
+    little
+
+    YOU MUST LOG EXACTLY ONE VALUE PER EPOCH FOR EACH OF THE LOGGING ITEMS! DON'T MESS IT UP
+    """
+    def __init__(self, verbose: bool = False):
+        self.my_fantastic_logging = {
+            'mean_fg_dice': list(),
+            'ema_fg_dice': list(),
+            'dice_per_class_or_region': list(),
+            'train_losses': list(),
+            'val_losses': list(),
+            'lrs': list(),
+            'epoch_start_timestamps': list(),
+            'epoch_end_timestamps': list()
+        }
+        self.verbose = verbose
+        # shut up, this logging is great
+
+    def log(self, key, value, epoch: int):
+        """
+        sometimes things get messed up. We try to catch that here
+        """
+        assert key in self.my_fantastic_logging.keys() and isinstance(self.my_fantastic_logging[key], list), \
+            'This function is only intended to log stuff to lists and to have one entry per epoch'
+
+        if self.verbose: print(f'logging {key}: {value} for epoch {epoch}')
+
+        if len(self.my_fantastic_logging[key]) < (epoch + 1):
+            self.my_fantastic_logging[key].append(value)
+        else:
+            assert len(self.my_fantastic_logging[key]) == (epoch + 1), 'something went horribly wrong. My logging ' \
+                                                                       'lists length is off by more than 1'
+            print(f'maybe some logging issue!? logging {key} and {value}')
+            self.my_fantastic_logging[key][epoch] = value
+
+        # handle the ema_fg_dice special case! It is automatically logged when we add a new mean_fg_dice
+        if key == 'mean_fg_dice':
+            new_ema_pseudo_dice = self.my_fantastic_logging['ema_fg_dice'][epoch - 1] * 0.9 + 0.1 * value \
+                if len(self.my_fantastic_logging['ema_fg_dice']) > 0 else value
+            self.log('ema_fg_dice', new_ema_pseudo_dice, epoch)
+
+    def plot_progress_png(self, output_folder):
+        # we infer the epoch from our internal logging
+        epoch = min([len(i) for i in self.my_fantastic_logging.values()]) - 1  # lists of epoch 0 have len 1
+        sns.set(font_scale=2.5)
+        fig, ax_all = plt.subplots(3, 1, figsize=(30, 54))
+        # regular progress.png as we are used to from previous nnU-Net versions
+        ax = ax_all[0]
+        ax2 = ax.twinx()
+        x_values = list(range(epoch + 1))
+        ax.plot(x_values, self.my_fantastic_logging['train_losses'][:epoch + 1], color='b', ls='-', label="loss_tr", linewidth=4)
+        ax.plot(x_values, self.my_fantastic_logging['val_losses'][:epoch + 1], color='r', ls='-', label="loss_val", linewidth=4)
+        ax2.plot(x_values, self.my_fantastic_logging['mean_fg_dice'][:epoch + 1], color='g', ls='dotted', label="pseudo dice",
+                 linewidth=3)
+        ax2.plot(x_values, self.my_fantastic_logging['ema_fg_dice'][:epoch + 1], color='g', ls='-', label="pseudo dice (mov.
avg.)", + linewidth=4) + ax.set_xlabel("epoch") + ax.set_ylabel("loss") + ax2.set_ylabel("pseudo dice") + ax.legend(loc=(0, 1)) + ax2.legend(loc=(0.2, 1)) + + # epoch times to see whether the training speed is consistent (inconsistent means there are other jobs + # clogging up the system) + ax = ax_all[1] + ax.plot(x_values, [i - j for i, j in zip(self.my_fantastic_logging['epoch_end_timestamps'][:epoch + 1], + self.my_fantastic_logging['epoch_start_timestamps'])][:epoch + 1], color='b', + ls='-', label="epoch duration", linewidth=4) + ylim = [0] + [ax.get_ylim()[1]] + ax.set(ylim=ylim) + ax.set_xlabel("epoch") + ax.set_ylabel("time [s]") + ax.legend(loc=(0, 1)) + + # learning rate + ax = ax_all[2] + ax.plot(x_values, self.my_fantastic_logging['lrs'][:epoch + 1], color='b', ls='-', label="learning rate", linewidth=4) + ax.set_xlabel("epoch") + ax.set_ylabel("learning rate") + ax.legend(loc=(0, 1)) + + plt.tight_layout() + + fig.savefig(join(output_folder, "progress.png")) + plt.close() + + def get_checkpoint(self): + return self.my_fantastic_logging + + def load_checkpoint(self, checkpoint: dict): + self.my_fantastic_logging = checkpoint diff --git a/docker/template/src/nnunetv2/training/loss/__init__.py b/docker/template/src/nnunetv2/training/loss/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/loss/compound_losses.py b/docker/template/src/nnunetv2/training/loss/compound_losses.py new file mode 100644 index 0000000..eaeb5d8 --- /dev/null +++ b/docker/template/src/nnunetv2/training/loss/compound_losses.py @@ -0,0 +1,150 @@ +import torch +from nnunetv2.training.loss.dice import SoftDiceLoss, MemoryEfficientSoftDiceLoss +from nnunetv2.training.loss.robust_ce_loss import RobustCrossEntropyLoss, TopKLoss +from nnunetv2.utilities.helpers import softmax_helper_dim1 +from torch import nn + + +class DC_and_CE_loss(nn.Module): + def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None, + dice_class=SoftDiceLoss): + """ + Weights for CE and Dice do not need to sum to one. You can set whatever you want. + :param soft_dice_kwargs: + :param ce_kwargs: + :param aggregate: + :param square_dice: + :param weight_ce: + :param weight_dice: + """ + super(DC_and_CE_loss, self).__init__() + if ignore_label is not None: + ce_kwargs['ignore_index'] = ignore_label + + self.weight_dice = weight_dice + self.weight_ce = weight_ce + self.ignore_label = ignore_label + + self.ce = RobustCrossEntropyLoss(**ce_kwargs) + self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs) + + def forward(self, net_output: torch.Tensor, target: torch.Tensor): + """ + target must be b, c, x, y(, z) with c=1 + :param net_output: + :param target: + :return: + """ + if self.ignore_label is not None: + assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \ + '(DC_and_CE_loss)' + mask = target != self.ignore_label + # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we
+            # ignore gradients in those areas anyway
+            target_dice = torch.where(mask, target, 0)
+            num_fg = mask.sum()
+        else:
+            target_dice = target
+            mask = None
+
+        dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \
+            if self.weight_dice != 0 else 0
+        ce_loss = self.ce(net_output, target[:, 0]) \
+            if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0
+
+        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
+        return result
+
+
+class DC_and_BCE_loss(nn.Module):
+    def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,
+                 dice_class=MemoryEfficientSoftDiceLoss):
+        """
+        DO NOT APPLY NONLINEARITY IN YOUR NETWORK!
+
+        target must be one hot encoded
+        IMPORTANT: if use_ignore_label is set, we assume the ignore mask is located in target[:, -1]!!!
+
+        :param soft_dice_kwargs:
+        :param bce_kwargs:
+        :param aggregate:
+        """
+        super(DC_and_BCE_loss, self).__init__()
+        if use_ignore_label:
+            bce_kwargs['reduction'] = 'none'
+
+        self.weight_dice = weight_dice
+        self.weight_ce = weight_ce
+        self.use_ignore_label = use_ignore_label
+
+        self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)
+        self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)
+
+    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
+        if self.use_ignore_label:
+            # target is one hot encoded here. invert it so that it is True wherever we can compute the loss
+            mask = (1 - target[:, -1:]).bool()
+            # remove ignore channel now that we have the mask
+            target_regions = torch.clone(target[:, :-1])
+        else:
+            target_regions = target
+            mask = None
+
+        dc_loss = self.dc(net_output, target_regions, loss_mask=mask)
+        if mask is not None:
+            ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)
+        else:
+            ce_loss = self.ce(net_output, target_regions)
+        result = self.weight_ce * ce_loss + self.weight_dice * dc_loss
+        return result
+
+
+class DC_and_topk_loss(nn.Module):
+    def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None):
+        """
+        Weights for CE and Dice do not need to sum to one. You can set whatever you want.
+        :param soft_dice_kwargs:
+        :param ce_kwargs:
+        :param aggregate:
+        :param square_dice:
+        :param weight_ce:
+        :param weight_dice:
+        """
+        super().__init__()
+        if ignore_label is not None:
+            ce_kwargs['ignore_index'] = ignore_label
+
+        self.weight_dice = weight_dice
+        self.weight_ce = weight_ce
+        self.ignore_label = ignore_label
+
+        self.ce = TopKLoss(**ce_kwargs)
+        self.dc = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)
+
+    def forward(self, net_output: torch.Tensor, target: torch.Tensor):
+        """
+        target must be b, c, x, y(, z) with c=1
+        :param net_output:
+        :param target:
+        :return:
+        """
+        if self.ignore_label is not None:
+            assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \
+                                         '(DC_and_topk_loss)'
+            mask = (target != self.ignore_label).bool()
+            # remove ignore label from target, replace with one of the known labels.
It doesn't matter because we + # ignore gradients in those areas anyway + target_dice = torch.clone(target) + target_dice[target == self.ignore_label] = 0 + num_fg = mask.sum() + else: + target_dice = target + mask = None + + dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \ + if self.weight_dice != 0 else 0 + ce_loss = self.ce(net_output, target) \ + if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0 + + result = self.weight_ce * ce_loss + self.weight_dice * dc_loss + return result diff --git a/docker/template/src/nnunetv2/training/loss/deep_supervision.py b/docker/template/src/nnunetv2/training/loss/deep_supervision.py new file mode 100644 index 0000000..952e3f7 --- /dev/null +++ b/docker/template/src/nnunetv2/training/loss/deep_supervision.py @@ -0,0 +1,30 @@ +import torch +from torch import nn + + +class DeepSupervisionWrapper(nn.Module): + def __init__(self, loss, weight_factors=None): + """ + Wraps a loss function so that it can be applied to multiple outputs. Forward accepts an arbitrary number of + inputs. Each input is expected to be a tuple/list. Each tuple/list must have the same length. The loss is then + applied to each entry like this: + l = w0 * loss(input0[0], input1[0], ...) + w1 * loss(input0[1], input1[1], ...) + ... + If weights are None, all w will be 1. + """ + super(DeepSupervisionWrapper, self).__init__() + assert any([x != 0 for x in weight_factors]), "At least one weight factor should be != 0.0" + self.weight_factors = tuple(weight_factors) + self.loss = loss + + def forward(self, *args): + assert all([isinstance(i, (tuple, list)) for i in args]), \ + f"all args must be either tuple or list, got {[type(i) for i in args]}" + # we could check for equal lengths here as well, but we really shouldn't overdo it with checks because + # this code is executed a lot of times! 
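+        # Illustrative example (not part of the original code): with three deep supervision
+        # outputs and weight_factors = (1.0, 0.5, 0.0), forward(outputs, targets) evaluates to
+        #     1.0 * loss(outputs[0], targets[0]) + 0.5 * loss(outputs[1], targets[1])
+        # and the third resolution is skipped entirely because its weight is 0.0 (see the filter below).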
+ + if self.weight_factors is None: + weights = (1, ) * len(args[0]) + else: + weights = self.weight_factors + + return sum([weights[i] * self.loss(*inputs) for i, inputs in enumerate(zip(*args)) if weights[i] != 0.0]) diff --git a/docker/template/src/nnunetv2/training/loss/dice.py b/docker/template/src/nnunetv2/training/loss/dice.py new file mode 100644 index 0000000..5744357 --- /dev/null +++ b/docker/template/src/nnunetv2/training/loss/dice.py @@ -0,0 +1,192 @@ +from typing import Callable + +import torch +from nnunetv2.utilities.ddp_allgather import AllGatherGrad +from torch import nn + + +class SoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True, clip_tp: float = None): + """ + """ + super(SoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.clip_tp = clip_tp + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + shp_x = x.shape + + if self.batch_dice: + axes = [0] + list(range(2, len(shp_x))) + else: + axes = list(range(2, len(shp_x))) + + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + tp, fp, fn, _ = get_tp_fp_fn_tn(x, y, axes, loss_mask, False) + + if self.ddp and self.batch_dice: + tp = AllGatherGrad.apply(tp).sum(0) + fp = AllGatherGrad.apply(fp).sum(0) + fn = AllGatherGrad.apply(fn).sum(0) + + if self.clip_tp is not None: + tp = torch.clip(tp, min=self.clip_tp , max=None) + + nominator = 2 * tp + denominator = 2 * tp + fp + fn + + dc = (nominator + self.smooth) / (torch.clip(denominator + self.smooth, 1e-8)) + + if not self.do_bg: + if self.batch_dice: + dc = dc[1:] + else: + dc = dc[:, 1:] + dc = dc.mean() + + return -dc + + +class MemoryEfficientSoftDiceLoss(nn.Module): + def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1., + ddp: bool = True): + """ + saves 1.6 GB on Dataset017 3d_lowres + """ + super(MemoryEfficientSoftDiceLoss, self).__init__() + + self.do_bg = do_bg + self.batch_dice = batch_dice + self.apply_nonlin = apply_nonlin + self.smooth = smooth + self.ddp = ddp + + def forward(self, x, y, loss_mask=None): + if self.apply_nonlin is not None: + x = self.apply_nonlin(x) + + # make everything shape (b, c) + axes = tuple(range(2, x.ndim)) + + with torch.no_grad(): + if x.ndim != y.ndim: + y = y.view((y.shape[0], 1, *y.shape[1:])) + + if x.shape == y.shape: + # if this is the case then gt is probably already a one hot encoding + y_onehot = y + else: + y_onehot = torch.zeros(x.shape, device=x.device, dtype=torch.bool) + y_onehot.scatter_(1, y.long(), 1) + + if not self.do_bg: + y_onehot = y_onehot[:, 1:] + + sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes) + + # this one MUST be outside the with torch.no_grad(): context. 
Otherwise no gradients for you + if not self.do_bg: + x = x[:, 1:] + + if loss_mask is None: + intersect = (x * y_onehot).sum(axes) + sum_pred = x.sum(axes) + else: + intersect = (x * y_onehot * loss_mask).sum(axes) + sum_pred = (x * loss_mask).sum(axes) + + if self.batch_dice: + if self.ddp: + intersect = AllGatherGrad.apply(intersect).sum(0) + sum_pred = AllGatherGrad.apply(sum_pred).sum(0) + sum_gt = AllGatherGrad.apply(sum_gt).sum(0) + + intersect = intersect.sum(0) + sum_pred = sum_pred.sum(0) + sum_gt = sum_gt.sum(0) + + dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8)) + + dc = dc.mean() + return -dc + + +def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False): + """ + net_output must be (b, c, x, y(, z))) + gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z)) + if mask is provided it must have shape (b, 1, x, y(, z))) + :param net_output: + :param gt: + :param axes: can be (, ) = no summation + :param mask: mask must be 1 for valid pixels and 0 for invalid pixels + :param square: if True then fp, tp and fn will be squared before summation + :return: + """ + if axes is None: + axes = tuple(range(2, net_output.ndim)) + + with torch.no_grad(): + if net_output.ndim != gt.ndim: + gt = gt.view((gt.shape[0], 1, *gt.shape[1:])) + + if net_output.shape == gt.shape: + # if this is the case then gt is probably already a one hot encoding + y_onehot = gt + else: + y_onehot = torch.zeros(net_output.shape, device=net_output.device) + y_onehot.scatter_(1, gt.long(), 1) + + tp = net_output * y_onehot + fp = net_output * (1 - y_onehot) + fn = (1 - net_output) * y_onehot + tn = (1 - net_output) * (1 - y_onehot) + + if mask is not None: + with torch.no_grad(): + mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for _ in range(2, tp.ndim)])) + tp *= mask_here + fp *= mask_here + fn *= mask_here + tn *= mask_here + # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes + # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram + # (using nnUNetv2_train 998 3d_fullres 0) + # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1) + # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1) + # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1) + # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1) + + if square: + tp = tp ** 2 + fp = fp ** 2 + fn = fn ** 2 + tn = tn ** 2 + + if len(axes) > 0: + tp = tp.sum(dim=axes, keepdim=False) + fp = fp.sum(dim=axes, keepdim=False) + fn = fn.sum(dim=axes, keepdim=False) + tn = tn.sum(dim=axes, keepdim=False) + + return tp, fp, fn, tn + + +if __name__ == '__main__': + from nnunetv2.utilities.helpers import softmax_helper_dim1 + pred = torch.rand((2, 3, 32, 32, 32)) + ref = torch.randint(0, 3, (2, 32, 32, 32)) + + dl_old = SoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + dl_new = MemoryEfficientSoftDiceLoss(apply_nonlin=softmax_helper_dim1, batch_dice=True, do_bg=False, smooth=0, ddp=False) + res_old = dl_old(pred, ref) + res_new = dl_new(pred, ref) + print(res_old, res_new) diff --git a/docker/template/src/nnunetv2/training/loss/robust_ce_loss.py b/docker/template/src/nnunetv2/training/loss/robust_ce_loss.py new file mode 100644 index 0000000..3399e3a --- /dev/null +++ b/docker/template/src/nnunetv2/training/loss/robust_ce_loss.py @@ -0,0 +1,32 @@ +import torch +from torch import nn, Tensor +import numpy as np + + +class RobustCrossEntropyLoss(nn.CrossEntropyLoss): + """ + this is just a compatibility layer because my target tensor is float and has an extra dimension + + input must be logits, not probabilities! + """ + def forward(self, input: Tensor, target: Tensor) -> Tensor: + if target.ndim == input.ndim: + assert target.shape[1] == 1 + target = target[:, 0] + return super().forward(input, target.long()) + + +class TopKLoss(RobustCrossEntropyLoss): + """ + input must be logits, not probabilities! 
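+
+    Worked example (illustrative): with k=10 and a target of shape (2, 1, 128, 128, 128),
+    the unreduced CE map has 2 * 128**3 = 4,194,304 voxels, so only the
+    int(4,194,304 * 10 / 100) = 419,430 voxels with the highest loss contribute to the mean.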
+ """ + def __init__(self, weight=None, ignore_index: int = -100, k: float = 10, label_smoothing: float = 0): + self.k = k + super(TopKLoss, self).__init__(weight, False, ignore_index, reduce=False, label_smoothing=label_smoothing) + + def forward(self, inp, target): + target = target[:, 0].long() + res = super(TopKLoss, self).forward(inp, target) + num_voxels = np.prod(res.shape, dtype=np.int64) + res, _ = torch.topk(res.view((-1, )), int(num_voxels * self.k / 100), sorted=False) + return res.mean() diff --git a/docker/template/src/nnunetv2/training/lr_scheduler/__init__.py b/docker/template/src/nnunetv2/training/lr_scheduler/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/lr_scheduler/polylr.py b/docker/template/src/nnunetv2/training/lr_scheduler/polylr.py new file mode 100644 index 0000000..44857b5 --- /dev/null +++ b/docker/template/src/nnunetv2/training/lr_scheduler/polylr.py @@ -0,0 +1,20 @@ +from torch.optim.lr_scheduler import _LRScheduler + + +class PolyLRScheduler(_LRScheduler): + def __init__(self, optimizer, initial_lr: float, max_steps: int, exponent: float = 0.9, current_step: int = None): + self.optimizer = optimizer + self.initial_lr = initial_lr + self.max_steps = max_steps + self.exponent = exponent + self.ctr = 0 + super().__init__(optimizer, current_step if current_step is not None else -1, False) + + def step(self, current_step=None): + if current_step is None or current_step == -1: + current_step = self.ctr + self.ctr += 1 + + new_lr = self.initial_lr * (1 - current_step / self.max_steps) ** self.exponent + for param_group in self.optimizer.param_groups: + param_group['lr'] = new_lr diff --git a/docker/template/src/nnunetv2/training/lr_scheduler/samedlr.py b/docker/template/src/nnunetv2/training/lr_scheduler/samedlr.py new file mode 100644 index 0000000..239417e --- /dev/null +++ b/docker/template/src/nnunetv2/training/lr_scheduler/samedlr.py @@ -0,0 +1,22 @@ +import torch +from torch.optim.lr_scheduler import _LRScheduler + +# Custom LR Scheduler Implementation +class CustomWarmupDecayLR(_LRScheduler): + def __init__(self, optimizer, warmup_period, max_iterations, base_lr, weight_decay, last_epoch=-1, verbose=False): + self.warmup_period = warmup_period + self.max_iterations = max_iterations + self.base_lr = base_lr + self.weight_decay = weight_decay + super().__init__(optimizer, last_epoch, verbose) + + def get_lr(self): + if self.last_epoch < self.warmup_period: + return [self.base_lr * ((self.last_epoch + 1) / self.warmup_period) for _ in self.optimizer.param_groups] + else: + if self.warmup_period: + shift_iter = self.last_epoch - self.warmup_period + else: + shift_iter = self.last_epoch + return [self.base_lr * (1.0 - shift_iter / self.max_iterations) ** self.weight_decay for _ in self.optimizer.param_groups] + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py new file mode 100644 index 0000000..821a4e0 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py @@ -0,0 +1,1270 @@ +import inspect +import multiprocessing +import os +import shutil +import sys +import warnings +from copy import deepcopy +from datetime import datetime +from time import time, sleep +from typing import Union, 
Tuple, List + +import numpy as np +import torch +from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter +from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose +from batchgenerators.transforms.color_transforms import BrightnessMultiplicativeTransform, \ + ContrastAugmentationTransform, GammaTransform +from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform, GaussianBlurTransform +from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform +from batchgenerators.transforms.spatial_transforms import SpatialTransform, MirrorTransform +from batchgenerators.transforms.utility_transforms import RemoveLabelTransform, RenameTransform, NumpyToTensor +from batchgenerators.utilities.file_and_folder_operations import join, load_json, isfile, save_json, maybe_mkdir_p +from torch._dynamo import OptimizedModule + +from nnunetv2.configuration import ANISO_THRESHOLD, default_num_processes +from nnunetv2.evaluation.evaluate_predictions import compute_metrics_on_folder +from nnunetv2.inference.export_prediction import export_prediction_from_logits, resample_and_save +from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor +from nnunetv2.inference.sliding_window_prediction import compute_gaussian +from nnunetv2.paths import nnUNet_preprocessed, nnUNet_results +from nnunetv2.training.data_augmentation.compute_initial_patch_size import get_patch_size +from nnunetv2.training.data_augmentation.custom_transforms.cascade_transforms import MoveSegAsOneHotToData, \ + ApplyRandomBinaryOperatorTransform, RemoveRandomConnectedComponentFromOneHotEncodingTransform +from nnunetv2.training.data_augmentation.custom_transforms.deep_supervision_donwsampling import \ + DownsampleSegForDSTransform2 +from nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter import \ + LimitedLenWrapper +from nnunetv2.training.data_augmentation.custom_transforms.masking import MaskTransform +from nnunetv2.training.data_augmentation.custom_transforms.region_based_training import \ + ConvertSegmentationToRegionsTransform +from nnunetv2.training.data_augmentation.custom_transforms.transforms_for_dummy_2d import Convert2DTo3DTransform, \ + Convert3DTo2DTransform +from nnunetv2.training.dataloading.data_loader_2d import nnUNetDataLoader2D +from nnunetv2.training.dataloading.data_loader_3d import nnUNetDataLoader3D +from nnunetv2.training.dataloading.nnunet_dataset import nnUNetDataset +from nnunetv2.training.dataloading.utils import get_case_identifiers, unpack_dataset +from nnunetv2.training.logging.nnunet_logger import nnUNetLogger +from nnunetv2.training.loss.compound_losses import DC_and_CE_loss, DC_and_BCE_loss +from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss +from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler +from nnunetv2.utilities.collate_outputs import collate_outputs +from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA +from nnunetv2.utilities.file_path_utilities import check_workers_alive_and_busy +from nnunetv2.utilities.get_network_from_plans import get_network_from_plans +from nnunetv2.utilities.helpers import empty_cache, dummy_context +from nnunetv2.utilities.label_handling.label_handling import convert_labelmap_to_one_hot, determine_num_input_channels +from nnunetv2.utilities.plans_handling.plans_handler import 
PlansManager, ConfigurationManager +from sklearn.model_selection import KFold +from torch import autocast, nn +from torch import distributed as dist +from torch.cuda import device_count +from torch.cuda.amp import GradScaler +from torch.nn.parallel import DistributedDataParallel as DDP + + +class nnUNetTrainer(object): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + # From https://grugbrain.dev/. Worth a read ya big brains ;-) + + # apex predator of grug is complexity + # complexity bad + # say again: + # complexity very bad + # you say now: + # complexity very, very bad + # given choice between complexity or one on one against t-rex, grug take t-rex: at least grug see t-rex + # complexity is spirit demon that enter codebase through well-meaning but ultimately very clubbable non grug-brain developers and project managers who not fear complexity spirit demon or even know about sometime + # one day code base understandable and grug can get work done, everything good! + # next day impossible: complexity demon spirit has entered code and very dangerous situation! + + # OK OK I am guilty. But I tried. + # https://www.osnews.com/images/comics/wtfm.jpg + # https://i.pinimg.com/originals/26/b2/50/26b250a738ea4abc7a5af4d42ad93af0.jpg + + self.is_ddp = dist.is_available() and dist.is_initialized() + self.local_rank = 0 if not self.is_ddp else dist.get_rank() + + self.device = device + + # print what device we are using + if self.is_ddp: # implicitly it's clear that we use cuda in this case + print(f"I am local rank {self.local_rank}. {device_count()} GPUs are available. The world size is " + f"{dist.get_world_size()}." + f"Setting device to {self.device}") + self.device = torch.device(type='cuda', index=self.local_rank) + else: + if self.device.type == 'cuda': + # we might want to let the user pick this but for now please pick the correct GPU with CUDA_VISIBLE_DEVICES=X + self.device = torch.device(type='cuda', index=0) + print(f"Using device: {self.device}") + + # loading and saving this class for continuing from checkpoint should not happen based on pickling. This + # would also pickle the network etc. Bad, bad. Instead we just reinstantiate and then load the checkpoint we + # need. So let's save the init args + self.my_init_kwargs = {} + for k in inspect.signature(self.__init__).parameters.keys(): + self.my_init_kwargs[k] = locals()[k] + + ### Saving all the init args into class variables for later access + self.plans_manager = PlansManager(plans) + self.configuration_manager = self.plans_manager.get_configuration(configuration) + self.configuration_name = configuration + self.dataset_json = dataset_json + self.fold = fold + self.unpack_dataset = unpack_dataset + + ### Setting all the folder names. We need to make sure things don't crash in case we are just running + # inference and some of the folders may not be defined! 
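+        # Illustrative example of the resulting layout (paths below are assumptions, not prescriptive):
+        #   preprocessed_dataset_folder_base: <nnUNet_preprocessed>/Dataset102_TriALS
+        #   output_folder_base:               <nnUNet_results>/Dataset102_TriALS/nnUNetTrainer__nnUNetPlans__3d_fullres
+        #   output_folder:                    <output_folder_base>/fold_0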
+ self.preprocessed_dataset_folder_base = join(nnUNet_preprocessed, self.plans_manager.dataset_name) \ + if nnUNet_preprocessed is not None else None + self.output_folder_base = join(nnUNet_results, self.plans_manager.dataset_name, + self.__class__.__name__ + '__' + self.plans_manager.plans_name + "__" + configuration) \ + if nnUNet_results is not None else None + self.output_folder = join(self.output_folder_base, f'fold_{fold}') + + self.preprocessed_dataset_folder = join(self.preprocessed_dataset_folder_base, + self.configuration_manager.data_identifier) + # unlike the previous nnunet folder_with_segs_from_previous_stage is now part of the plans. For now it has to + # be a different configuration in the same plans + # IMPORTANT! the mapping must be bijective, so lowres must point to fullres and vice versa (using + # "previous_stage" and "next_stage"). Otherwise it won't work! + self.is_cascaded = self.configuration_manager.previous_stage_name is not None + self.folder_with_segs_from_previous_stage = \ + join(nnUNet_results, self.plans_manager.dataset_name, + self.__class__.__name__ + '__' + self.plans_manager.plans_name + "__" + + self.configuration_manager.previous_stage_name, 'predicted_next_stage', self.configuration_name) \ + if self.is_cascaded else None + + ### Some hyperparameters for you to fiddle with + self.initial_lr = 1e-2 + self.weight_decay = 3e-5 + self.oversample_foreground_percent = 0.33 + self.num_iterations_per_epoch = 250 + self.num_val_iterations_per_epoch = 50 + self.num_epochs = 1000 + self.current_epoch = 0 + self.enable_deep_supervision = True + + ### Dealing with labels/regions + self.label_manager = self.plans_manager.get_label_manager(dataset_json) + # labels can either be a list of int (regular training) or a list of tuples of int (region-based training) + # needed for predictions. We do sigmoid in case of (overlapping) regions + + self.num_input_channels = None # -> self.initialize() + self.network = None # -> self._get_network() + self.optimizer = self.lr_scheduler = None # -> self.initialize + self.grad_scaler = GradScaler() if self.device.type == 'cuda' else None + self.loss = None # -> self.initialize + + ### Simple logging. Don't take that away from me! + # initialize log file. This is just our log for the print statements etc. 
Not to be confused with lightning + # logging + timestamp = datetime.now() + maybe_mkdir_p(self.output_folder) + self.log_file = join(self.output_folder, "training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt" % + (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute, + timestamp.second)) + self.logger = nnUNetLogger() + + ### placeholders + self.dataloader_train = self.dataloader_val = None # see on_train_start + + ### initializing stuff for remembering things and such + self._best_ema = None + + ### inference things + self.inference_allowed_mirroring_axes = None # this variable is set in + # self.configure_rotation_dummyDA_mirroring_and_inital_patch_size and will be saved in checkpoints + + ### checkpoint saving stuff + self.save_every = 50 + self.disable_checkpointing = False + + ## DDP batch size and oversampling can differ between workers and needs adaptation + # we need to change the batch size in DDP because we don't use any of those distributed samplers + self._set_batch_size_and_oversample() + + self.was_initialized = False + + self.print_to_log_file("\n#######################################################################\n" + "Please cite the following paper when using nnU-Net:\n" + "Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). " + "nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. " + "Nature methods, 18(2), 203-211.\n" + "#######################################################################\n", + also_print_to_console=True, add_timestamp=False) + + def initialize(self): + if not self.was_initialized: + self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, + self.dataset_json) + + self.network = self.build_network_architecture( + self.plans_manager, + self.dataset_json, + self.configuration_manager, + self.num_input_channels, + self.enable_deep_supervision, + ).to(self.device) + # compile network for free speedup + if self._do_i_compile(): + self.print_to_log_file('Using torch.compile...') + self.network = torch.compile(self.network) + + self.optimizer, self.lr_scheduler = self.configure_optimizers() + # if ddp, wrap in DDP wrapper + if self.is_ddp: + self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) + self.network = DDP(self.network, device_ids=[self.local_rank]) + + self.loss = self._build_loss() + self.was_initialized = True + else: + raise RuntimeError("You have called self.initialize even though the trainer was already initialized. 
" + "That should not happen.") + + def _do_i_compile(self): + return ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't')) + + def _save_debug_information(self): + # saving some debug information + if self.local_rank == 0: + dct = {} + for k in self.__dir__(): + if not k.startswith("__"): + if not callable(getattr(self, k)) or k in ['loss', ]: + dct[k] = str(getattr(self, k)) + elif k in ['network', ]: + dct[k] = str(getattr(self, k).__class__.__name__) + else: + # print(k) + pass + if k in ['dataloader_train', 'dataloader_val']: + if hasattr(getattr(self, k), 'generator'): + dct[k + '.generator'] = str(getattr(self, k).generator) + if hasattr(getattr(self, k), 'num_processes'): + dct[k + '.num_processes'] = str(getattr(self, k).num_processes) + if hasattr(getattr(self, k), 'transform'): + dct[k + '.transform'] = str(getattr(self, k).transform) + import subprocess + hostname = subprocess.getoutput(['hostname']) + dct['hostname'] = hostname + torch_version = torch.__version__ + if self.device.type == 'cuda': + gpu_name = torch.cuda.get_device_name() + dct['gpu_name'] = gpu_name + cudnn_version = torch.backends.cudnn.version() + else: + cudnn_version = 'None' + dct['device'] = str(self.device) + dct['torch_version'] = torch_version + dct['cudnn_version'] = cudnn_version + save_json(dct, join(self.output_folder, "debug.json")) + + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = True) -> nn.Module: + """ + This is where you build the architecture according to the plans. There is no obligation to use + get_network_from_plans, this is just a utility we use for the nnU-Net default architectures. You can do what + you want. Even ignore the plans and just return something static (as long as it can process the requested + patch size) + but don't bug us with your bugs arising from fiddling with this :-P + This is the function that is called in inference as well! This is needed so that all network architecture + variants can be loaded at inference time (inference will use the same nnUNetTrainer that was used for + training, so if you change the network architecture during training by deriving a new trainer class then + inference will know about it). + + If you need to know how many segmentation outputs your custom architecture needs to have, use the following snippet: + > label_manager = plans_manager.get_label_manager(dataset_json) + > label_manager.num_segmentation_heads + (why so complicated? -> We can have either classical training (classes) or regions. If we have regions, + the number of outputs is != the number of classes. Also there is the ignore label for which no output + should be generated. label_manager takes care of all that for you.) 
+ + """ + return get_network_from_plans(plans_manager, dataset_json, configuration_manager, + num_input_channels, deep_supervision=enable_deep_supervision) + + def _get_deep_supervision_scales(self): + if self.enable_deep_supervision: + deep_supervision_scales = list(list(i) for i in 1 / np.cumprod(np.vstack( + self.configuration_manager.pool_op_kernel_sizes), axis=0))[:-1] + else: + deep_supervision_scales = None # for train and val_transforms + return deep_supervision_scales + + def _set_batch_size_and_oversample(self): + if not self.is_ddp: + # set batch size to what the plan says, leave oversample untouched + self.batch_size = self.configuration_manager.batch_size + else: + # batch size is distributed over DDP workers and we need to change oversample_percent for each worker + batch_sizes = [] + oversample_percents = [] + + world_size = dist.get_world_size() + my_rank = dist.get_rank() + + global_batch_size = self.configuration_manager.batch_size + assert global_batch_size >= world_size, 'Cannot run DDP if the batch size is smaller than the number of ' \ + 'GPUs... Duh.' + + batch_size_per_GPU = np.ceil(global_batch_size / world_size).astype(int) + + for rank in range(world_size): + if (rank + 1) * batch_size_per_GPU > global_batch_size: + batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - global_batch_size) + else: + batch_size = batch_size_per_GPU + + batch_sizes.append(batch_size) + + sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1]) + sample_id_high = np.sum(batch_sizes) + + if sample_id_high / global_batch_size < (1 - self.oversample_foreground_percent): + oversample_percents.append(0.0) + elif sample_id_low / global_batch_size > (1 - self.oversample_foreground_percent): + oversample_percents.append(1.0) + else: + percent_covered_by_this_rank = sample_id_high / global_batch_size - sample_id_low / global_batch_size + oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) - + sample_id_low / global_batch_size) / percent_covered_by_this_rank) + oversample_percents.append(oversample_percent_here) + + print("worker", my_rank, "oversample", oversample_percents[my_rank]) + print("worker", my_rank, "batch_size", batch_sizes[my_rank]) + # self.print_to_log_file("worker", my_rank, "oversample", oversample_percents[my_rank]) + # self.print_to_log_file("worker", my_rank, "batch_size", batch_sizes[my_rank]) + + self.batch_size = batch_sizes[my_rank] + self.oversample_foreground_percent = oversample_percents[my_rank] + + def _build_loss(self): + if self.label_manager.has_regions: + loss = DC_and_BCE_loss({}, + {'batch_dice': self.configuration_manager.batch_dice, + 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, + use_ignore_label=self.label_manager.ignore_label is not None, + dice_class=MemoryEfficientSoftDiceLoss) + else: + loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, + 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, + ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + """ + This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it. + """ + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation) + if dim == 2: + do_dummy_2d_data_aug = False + # todo revisit this parametrization + if max(patch_size) / min(patch_size) > 1.5: + rotation_for_DA = { + 'x': (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + else: + rotation_for_DA = { + 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + mirror_axes = (0, 1) + elif dim == 3: + # todo this is not ideal. We could also have patch_size (64, 16, 128) in which case a full 180deg 2d rot would be bad + # order of the axes is determined by spacing, not image size + do_dummy_2d_data_aug = (max(patch_size) / patch_size[0]) > ANISO_THRESHOLD + if do_dummy_2d_data_aug: + # why do we rotate 180 deg here all the time? We should also restrict it + rotation_for_DA = { + 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + else: + rotation_for_DA = { + 'x': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + 'y': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + 'z': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + } + mirror_axes = (0, 1, 2) + else: + raise RuntimeError() + + # todo this function is stupid. It doesn't even use the correct scale range (we keep things as they were in the + # old nnunet for now) + initial_patch_size = get_patch_size(patch_size[-dim:], + *rotation_for_DA.values(), + (0.85, 1.25)) + if do_dummy_2d_data_aug: + initial_patch_size[0] = patch_size[0] + + self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}') + self.inference_allowed_mirroring_axes = mirror_axes + + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True): + if self.local_rank == 0: + timestamp = time() + dt_object = datetime.fromtimestamp(timestamp) + + if add_timestamp: + args = (f"{dt_object}:", *args) + + successful = False + max_attempts = 5 + ctr = 0 + while not successful and ctr < max_attempts: + try: + with open(self.log_file, 'a+') as f: + for a in args: + f.write(str(a)) + f.write(" ") + f.write("\n") + successful = True + except IOError: + print(f"{datetime.fromtimestamp(timestamp)}: failed to log: ", sys.exc_info()) + sleep(0.5) + ctr += 1 + if also_print_to_console: + print(*args) + elif also_print_to_console: + print(*args) + + def print_plans(self): + if self.local_rank == 0: + dct = deepcopy(self.plans_manager.plans) + del dct['configurations'] + self.print_to_log_file(f"\nThis is the configuration used by this " + f"training:\nConfiguration name: {self.configuration_name}\n", + self.configuration_manager, '\n', add_timestamp=False) + self.print_to_log_file('These are the global plan.json settings:\n', dct, '\n', add_timestamp=False) + + def configure_optimizers(self): + optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, + momentum=0.99, nesterov=True) + lr_scheduler = PolyLRScheduler(optimizer, 
self.initial_lr, self.num_epochs)
+        return optimizer, lr_scheduler
+
+    def plot_network_architecture(self):
+        if self._do_i_compile():
+            self.print_to_log_file("Unable to plot network architecture: nnUNet_compile is enabled!")
+            return
+
+        if self.local_rank == 0:
+            try:
+                # raise NotImplementedError('hiddenlayer no longer works and we do not have a viable alternative :-(')
+                # pip install git+https://github.com/saugatkandel/hiddenlayer.git
+
+                # from torchviz import make_dot
+                # # not viable.
+                # make_dot(tuple(self.network(torch.rand((1, self.num_input_channels,
+                #                                         *self.configuration_manager.patch_size),
+                #                                        device=self.device)))).render(
+                #     join(self.output_folder, "network_architecture.pdf"), format='pdf')
+                # self.optimizer.zero_grad()
+
+                # broken.
+
+                import hiddenlayer as hl
+                g = hl.build_graph(self.network,
+                                   torch.rand((1, self.num_input_channels,
+                                               *self.configuration_manager.patch_size),
+                                              device=self.device),
+                                   transforms=None)
+                g.save(join(self.output_folder, "network_architecture.pdf"))
+                del g
+            except Exception as e:
+                self.print_to_log_file("Unable to plot network architecture:")
+                self.print_to_log_file(e)
+
+                # self.print_to_log_file("\nprinting the network instead:\n")
+                # self.print_to_log_file(self.network)
+                # self.print_to_log_file("\n")
+            finally:
+                empty_cache(self.device)
+
+    def do_split(self):
+        """
+        The default split is a 5-fold CV on all available training cases. nnU-Net will create a split (it is seeded,
+        so always the same) and save it as splits_final.json in the preprocessed data directory.
+        Sometimes you may want to create your own split for various reasons. For this you will need to create your own
+        splits_final.json file. If this file is present, nnU-Net is going to use it and whatever splits are defined in
+        it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)
+        and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to
+        use a random 80:20 data split.
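+        Sketch of the splits_final.json layout this code reads and writes (case names are illustrative):
+        > [
+        >     {"train": ["case_000", "case_001"], "val": ["case_002"]},
+        >     {"train": ["case_000", "case_002"], "val": ["case_001"]}
+        > ]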
+ :return: + """ + if self.fold == "all": + # if fold==all then we use all images for training and validation + case_identifiers = get_case_identifiers(self.preprocessed_dataset_folder) + tr_keys = case_identifiers + val_keys = tr_keys + else: + splits_file = join(self.preprocessed_dataset_folder_base, "splits_final.json") + dataset = nnUNetDataset(self.preprocessed_dataset_folder, case_identifiers=None, + num_images_properties_loading_threshold=0, + folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage) + # if the split file does not exist we need to create it + if not isfile(splits_file): + self.print_to_log_file("Creating new 5-fold cross-validation split...") + splits = [] + all_keys_sorted = np.sort(list(dataset.keys())) + kfold = KFold(n_splits=5, shuffle=True, random_state=12345) + for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)): + train_keys = np.array(all_keys_sorted)[train_idx] + test_keys = np.array(all_keys_sorted)[test_idx] + splits.append({}) + splits[-1]['train'] = list(train_keys) + splits[-1]['val'] = list(test_keys) + save_json(splits, splits_file) + + else: + self.print_to_log_file("Using splits from existing split file:", splits_file) + splits = load_json(splits_file) + self.print_to_log_file(f"The split file contains {len(splits)} splits.") + + self.print_to_log_file("Desired fold for training: %d" % self.fold) + if self.fold < len(splits): + tr_keys = splits[self.fold]['train'] + val_keys = splits[self.fold]['val'] + self.print_to_log_file("This split has %d training and %d validation cases." + % (len(tr_keys), len(val_keys))) + else: + self.print_to_log_file("INFO: You requested fold %d for training but splits " + "contain only %d folds. I am now creating a " + "random (but seeded) 80:20 split!" % (self.fold, len(splits))) + # if we request a fold that is not in the split file, create a random 80:20 split + rnd = np.random.RandomState(seed=12345 + self.fold) + keys = np.sort(list(dataset.keys())) + idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False) + idx_val = [i for i in range(len(keys)) if i not in idx_tr] + tr_keys = [keys[i] for i in idx_tr] + val_keys = [keys[i] for i in idx_val] + self.print_to_log_file("This random 80:20 split has %d training and %d validation cases." + % (len(tr_keys), len(val_keys))) + if any([i in val_keys for i in tr_keys]): + self.print_to_log_file('WARNING: Some validation cases are also in the training set. Please check the ' + 'splits.json or ignore if this is intentional.') + return tr_keys, val_keys + + def get_tr_and_val_datasets(self): + # create dataset split + tr_keys, val_keys = self.do_split() + + # load the datasets for training and validation. Note that we always draw random samples so we really don't + # care about distributing training cases across GPUs. + dataset_tr = nnUNetDataset(self.preprocessed_dataset_folder, tr_keys, + folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage, + num_images_properties_loading_threshold=0) + dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys, + folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage, + num_images_properties_loading_threshold=0) + return dataset_tr, dataset_val + + def get_dataloaders(self): + # we use the patch size to determine whether we need 2D or 3D dataloaders. 
We also use it to determine whether + # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + + # needed for deep supervision: how much do we need to downscale the segmentation targets for the different + # outputs? + + deep_supervision_scales = self._get_deep_supervision_scales() + + ( + rotation_for_DA, + do_dummy_2d_data_aug, + initial_patch_size, + mirror_axes, + ) = self.configure_rotation_dummyDA_mirroring_and_inital_patch_size() + + # training pipeline + tr_transforms = self.get_training_transforms( + patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, + order_resampling_data=3, order_resampling_seg=1, + use_mask_for_norm=self.configuration_manager.use_mask_for_norm, + is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.foreground_labels, + regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + # validation pipeline + val_transforms = self.get_validation_transforms(deep_supervision_scales, + is_cascaded=self.is_cascaded, + foreground_labels=self.label_manager.foreground_labels, + regions=self.label_manager.foreground_regions if + self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim) + + allowed_num_processes = get_allowed_n_proc_DA() + if allowed_num_processes == 0: + mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms) + mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms) + else: + mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, data_loader=dl_tr, transform=tr_transforms, + num_processes=allowed_num_processes, num_cached=6, seeds=None, + pin_memory=self.device.type == 'cuda', wait_time=0.02) + mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, data_loader=dl_val, + transform=val_transforms, num_processes=max(1, allowed_num_processes // 2), + num_cached=3, seeds=None, pin_memory=self.device.type == 'cuda', + wait_time=0.02) + return mt_gen_train, mt_gen_val + + def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int): + dataset_tr, dataset_val = self.get_tr_and_val_datasets() + + if dim == 2: + dl_tr = nnUNetDataLoader2D(dataset_tr, self.batch_size, + initial_patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None) + dl_val = nnUNetDataLoader2D(dataset_val, self.batch_size, + self.configuration_manager.patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None) + else: + dl_tr = nnUNetDataLoader3D(dataset_tr, self.batch_size, + initial_patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None) + dl_val = nnUNetDataLoader3D(dataset_val, self.batch_size, + self.configuration_manager.patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None) + return dl_tr, dl_val + + @staticmethod + def get_training_transforms( + patch_size: 
Union[np.ndarray, Tuple[int]], + rotation_for_DA: dict, + deep_supervision_scales: Union[List, Tuple, None], + mirror_axes: Tuple[int, ...], + do_dummy_2d_data_aug: bool, + order_resampling_data: int = 3, + order_resampling_seg: int = 1, + border_val_seg: int = -1, + use_mask_for_norm: List[bool] = None, + is_cascaded: bool = False, + foreground_labels: Union[Tuple[int, ...], List[int]] = None, + regions: List[Union[List[int], Tuple[int, ...], int]] = None, + ignore_label: int = None, + ) -> AbstractTransform: + tr_transforms = [] + if do_dummy_2d_data_aug: + ignore_axes = (0,) + tr_transforms.append(Convert3DTo2DTransform()) + patch_size_spatial = patch_size[1:] + else: + patch_size_spatial = patch_size + ignore_axes = None + + tr_transforms.append(SpatialTransform( + patch_size_spatial, patch_center_dist_from_border=None, + do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0), + do_rotation=True, angle_x=rotation_for_DA['x'], angle_y=rotation_for_DA['y'], angle_z=rotation_for_DA['z'], + p_rot_per_axis=1, # todo experiment with this + do_scale=True, scale=(0.7, 1.4), + border_mode_data="constant", border_cval_data=0, order_data=order_resampling_data, + border_mode_seg="constant", border_cval_seg=border_val_seg, order_seg=order_resampling_seg, + random_crop=False, # random cropping is part of our dataloaders + p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2, + independent_scale_for_each_axis=False # todo experiment with this + )) + + if do_dummy_2d_data_aug: + tr_transforms.append(Convert2DTo3DTransform()) + + tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, + p_per_channel=0.5)) + tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15)) + tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15)) + tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True, + p_per_channel=0.5, + order_downsample=0, order_upsample=3, p_per_sample=0.25, + ignore_axes=ignore_axes)) + tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1)) + tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3)) + + if mirror_axes is not None and len(mirror_axes) > 0: + tr_transforms.append(MirrorTransform(mirror_axes)) + + if use_mask_for_norm is not None and any(use_mask_for_norm): + tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]], + mask_idx_in_seg=0, set_outside_to=0)) + + tr_transforms.append(RemoveLabelTransform(-1, 0)) + + if is_cascaded: + assert foreground_labels is not None, 'We need foreground_labels for cascade augmentations' + tr_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data')) + tr_transforms.append(ApplyRandomBinaryOperatorTransform( + channel_idx=list(range(-len(foreground_labels), 0)), + p_per_sample=0.4, + key="data", + strel_size=(1, 8), + p_per_label=1)) + tr_transforms.append( + RemoveRandomConnectedComponentFromOneHotEncodingTransform( + channel_idx=list(range(-len(foreground_labels), 0)), + key="data", + p_per_sample=0.2, + fill_with_other_class_p=0, + dont_do_if_covers_more_than_x_percent=0.15)) + + tr_transforms.append(RenameTransform('seg', 'target', True)) + + if regions is not None: + # the ignore label must also be converted + tr_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + 
[ignore_label] + if ignore_label is not None else regions, + 'target', 'target')) + + if deep_supervision_scales is not None: + tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target', + output_key='target')) + tr_transforms.append(NumpyToTensor(['data', 'target'], 'float')) + tr_transforms = Compose(tr_transforms) + return tr_transforms + + @staticmethod + def get_validation_transforms( + deep_supervision_scales: Union[List, Tuple, None], + is_cascaded: bool = False, + foreground_labels: Union[Tuple[int, ...], List[int]] = None, + regions: List[Union[List[int], Tuple[int, ...], int]] = None, + ignore_label: int = None, + ) -> AbstractTransform: + val_transforms = [] + val_transforms.append(RemoveLabelTransform(-1, 0)) + + if is_cascaded: + val_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data')) + + val_transforms.append(RenameTransform('seg', 'target', True)) + + if regions is not None: + # the ignore label must also be converted + val_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label] + if ignore_label is not None else regions, + 'target', 'target')) + + if deep_supervision_scales is not None: + val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target', + output_key='target')) + + val_transforms.append(NumpyToTensor(['data', 'target'], 'float')) + val_transforms = Compose(val_transforms) + return val_transforms + + def set_deep_supervision_enabled(self, enabled: bool): + """ + This function is specific for the default architecture in nnU-Net. If you change the architecture, there are + chances you need to change this as well! + """ + if self.is_ddp: + self.network.module.decoder.deep_supervision = enabled + else: + self.network.decoder.deep_supervision = enabled + + def on_train_start(self): + if not self.was_initialized: + self.initialize() + + maybe_mkdir_p(self.output_folder) + + # make sure deep supervision is on in the network + self.set_deep_supervision_enabled(self.enable_deep_supervision) + + self.print_plans() + empty_cache(self.device) + + # maybe unpack + if self.unpack_dataset and self.local_rank == 0: + self.print_to_log_file('unpacking dataset...') + unpack_dataset(self.preprocessed_dataset_folder, unpack_segmentation=True, overwrite_existing=False, + num_processes=max(1, round(get_allowed_n_proc_DA() // 2))) + self.print_to_log_file('unpacking done...') + + if self.is_ddp: + dist.barrier() + + # dataloaders must be instantiated here because they need access to the training data which may not be present + # when doing inference + self.dataloader_train, self.dataloader_val = self.get_dataloaders() + + # copy plans and dataset.json so that they can be used for restoring everything we need for inference + save_json(self.plans_manager.plans, join(self.output_folder_base, 'plans.json'), sort_keys=False) + save_json(self.dataset_json, join(self.output_folder_base, 'dataset.json'), sort_keys=False) + + # we don't really need the fingerprint but its still handy to have it with the others + shutil.copy(join(self.preprocessed_dataset_folder_base, 'dataset_fingerprint.json'), + join(self.output_folder_base, 'dataset_fingerprint.json')) + + # produces a pdf in output folder + self.plot_network_architecture() + + self._save_debug_information() + + # print(f"batch size: {self.batch_size}") + # print(f"oversample: {self.oversample_foreground_percent}") + + def on_train_end(self): + # dirty hack because on_epoch_end increments the epoch counter and 
this is executed afterwards.
+        # This will lead to the wrong current epoch to be stored
+        self.current_epoch -= 1
+        self.save_checkpoint(join(self.output_folder, "checkpoint_final.pth"))
+        self.current_epoch += 1
+
+        # now we can delete latest
+        if self.local_rank == 0 and isfile(join(self.output_folder, "checkpoint_latest.pth")):
+            os.remove(join(self.output_folder, "checkpoint_latest.pth"))
+
+        # shut down dataloaders
+        old_stdout = sys.stdout
+        with open(os.devnull, 'w') as f:
+            sys.stdout = f
+            if self.dataloader_train is not None:
+                self.dataloader_train._finish()
+            if self.dataloader_val is not None:
+                self.dataloader_val._finish()
+            sys.stdout = old_stdout
+
+        empty_cache(self.device)
+        self.print_to_log_file("Training done.")
+
+    def on_train_epoch_start(self):
+        self.network.train()
+        self.lr_scheduler.step(self.current_epoch)
+        self.print_to_log_file('')
+        self.print_to_log_file(f'Epoch {self.current_epoch}')
+        self.print_to_log_file(
+            f"Current learning rate: {np.round(self.optimizer.param_groups[0]['lr'], decimals=5)}")
+        # lrs are the same for all workers so we don't need to gather them in case of DDP training
+        self.logger.log('lrs', self.optimizer.param_groups[0]['lr'], self.current_epoch)
+
+    def train_step(self, batch: dict) -> dict:
+        data = batch['data']
+        target = batch['target']
+
+        data = data.to(self.device, non_blocking=True)
+        if isinstance(target, list):
+            target = [i.to(self.device, non_blocking=True) for i in target]
+        else:
+            target = target.to(self.device, non_blocking=True)
+
+        self.optimizer.zero_grad(set_to_none=True)
+        # Autocast is finicky:
+        # If the device_type is 'cpu' it is very slow and needs to be disabled.
+        # If the device_type is 'mps' it will complain that mps is not implemented, even if enabled=False is set
+        # (this is why we don't make use of enabled=False).
+        # So autocast will only be active if we have a cuda device.
+        with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
+            output = self.network(data)
+            # del data
+            l = self.loss(output, target)
+
+        if self.grad_scaler is not None:
+            self.grad_scaler.scale(l).backward()
+            self.grad_scaler.unscale_(self.optimizer)
+            torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
+            self.grad_scaler.step(self.optimizer)
+            self.grad_scaler.update()
+        else:
+            l.backward()
+            torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)
+            self.optimizer.step()
+        return {'loss': l.detach().cpu().numpy()}
+
+    def on_train_epoch_end(self, train_outputs: List[dict]):
+        outputs = collate_outputs(train_outputs)
+
+        if self.is_ddp:
+            losses_tr = [None for _ in range(dist.get_world_size())]
+            dist.all_gather_object(losses_tr, outputs['loss'])
+            loss_here = np.vstack(losses_tr).mean()
+        else:
+            loss_here = np.mean(outputs['loss'])
+
+        self.logger.log('train_losses', loss_here, self.current_epoch)
+
+    def on_validation_epoch_start(self):
+        self.network.eval()
+
+    def validation_step(self, batch: dict) -> dict:
+        data = batch['data']
+        target = batch['target']
+
+        data = data.to(self.device, non_blocking=True)
+        if isinstance(target, list):
+            target = [i.to(self.device, non_blocking=True) for i in target]
+        else:
+            target = target.to(self.device, non_blocking=True)
+
+        # Autocast is finicky:
+        # If the device_type is 'cpu' it is very slow and needs to be disabled.
+        # If the device_type is 'mps' it will complain that mps is not implemented, even if enabled=False is set
+        # (this is why we don't make use of enabled=False).
+        # So autocast will only be active if we have a cuda device.
+        with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():
+            output = self.network(data)
+            del data
+            l = self.loss(output, target)
+
+        # we only need the output with the highest output resolution (if DS enabled)
+        if self.enable_deep_supervision:
+            output = output[0]
+            target = target[0]
+
+        # the following is needed for online evaluation. Fake dice (green line)
+        axes = [0] + list(range(2, output.ndim))
+
+        if self.label_manager.has_regions:
+            predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()
+        else:
+            # no need for softmax
+            output_seg = output.argmax(1)[:, None]
+            predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)
+            predicted_segmentation_onehot.scatter_(1, output_seg, 1)
+            del output_seg
+
+        if self.label_manager.has_ignore_label:
+            if not self.label_manager.has_regions:
+                mask = (target != self.label_manager.ignore_label).float()
+                # CAREFUL that you don't rely on target after this line!
+                target[target == self.label_manager.ignore_label] = 0
+            else:
+                mask = 1 - target[:, -1:]
+                # CAREFUL that you don't rely on target after this line!
+                target = target[:, :-1]
+        else:
+            mask = None
+
+        tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)
+
+        tp_hard = tp.detach().cpu().numpy()
+        fp_hard = fp.detach().cpu().numpy()
+        fn_hard = fn.detach().cpu().numpy()
+        if not self.label_manager.has_regions:
+            # if we train with regions all segmentation heads predict some kind of foreground. In conventional
+            # (softmax) training there needs to be one output for the background. We are not interested in the
+            # background Dice
+            # [1:] in order to remove background
+            tp_hard = tp_hard[1:]
+            fp_hard = fp_hard[1:]
+            fn_hard = fn_hard[1:]
+
+        return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}
+
+    def on_validation_epoch_end(self, val_outputs: List[dict]):
+        outputs_collated = collate_outputs(val_outputs)
+        tp = np.sum(outputs_collated['tp_hard'], 0)
+        fp = np.sum(outputs_collated['fp_hard'], 0)
+        fn = np.sum(outputs_collated['fn_hard'], 0)
+
+        if self.is_ddp:
+            world_size = dist.get_world_size()
+
+            tps = [None for _ in range(world_size)]
+            dist.all_gather_object(tps, tp)
+            tp = np.vstack([i[None] for i in tps]).sum(0)
+
+            fps = [None for _ in range(world_size)]
+            dist.all_gather_object(fps, fp)
+            fp = np.vstack([i[None] for i in fps]).sum(0)
+
+            fns = [None for _ in range(world_size)]
+            dist.all_gather_object(fns, fn)
+            fn = np.vstack([i[None] for i in fns]).sum(0)
+
+            losses_val = [None for _ in range(world_size)]
+            dist.all_gather_object(losses_val, outputs_collated['loss'])
+            loss_here = np.vstack(losses_val).mean()
+        else:
+            loss_here = np.mean(outputs_collated['loss'])
+
+        global_dc_per_class = [2 * i / (2 * i + j + k) for i, j, k in zip(tp, fp, fn)]
+        mean_fg_dice = np.nanmean(global_dc_per_class)
+        self.logger.log('mean_fg_dice', mean_fg_dice, self.current_epoch)
+        self.logger.log('dice_per_class_or_region', global_dc_per_class, self.current_epoch)
+        self.logger.log('val_losses', loss_here, self.current_epoch)
+
+    def on_epoch_start(self):
+        self.logger.log('epoch_start_timestamps', time(), self.current_epoch)
+
+    def on_epoch_end(self):
+        self.logger.log('epoch_end_timestamps', time(), self.current_epoch)
+
+        self.print_to_log_file('train_loss',
np.round(self.logger.my_fantastic_logging['train_losses'][-1], decimals=4)) + self.print_to_log_file('val_loss', np.round(self.logger.my_fantastic_logging['val_losses'][-1], decimals=4)) + self.print_to_log_file('Pseudo dice', [np.round(i, decimals=4) for i in + self.logger.my_fantastic_logging['dice_per_class_or_region'][-1]]) + self.print_to_log_file( + f"Epoch time: {np.round(self.logger.my_fantastic_logging['epoch_end_timestamps'][-1] - self.logger.my_fantastic_logging['epoch_start_timestamps'][-1], decimals=2)} s") + + # handling periodic checkpointing + current_epoch = self.current_epoch + if (current_epoch + 1) % self.save_every == 0 and current_epoch != (self.num_epochs - 1): + self.save_checkpoint(join(self.output_folder, 'checkpoint_latest.pth')) + + # handle 'best' checkpointing. ema_fg_dice is computed by the logger and can be accessed like this + if self._best_ema is None or self.logger.my_fantastic_logging['ema_fg_dice'][-1] > self._best_ema: + self._best_ema = self.logger.my_fantastic_logging['ema_fg_dice'][-1] + self.print_to_log_file(f"Yayy! New best EMA pseudo Dice: {np.round(self._best_ema, decimals=4)}") + self.save_checkpoint(join(self.output_folder, 'checkpoint_best.pth')) + + if self.local_rank == 0: + self.logger.plot_progress_png(self.output_folder) + + self.current_epoch += 1 + + def save_checkpoint(self, filename: str) -> None: + if self.local_rank == 0: + if not self.disable_checkpointing: + if self.is_ddp: + mod = self.network.module + else: + mod = self.network + if isinstance(mod, OptimizedModule): + mod = mod._orig_mod + + checkpoint = { + 'network_weights': mod.state_dict(), + 'optimizer_state': self.optimizer.state_dict(), + 'grad_scaler_state': self.grad_scaler.state_dict() if self.grad_scaler is not None else None, + 'logging': self.logger.get_checkpoint(), + '_best_ema': self._best_ema, + 'current_epoch': self.current_epoch + 1, + 'init_args': self.my_init_kwargs, + 'trainer_name': self.__class__.__name__, + 'inference_allowed_mirroring_axes': self.inference_allowed_mirroring_axes, + } + torch.save(checkpoint, filename) + else: + self.print_to_log_file('No checkpoint written, checkpointing is disabled') + + def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) -> None: + if not self.was_initialized: + self.initialize() + + if isinstance(filename_or_checkpoint, str): + checkpoint = torch.load(filename_or_checkpoint, map_location=self.device) + # if state dict comes from nn.DataParallel but we use non-parallel model here then the state dict keys do not + # match. Use heuristic to make it match + new_state_dict = {} + for k, value in checkpoint['network_weights'].items(): + key = k + if key not in self.network.state_dict().keys() and key.startswith('module.'): + key = key[7:] + new_state_dict[key] = value + + self.my_init_kwargs = checkpoint['init_args'] + self.current_epoch = checkpoint['current_epoch'] + self.logger.load_checkpoint(checkpoint['logging']) + self._best_ema = checkpoint['_best_ema'] + self.inference_allowed_mirroring_axes = checkpoint[ + 'inference_allowed_mirroring_axes'] if 'inference_allowed_mirroring_axes' in checkpoint.keys() else self.inference_allowed_mirroring_axes + + # messing with state dict naming schemes. Facepalm. 
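+        # Example of the key remapping performed above (key name is hypothetical):
+        #     'module.encoder.stages.0.convs.0.weight' -> 'encoder.stages.0.convs.0.weight'
+        # The branches below then pick the object that actually owns the parameters:
+        # DDP wraps the network in .module, and torch.compile hides it behind ._orig_mod.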
+ if self.is_ddp: + if isinstance(self.network.module, OptimizedModule): + self.network.module._orig_mod.load_state_dict(new_state_dict) + else: + self.network.module.load_state_dict(new_state_dict) + else: + if isinstance(self.network, OptimizedModule): + self.network._orig_mod.load_state_dict(new_state_dict) + else: + self.network.load_state_dict(new_state_dict) + self.optimizer.load_state_dict(checkpoint['optimizer_state']) + if self.grad_scaler is not None: + if checkpoint['grad_scaler_state'] is not None: + self.grad_scaler.load_state_dict(checkpoint['grad_scaler_state']) + + def perform_actual_validation(self, save_probabilities: bool = False): + self.set_deep_supervision_enabled(False) + self.network.eval() + + predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True, + perform_everything_on_device=True, device=self.device, verbose=False, + verbose_preprocessing=False, allow_tqdm=False) + predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None, + self.dataset_json, self.__class__.__name__, + self.inference_allowed_mirroring_axes) + + with multiprocessing.get_context("spawn").Pool(default_num_processes) as segmentation_export_pool: + worker_list = [i for i in segmentation_export_pool._pool] + validation_output_folder = join(self.output_folder, 'validation') + maybe_mkdir_p(validation_output_folder) + + # we cannot use self.get_tr_and_val_datasets() here because we might be DDP and then we have to distribute + # the validation keys across the workers. + _, val_keys = self.do_split() + if self.is_ddp: + val_keys = val_keys[self.local_rank:: dist.get_world_size()] + + dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys, + folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage, + num_images_properties_loading_threshold=0) + + next_stages = self.configuration_manager.next_stage_names + + if next_stages is not None: + _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages] + + results = [] + + for k in dataset_val.keys(): + proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results, + allowed_num_queued=2) + while not proceed: + sleep(0.1) + proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results, + allowed_num_queued=2) + + self.print_to_log_file(f"predicting {k}") + data, seg, properties = dataset_val.load_case(k) + + if self.is_cascaded: + data = np.vstack((data, convert_labelmap_to_one_hot(seg[-1], self.label_manager.foreground_labels, + output_dtype=data.dtype))) + with warnings.catch_warnings(): + # ignore 'The given NumPy array is not writable' warning + warnings.simplefilter("ignore") + data = torch.from_numpy(data) + + output_filename_truncated = join(validation_output_folder, k) + + try: + prediction = predictor.predict_sliding_window_return_logits(data) + except RuntimeError: + predictor.perform_everything_on_device = False + prediction = predictor.predict_sliding_window_return_logits(data) + predictor.perform_everything_on_device = True + + prediction = prediction.cpu() + + # this needs to go into background processes + results.append( + segmentation_export_pool.starmap_async( + export_prediction_from_logits, ( + (prediction, properties, self.configuration_manager, self.plans_manager, + self.dataset_json, output_filename_truncated, save_probabilities), + ) + ) + ) + # for debug purposes + # export_prediction(prediction_for_export, properties, 
self.configuration, self.plans, self.dataset_json, + # output_filename_truncated, save_probabilities) + + # if needed, export the softmax prediction for the next stage + if next_stages is not None: + for n in next_stages: + next_stage_config_manager = self.plans_manager.get_configuration(n) + expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name, + next_stage_config_manager.data_identifier) + + try: + # we do this so that we can use load_case and do not have to hard code how loading training cases is implemented + tmp = nnUNetDataset(expected_preprocessed_folder, [k], + num_images_properties_loading_threshold=0) + d, s, p = tmp.load_case(k) + except FileNotFoundError: + self.print_to_log_file( + f"Predicting next stage {n} failed for case {k} because the preprocessed file is missing! " + f"Run the preprocessing for this configuration first!") + continue + + target_shape = d.shape[1:] + output_folder = join(self.output_folder_base, 'predicted_next_stage', n) + output_file = join(output_folder, k + '.npz') + + # resample_and_save(prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties, + # self.dataset_json) + results.append(segmentation_export_pool.starmap_async( + resample_and_save, ( + (prediction, target_shape, output_file, self.plans_manager, + self.configuration_manager, + properties, + self.dataset_json), + ) + )) + + _ = [r.get() for r in results] + + if self.is_ddp: + dist.barrier() + + if self.local_rank == 0: + metrics = compute_metrics_on_folder(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'), + validation_output_folder, + join(validation_output_folder, 'summary.json'), + self.plans_manager.image_reader_writer_class(), + self.dataset_json["file_ending"], + self.label_manager.foreground_regions if self.label_manager.has_regions else + self.label_manager.foreground_labels, + self.label_manager.ignore_label, chill=True) + self.print_to_log_file("Validation complete", also_print_to_console=True) + self.print_to_log_file("Mean Validation Dice: ", (metrics['foreground_mean']["Dice"]), also_print_to_console=True) + + self.set_deep_supervision_enabled(True) + compute_gaussian.cache_clear() + + def run_training(self): + self.on_train_start() + + for epoch in range(self.current_epoch, self.num_epochs): + self.on_epoch_start() + + self.on_train_epoch_start() + train_outputs = [] + for batch_id in range(self.num_iterations_per_epoch): + train_outputs.append(self.train_step(next(self.dataloader_train))) + self.on_train_epoch_end(train_outputs) + + with torch.no_grad(): + self.on_validation_epoch_start() + val_outputs = [] + for batch_id in range(self.num_val_iterations_per_epoch): + val_outputs.append(self.validation_step(next(self.dataloader_val))) + self.on_validation_epoch_end(val_outputs) + + self.on_epoch_end() + + self.on_train_end() diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerLightMUNet.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerLightMUNet.py new file mode 100644 index 0000000..ae41d42 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerLightMUNet.py @@ -0,0 +1,141 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler +from torch import nn +import torch 
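+# Note: LightMUNet (see nnunetv2/nets/LightMUNet.py in this patch) follows MONAI's SegResNet
+# constructor signature (init_filters, blocks_down, blocks_up), which is why
+# build_network_architecture below instantiates it with SegResNet-style arguments.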
+ +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn + +from nnunetv2.nets.LightMUNet import LightMUNet +from torch.optim import Adam + +class nnUNetTrainerLightMUNet(nnUNetTrainerNoDeepSupervision): + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device('cuda') + ): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.grad_scaler = None + self.initial_lr = 1e-4 + self.weight_decay = 1e-5 + + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + + label_manager = plans_manager.get_label_manager(dataset_json) + + model = LightMUNet( + spatial_dims = len(configuration_manager.patch_size), + init_filters = 32, + in_channels=num_input_channels, + out_channels=label_manager.num_segmentation_heads, + blocks_down=[1, 2, 2, 4], + blocks_up=[1, 1, 1], + ) + + return model + + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + l = self.loss(output, target) + l.backward() + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.optimizer.step() + + return {'loss': l.detach().cpu().numpy()} + + + def validation_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + del data + l = self.loss(output, target) + + axes = [0] + list(range(2, output.ndim)) + + if self.label_manager.has_regions: + predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() + else: + output_seg = output.argmax(1)[:, None] + predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) + predicted_segmentation_onehot.scatter_(1, output_seg, 1) + del output_seg + + if self.label_manager.has_ignore_label: + if not self.label_manager.has_regions: + mask = (target != self.label_manager.ignore_label).float() + target[target == self.label_manager.ignore_label] = 0 + else: + mask = 1 - target[:, -1:] + target = target[:, :-1] + else: + mask = None + + tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask) + + tp_hard = tp.detach().cpu().numpy() + fp_hard = fp.detach().cpu().numpy() + fn_hard = fn.detach().cpu().numpy() + if not self.label_manager.has_regions: + tp_hard = tp_hard[1:] + fp_hard = fp_hard[1:] + fn_hard = fn_hard[1:] + + return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard} + + def configure_optimizers(self): + + optimizer = Adam(self.network.parameters(), lr=self.initial_lr, weight_decay=self.weight_decay, eps=1e-5) + scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs, exponent=0.9) + + return optimizer, scheduler + + def set_deep_supervision_enabled(self, enabled: bool): + pass + + + +class 
nnUNetTrainerLightMUNet_100epochs(nnUNetTrainerLightMUNet): + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device('cuda') + ): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerMedNext.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerMedNext.py new file mode 100644 index 0000000..c4051a4 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerMedNext.py @@ -0,0 +1,259 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn +import torch +from torch.optim import AdamW +from torch.optim.lr_scheduler import CosineAnnealingLR +from torch import nn +from nnunetv2.nets.mednextv1.MedNextV1 import MedNeXt + + +class nnUNetTrainerMedNext(nnUNetTrainerNoDeepSupervision): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + original_patch_size = self.configuration_manager.patch_size + new_patch_size = [-1] * len(original_patch_size) + for i in range(len(original_patch_size)): + if (original_patch_size[i] / 2**5) < 1 or ((original_patch_size[i] / 2**5) % 1) != 0: + new_patch_size[i] = round(original_patch_size[i] / 2**5 + 0.5) * 2**5 + else: + new_patch_size[i] = original_patch_size[i] + self.configuration_manager.configuration['patch_size'] = new_patch_size + self.print_to_log_file("Patch size changed from {} to {}".format(original_patch_size, new_patch_size)) + self.plans_manager.plans['configurations'][self.configuration_name]['patch_size'] = new_patch_size + + self.grad_scaler = None + self.initial_lr = 1e-3 + self.weight_decay = 0.01 + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + l = self.loss(output, target) + l.backward() + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.optimizer.step() + + return {'loss': l.detach().cpu().numpy()} + + + def validation_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + # Autocast is a little bitch. + # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. + # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) + # So autocast will only be active if we have a cuda device. 
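+        # Note: self.grad_scaler is set to None in __init__ and the forward pass below is not
+        # wrapped in autocast, so this trainer validates in full precision regardless of the
+        # device type discussed above.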
+        output = self.network(data)
+        del data
+        l = self.loss(output, target)
+
+        # the following is needed for online evaluation. Fake dice (green line)
+        axes = [0] + list(range(2, output.ndim))
+
+        if self.label_manager.has_regions:
+            predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()
+        else:
+            # no need for softmax
+            output_seg = output.argmax(1)[:, None]
+            predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)
+            predicted_segmentation_onehot.scatter_(1, output_seg, 1)
+            del output_seg
+
+        if self.label_manager.has_ignore_label:
+            if not self.label_manager.has_regions:
+                mask = (target != self.label_manager.ignore_label).float()
+                # CAREFUL that you don't rely on target after this line!
+                target[target == self.label_manager.ignore_label] = 0
+            else:
+                mask = 1 - target[:, -1:]
+                # CAREFUL that you don't rely on target after this line!
+                target = target[:, :-1]
+        else:
+            mask = None
+
+        tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)
+
+        tp_hard = tp.detach().cpu().numpy()
+        fp_hard = fp.detach().cpu().numpy()
+        fn_hard = fn.detach().cpu().numpy()
+        if not self.label_manager.has_regions:
+            # if we train with regions all segmentation heads predict some kind of foreground. In conventional
+            # (softmax) training there needs to be one output for the background. We are not interested in the
+            # background Dice
+            # [1:] in order to remove background
+            tp_hard = tp_hard[1:]
+            fp_hard = fp_hard[1:]
+            fn_hard = fn_hard[1:]
+
+        return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}
+
+    def configure_optimizers(self):
+
+        optimizer = AdamW(self.network.parameters(), lr=self.initial_lr, weight_decay=self.weight_decay, eps=1e-5)
+        scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs, eta_min=1e-6)
+
+        self.print_to_log_file(f"Using optimizer {optimizer}")
+        self.print_to_log_file(f"Using scheduler {scheduler}")
+
+        return optimizer, scheduler
+
+    def set_deep_supervision_enabled(self, enabled: bool):
+        pass
+
+
+class nnUNetTrainerV2_MedNeXt_L_kernel5(nnUNetTrainerMedNext):
+    """
+    MedNeXt large (L) variant with kernel size 5
+    """
+    @staticmethod
+    def build_network_architecture(plans_manager: PlansManager,
+                                   dataset_json,
+                                   configuration_manager: ConfigurationManager,
+                                   num_input_channels,
+                                   enable_deep_supervision: bool = False) -> nn.Module:
+
+        label_manager = plans_manager.get_label_manager(dataset_json)
+
+        model = create_mednextv1_large(num_input_channels, label_manager.num_segmentation_heads, 5, False)
+
+        return model
+
+
+class nnUNetTrainerV2_MedNeXt_L_kernel5_100epochs(nnUNetTrainerV2_MedNeXt_L_kernel5):
+    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+                 device: torch.device = torch.device('cuda')):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.num_epochs = 100
+
+
+class nnUNetTrainerV2_MedNeXt_B_kernel5(nnUNetTrainerMedNext):
+    """
+    MedNeXt base (B) variant with kernel size 5
+    """
+    @staticmethod
+    def build_network_architecture(plans_manager: PlansManager,
+                                   dataset_json,
+                                   configuration_manager: ConfigurationManager,
+                                   num_input_channels,
+                                   enable_deep_supervision: bool = False) -> nn.Module:
+
+        label_manager = plans_manager.get_label_manager(dataset_json)
+
+        model = create_mednextv1_base(num_input_channels, label_manager.num_segmentation_heads, 5, False)
+
+
return model + +class nnUNetTrainerV2_MedNeXt_B_kernel5_100epochs(nnUNetTrainerV2_MedNeXt_B_kernel5): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 + + + +class nnUNetTrainerV2_MedNeXt_M_kernel5(nnUNetTrainerMedNext): + """ + Residual Encoder + UMmaba Bottleneck + Residual Decoder + Skip Connections + """ + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + + label_manager = plans_manager.get_label_manager(dataset_json) + + model = create_mednextv1_medium(num_input_channels, label_manager.num_segmentation_heads, 5, False) + + return model + +def create_mednextv1_small(num_input_channels, num_classes, kernel_size=3, ds=False): + return MedNeXt( + in_channels=num_input_channels, + n_channels=32, + n_classes=num_classes, + exp_r=2, + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down=True, + block_counts=[2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + + +def create_mednextv1_base(num_input_channels, num_classes, kernel_size=3, ds=False): + return MedNeXt( + in_channels=num_input_channels, + n_channels=32, + n_classes=num_classes, + exp_r=[2, 3, 4, 4, 4, 4, 4, 3, 2], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down=True, + block_counts=[2, 2, 2, 2, 2, 2, 2, 2, 2] + ) + + +def create_mednextv1_medium(num_input_channels, num_classes, kernel_size=3, ds=False): + return MedNeXt( + in_channels=num_input_channels, + n_channels=32, + n_classes=num_classes, + exp_r=[2, 3, 4, 4, 4, 4, 4, 3, 2], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down=True, + block_counts=[3, 4, 4, 4, 4, 4, 4, 4, 3], + checkpoint_style='outside_block' + ) + + +def create_mednextv1_large(num_input_channels, num_classes, kernel_size=3, ds=False): + return MedNeXt( + in_channels=num_input_channels, + n_channels=32, + n_classes=num_classes, + exp_r=[3, 4, 8, 8, 8, 8, 8, 4, 3], + kernel_size=kernel_size, + deep_supervision=ds, + do_res=True, + do_res_up_down=True, + block_counts=[3, 4, 8, 8, 8, 8, 8, 4, 3], + checkpoint_style='outside_block' + ) + + +def create_mednext_v1(num_input_channels, num_classes, model_id, kernel_size=3, + deep_supervision=False): + model_dict = { + 'S': create_mednextv1_small, + 'B': create_mednextv1_base, + 'M': create_mednextv1_medium, + 'L': create_mednextv1_large, + } + + return model_dict[model_id]( + num_input_channels, num_classes, kernel_size, deep_supervision + ) diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSAMed.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSAMed.py new file mode 100644 index 0000000..e7b08f7 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSAMed.py @@ -0,0 +1,306 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn +import torch +from torch.optim import AdamW +from torch import nn +from nnunetv2.nets.sam_lora_image_encoder import LoRA_Sam +from 
nnunetv2.nets.segment_anything.modeling.mask_decoder import MaskDecoder +from nnunetv2.nets.segment_anything import sam_model_registry +from nnunetv2.training.lr_scheduler.samedlr import CustomWarmupDecayLR +from monai.transforms import ( + Resize, + +) +from torch._dynamo import OptimizedModule + +from typing import Union + + +class nnUNetTrainerSAMed(nnUNetTrainerNoDeepSupervision): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + original_patch_size = self.configuration_manager.patch_size + new_patch_size = [-1] * len(original_patch_size) + for i in range(len(original_patch_size)): + if (original_patch_size[i] / 2 ** 5) < 1 or ((original_patch_size[i] / 2 ** 5) % 1) != 0: + new_patch_size[i] = round(original_patch_size[i] / 2 ** 5 + 0.5) * 2 ** 5 + else: + new_patch_size[i] = original_patch_size[i] + self.configuration_manager.configuration['patch_size'] = new_patch_size + self.print_to_log_file("Patch size changed from {} to {}".format(original_patch_size, new_patch_size)) + self.plans_manager.plans['configurations'][self.configuration_name]['patch_size'] = new_patch_size + self.initial_lr = 1e-3 + self.weight_decay = 0.01 + self.lr_decay=0.9 + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + low_res_label_batch = [self.resize(i.to(self.device, non_blocking=True).squeeze()) for i in target] + else: + target = target.to(self.device, non_blocking=True) + low_res_label_batch = self.resize(target.squeeze()) + + self.optimizer.zero_grad(set_to_none=True) + with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=True): + outputs = self.network(data, True, self.patch_size) + # print(outputs['low_res_logits'].size(), low_res_label_batch.size(),self.label_manager.has_regions) + # print(torch.unique(low_res_label_batch),) + l = self.loss(outputs['low_res_logits'], low_res_label_batch.unsqueeze(1)) + + self.grad_scaler.scale(l).backward() + self.grad_scaler.unscale_(self.optimizer) + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.grad_scaler.step(self.optimizer) + self.grad_scaler.update() + + + return {'loss': l.detach().cpu().numpy()} + + def validation_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + + if isinstance(target, list): + low_res_label_batch = [self.resize(i.to(self.device, non_blocking=True).squeeze()) for i in target] + else: + target = target.to(self.device, non_blocking=True) + low_res_label_batch = self.resize(target.squeeze()) + + self.optimizer.zero_grad(set_to_none=True) + + # Autocast is a little ****. + # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. + # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) + # So autocast will only be active if we have a cuda device. + output = self.network(data,True, self.patch_size) + del data + + l = self.loss(output['low_res_logits'], low_res_label_batch.unsqueeze(1)) + output_masks = output['masks'] + + # the following is needed for online evaluation. 
Fake dice (green line) + axes = [0] + list(range(2, output_masks.ndim)) + + if self.label_manager.has_regions: + predicted_segmentation_onehot = (torch.sigmoid(output_masks) > 0.5).long() + else: + # no need for softmax + output_seg = output_masks.argmax(1)[:, None] + predicted_segmentation_onehot = torch.zeros(output_masks.shape, device=output_masks.device, + dtype=torch.float32) + predicted_segmentation_onehot.scatter_(1, output_seg, 1) + del output_seg + + if self.label_manager.has_ignore_label: + if not self.label_manager.has_regions: + mask = (target != self.label_manager.ignore_label).float() + # CAREFUL that you don't rely on target after this line! + target[target == self.label_manager.ignore_label] = 0 + else: + mask = 1 - target[:, -1:] + # CAREFUL that you don't rely on target after this line! + target = target[:, :-1] + else: + mask = None + + tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask) + + tp_hard = tp.detach().cpu().numpy() + fp_hard = fp.detach().cpu().numpy() + fn_hard = fn.detach().cpu().numpy() + if not self.label_manager.has_regions: + # if we train with regions all segmentation heads predict some kind of foreground. In conventional + # (softmax training) there needs tobe one output for the background. We are not interested in the + # background Dice + # [1:] in order to remove background + tp_hard = tp_hard[1:] + fp_hard = fp_hard[1:] + fn_hard = fn_hard[1:] + + return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard} + + # def calc_loss(self,outputs, low_res_label_batch, ce_loss, dice_loss, dice_weight: float = 0.8): + # low_res_logits = outputs['low_res_logits'] + # loss_ce = ce_loss(low_res_logits, low_res_label_batch.long()) + # loss_dice = dice_loss(low_res_logits, low_res_label_batch, softmax=True) + # loss = (1 - dice_weight) * loss_ce + dice_weight * loss_dice + # return loss, loss_ce, loss_dice + + # %% + + def configure_optimizers(self): + + # Custom scheduler setup + optimizer = AdamW(filter(lambda p: p.requires_grad, self.network.parameters()), lr=self.initial_lr, + betas=(0.9, 0.999), + weight_decay=0.1) + scheduler = CustomWarmupDecayLR(optimizer, warmup_period=10, max_iterations=self.num_epochs, + base_lr=self.initial_lr, weight_decay=self.lr_decay) + + self.print_to_log_file(f"Using optimizer {optimizer}") + self.print_to_log_file(f"Using scheduler {scheduler}") + + return optimizer, scheduler + + def set_deep_supervision_enabled(self, enabled: bool): + pass + + def save_checkpoint(self, filename: str) -> None: + if self.local_rank == 0: + if not self.disable_checkpointing: + if self.is_ddp: + mod = self.network.module + else: + mod = self.network + if isinstance(mod, OptimizedModule): + mod = mod._orig_mod + + checkpoint = { + 'network_weights': mod.get_lora_parameters(), + 'optimizer_state': self.optimizer.state_dict(), + 'grad_scaler_state': self.grad_scaler.state_dict() if self.grad_scaler is not None else None, + 'logging': self.logger.get_checkpoint(), + '_best_ema': self._best_ema, + 'current_epoch': self.current_epoch + 1, + 'init_args': self.my_init_kwargs, + 'trainer_name': self.__class__.__name__, + 'inference_allowed_mirroring_axes': self.inference_allowed_mirroring_axes, + } + torch.save(checkpoint, filename) + else: + self.print_to_log_file('No checkpoint written, checkpointing is disabled') + + def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) -> None: + if not self.was_initialized: + self.initialize() + + if 
isinstance(filename_or_checkpoint, str):
+            checkpoint = torch.load(filename_or_checkpoint, map_location=self.device)
+        # if state dict comes from nn.DataParallel but we use non-parallel model here then the state dict keys do not
+        # match. Use heuristic to make it match
+        new_state_dict = {}
+        for k, value in checkpoint['network_weights'].items():
+            key = k
+            if key not in self.network.state_dict().keys() and key.startswith('module.'):
+                key = key[7:]
+            new_state_dict[key] = value
+
+        self.my_init_kwargs = checkpoint['init_args']
+        self.current_epoch = checkpoint['current_epoch']
+        self.logger.load_checkpoint(checkpoint['logging'])
+        self._best_ema = checkpoint['_best_ema']
+        self.inference_allowed_mirroring_axes = checkpoint[
+            'inference_allowed_mirroring_axes'] if 'inference_allowed_mirroring_axes' in checkpoint.keys() else self.inference_allowed_mirroring_axes
+
+        # handle the different state dict naming schemes (DDP and/or compiled networks)
+        if self.is_ddp:
+            if isinstance(self.network.module, OptimizedModule):
+                self.network.module._orig_mod.load_lora_parameters(new_state_dict)
+            else:
+                self.network.module.load_lora_parameters(new_state_dict)
+        else:
+            if isinstance(self.network, OptimizedModule):
+                self.network._orig_mod.load_lora_parameters(new_state_dict)
+            else:
+                self.network.load_lora_parameters(new_state_dict)
+        self.optimizer.load_state_dict(checkpoint['optimizer_state'])
+        if self.grad_scaler is not None:
+            if checkpoint['grad_scaler_state'] is not None:
+                self.grad_scaler.load_state_dict(checkpoint['grad_scaler_state'])
+
+
+class nnUNetTrainerV2_SAMed_h_r_4(nnUNetTrainerSAMed):
+    """
+    SAM ViT-H image encoder fine-tuned with LoRA (rank 4)
+    """
+
+    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+                 device: torch.device = torch.device('cuda')):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.patch_size = 512
+        self.resize = Resize(spatial_size=(128, 128), mode='nearest')
+        # self.configuration_manager.patch_size=[self.patch_size, self.patch_size]
+        self.lr_decay = 7
+
+    @staticmethod
+    def build_network_architecture(plans_manager: PlansManager,
+                                   dataset_json,
+                                   configuration_manager: ConfigurationManager,
+                                   num_input_channels,
+                                   enable_deep_supervision: bool = False) -> nn.Module:
+        label_manager = plans_manager.get_label_manager(dataset_json)
+
+        sam, img_embedding_size = sam_model_registry['vit_h'](image_size=512,
+                                                              num_classes=8,  # To load LoRA weights
+                                                              checkpoint='checkpoints/sam_vit_h_4b8939.pth',
+                                                              pixel_mean=[0, 0, 0],
+                                                              pixel_std=[1, 1, 1])
+        model = LoRA_Sam(sam, 4)
+        # net.load_lora_parameters('checkpoints/epoch_299.pth')
+        model.sam.mask_decoder = MaskDecoder(transformer=model.sam.mask_decoder.transformer,
+                                             transformer_dim=model.sam.mask_decoder.transformer_dim,
+                                             num_multimask_outputs=label_manager.num_segmentation_heads - 1  # remove bg
+                                             )
+
+        return model
+
+
+class nnUNetTrainerV2_SAMed_h_r_4_100epochs(nnUNetTrainerV2_SAMed_h_r_4):
+    """
+    SAM ViT-H image encoder fine-tuned with LoRA (rank 4), trained for 100 epochs
+    """
+
+    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+                 device: torch.device = torch.device('cuda')):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+
+        self.num_epochs = 100
+
+
+class nnUNetTrainerV2_SAMed_b_r_4(nnUNetTrainerSAMed):
+    """
+    SAM ViT-B image encoder fine-tuned with LoRA (rank 4)
+    """
+
+    def __init__(self, plans:
dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.patch_size = 256 + self.resize = Resize(spatial_size=(64, 64), mode='nearest') + + # self.configuration_manager.patch_size=[self.patch_size, self.patch_size] + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + label_manager = plans_manager.get_label_manager(dataset_json) + + sam, img_embedding_size = sam_model_registry['vit_b'](image_size=256, + num_classes=8, # To load LoRA weights + checkpoint='checkpoints/sam_vit_b_01ec64.pth', + pixel_mean=[0, 0, 0], + pixel_std=[1, 1, 1]) + model = LoRA_Sam(sam, 4) + # net.load_lora_parameters('checkpoints/epoch_299.pth') + model.sam.mask_decoder = MaskDecoder(transformer=model.sam.mask_decoder.transformer, + transformer_dim=model.sam.mask_decoder.transformer_dim, + num_multimask_outputs=label_manager.num_segmentation_heads-1 #remove bg + ) + return model + +class nnUNetTrainerV2_SAMed_b_r_4_100epochs(nnUNetTrainerV2_SAMed_b_r_4): + """ + Residual Encoder + UMmaba Bottleneck + Residual Decoder + Skip Connections + """ + + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSegResNet.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSegResNet.py new file mode 100644 index 0000000..5f9b8f7 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSegResNet.py @@ -0,0 +1,154 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler +from torch import nn +import torch + +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn + +from monai.networks.nets import SegResNet +from torch.optim import Adam + +class nnUNetTrainerSegResNet(nnUNetTrainerNoDeepSupervision): + + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device('cuda') + ): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.grad_scaler = None + self.initial_lr = 1e-4 + self.weight_decay = 1e-5 + + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + + label_manager = plans_manager.get_label_manager(dataset_json) + + model = SegResNet( + spatial_dims = len(configuration_manager.patch_size), + init_filters = 32, + in_channels=num_input_channels, + out_channels=label_manager.num_segmentation_heads, + blocks_down=[1, 2, 2, 4], + blocks_up=[1, 1, 1], + ) + + return model + + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if 
isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + l = self.loss(output, target) + l.backward() + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.optimizer.step() + + return {'loss': l.detach().cpu().numpy()} + + + def validation_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + # Autocast is a little bitch. + # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. + # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) + # So autocast will only be active if we have a cuda device. + output = self.network(data) + del data + l = self.loss(output, target) + + # the following is needed for online evaluation. Fake dice (green line) + axes = [0] + list(range(2, output.ndim)) + + if self.label_manager.has_regions: + predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() + else: + # no need for softmax + output_seg = output.argmax(1)[:, None] + predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) + predicted_segmentation_onehot.scatter_(1, output_seg, 1) + del output_seg + + if self.label_manager.has_ignore_label: + if not self.label_manager.has_regions: + mask = (target != self.label_manager.ignore_label).float() + # CAREFUL that you don't rely on target after this line! + target[target == self.label_manager.ignore_label] = 0 + else: + mask = 1 - target[:, -1:] + # CAREFUL that you don't rely on target after this line! + target = target[:, :-1] + else: + mask = None + + tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask) + + tp_hard = tp.detach().cpu().numpy() + fp_hard = fp.detach().cpu().numpy() + fn_hard = fn.detach().cpu().numpy() + if not self.label_manager.has_regions: + # if we train with regions all segmentation heads predict some kind of foreground. In conventional + # (softmax training) there needs tobe one output for the background. 
We are not interested in the + # background Dice + # [1:] in order to remove background + tp_hard = tp_hard[1:] + fp_hard = fp_hard[1:] + fn_hard = fn_hard[1:] + + return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard} + + def configure_optimizers(self): + + optimizer = Adam(self.network.parameters(), lr=self.initial_lr, weight_decay=self.weight_decay, eps=1e-5) + scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs, exponent=0.9) + + return optimizer, scheduler + + def set_deep_supervision_enabled(self, enabled: bool): + pass + + +class nnUNetTrainerSegResNet_100epochs(nnUNetTrainerSegResNet): + + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device('cuda') + ): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSwinUNETR.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSwinUNETR.py new file mode 100644 index 0000000..919a3d4 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerSwinUNETR.py @@ -0,0 +1,159 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn +import torch +from torch.optim import AdamW +from torch.optim.lr_scheduler import CosineAnnealingLR +from torch import nn + +from monai.networks.nets import SwinUNETR + +class nnUNetTrainerSwinUNETR(nnUNetTrainerNoDeepSupervision): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + original_patch_size = self.configuration_manager.patch_size + new_patch_size = [-1] * len(original_patch_size) + for i in range(len(original_patch_size)): + if (original_patch_size[i] / 2**5) < 1 or ((original_patch_size[i] / 2**5) % 1) != 0: + new_patch_size[i] = round(original_patch_size[i] / 2**5 + 0.5) * 2**5 + else: + new_patch_size[i] = original_patch_size[i] + self.configuration_manager.configuration['patch_size'] = new_patch_size + self.print_to_log_file("Patch size changed from {} to {}".format(original_patch_size, new_patch_size)) + self.plans_manager.plans['configurations'][self.configuration_name]['patch_size'] = new_patch_size + + self.grad_scaler = None + self.initial_lr = 8e-4 + self.weight_decay = 0.01 + + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + + label_manager = plans_manager.get_label_manager(dataset_json) + + model = SwinUNETR( + in_channels = num_input_channels, + out_channels = label_manager.num_segmentation_heads, + img_size = configuration_manager.patch_size, + depths = (2, 2, 2, 2), + num_heads = (3, 6, 12, 24), + feature_size = 48, ## + norm_name = "instance", + drop_rate = 0.0, + attn_drop_rate = 0.0, + dropout_path_rate = 0.0, + normalize = True, + use_checkpoint = False, + spatial_dims = len(configuration_manager.patch_size), + downsample = "merging", + use_v2 = 
False, + ) + + return model + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + l = self.loss(output, target) + l.backward() + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.optimizer.step() + + return {'loss': l.detach().cpu().numpy()} + + + def validation_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + # Autocast is a little bitch. + # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. + # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) + # So autocast will only be active if we have a cuda device. + output = self.network(data) + del data + l = self.loss(output, target) + + # the following is needed for online evaluation. Fake dice (green line) + axes = [0] + list(range(2, output.ndim)) + + if self.label_manager.has_regions: + predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() + else: + # no need for softmax + output_seg = output.argmax(1)[:, None] + predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) + predicted_segmentation_onehot.scatter_(1, output_seg, 1) + del output_seg + + if self.label_manager.has_ignore_label: + if not self.label_manager.has_regions: + mask = (target != self.label_manager.ignore_label).float() + # CAREFUL that you don't rely on target after this line! + target[target == self.label_manager.ignore_label] = 0 + else: + mask = 1 - target[:, -1:] + # CAREFUL that you don't rely on target after this line! + target = target[:, :-1] + else: + mask = None + + tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask) + + tp_hard = tp.detach().cpu().numpy() + fp_hard = fp.detach().cpu().numpy() + fn_hard = fn.detach().cpu().numpy() + if not self.label_manager.has_regions: + # if we train with regions all segmentation heads predict some kind of foreground. In conventional + # (softmax training) there needs tobe one output for the background. 
We are not interested in the + # background Dice + # [1:] in order to remove background + tp_hard = tp_hard[1:] + fp_hard = fp_hard[1:] + fn_hard = fn_hard[1:] + + return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard} + + def configure_optimizers(self): + + optimizer = AdamW(self.network.parameters(), lr=self.initial_lr, weight_decay=self.weight_decay, eps=1e-5) + scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs, eta_min=1e-6) + + self.print_to_log_file(f"Using optimizer {optimizer}") + self.print_to_log_file(f"Using scheduler {scheduler}") + + return optimizer, scheduler + + def set_deep_supervision_enabled(self, enabled: bool): + pass + + +class nnUNetTrainerSwinUNETR_100epochs(nnUNetTrainerSwinUNETR): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaBot.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaBot.py new file mode 100644 index 0000000..b8aa30a --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaBot.py @@ -0,0 +1,30 @@ +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from torch import nn +from nnunetv2.nets.UMambaBot import get_umamba_bot_from_plans +import torch + +class nnUNetTrainerUMambaBot(nnUNetTrainer): + """ + Residual Encoder + UMmaba Bottleneck + Residual Decoder + Skip Connections + """ + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = True) -> nn.Module: + + model = get_umamba_bot_from_plans(plans_manager, dataset_json, configuration_manager, + num_input_channels, deep_supervision=enable_deep_supervision) + + print("UMambaBot: {}".format(model)) + + return model + +class nnUNetTrainerUMambaBot_100epochs(nnUNetTrainerUMambaBot): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaEnc.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaEnc.py new file mode 100644 index 0000000..a0347ad --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUMambaEnc.py @@ -0,0 +1,28 @@ +import torch +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from torch import nn + +from nnunetv2.nets.UMambaEnc import get_umamba_enc_from_plans + +class nnUNetTrainerUMambaEnc(nnUNetTrainer): + """ + UMmaba Encoder + Residual Decoder + Skip Connections + """ + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + + @staticmethod + def 
build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = True) -> nn.Module: + + model = get_umamba_enc_from_plans(plans_manager, dataset_json, configuration_manager, + num_input_channels, deep_supervision=enable_deep_supervision) + + print("UMambaEnc: {}".format(model)) + + return model diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUNETR.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUNETR.py new file mode 100644 index 0000000..ad82fea --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/nnUNetTrainerUNETR.py @@ -0,0 +1,149 @@ +from nnunetv2.training.nnUNetTrainer.variants.network_architecture.nnUNetTrainerNoDeepSupervision import \ + nnUNetTrainerNoDeepSupervision +from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from nnunetv2.training.loss.dice import get_tp_fp_fn_tn + +import torch +from torch.optim import AdamW +from torch import nn + +from monai.networks.nets import UNETR + +class nnUNetTrainerUNETR(nnUNetTrainerNoDeepSupervision): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + original_patch_size = self.configuration_manager.patch_size + new_patch_size = [-1] * len(original_patch_size) + for i in range(len(original_patch_size)): + ## 16 is ViT's fixed patch size + if (original_patch_size[i] / 16) < 1 or ((original_patch_size[i] / 16) % 1) != 0: + new_patch_size[i] = round(original_patch_size[i] / 16 + 0.5) * 16 + else: + new_patch_size[i] = original_patch_size[i] + self.configuration_manager.configuration['patch_size'] = new_patch_size + self.print_to_log_file("Patch size changed from {} to {}".format(original_patch_size, new_patch_size)) + self.plans_manager.plans['configurations'][self.configuration_name]['patch_size'] = new_patch_size + + self.initial_lr = 1e-4 + self.grad_scaler = None + self.weight_decay = 0.01 + + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = False) -> nn.Module: + + label_manager = plans_manager.get_label_manager(dataset_json) + + model = UNETR( + in_channels = num_input_channels, + out_channels = label_manager.num_segmentation_heads, + img_size = configuration_manager.patch_size, + feature_size=16, + hidden_size=768, + mlp_dim = 3072, + num_heads = 12, + proj_type = "conv", + norm_name="instance", + res_block=True, + dropout_rate=0.0, + spatial_dims = len(configuration_manager.patch_size), + qkv_bias = False, + save_attn = False, + ) + + return model + + def train_step(self, batch: dict) -> dict: + data = batch['data'] + target = batch['target'] + + data = data.to(self.device, non_blocking=True) + if isinstance(target, list): + target = [i.to(self.device, non_blocking=True) for i in target] + else: + target = target.to(self.device, non_blocking=True) + + self.optimizer.zero_grad(set_to_none=True) + + output = self.network(data) + l = self.loss(output, target) + l.backward() + torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12) + self.optimizer.step() + + return {'loss': l.detach().cpu().numpy()} + + + 
def validation_step(self, batch: dict) -> dict:
+        data = batch['data']
+        target = batch['target']
+
+        data = data.to(self.device, non_blocking=True)
+        if isinstance(target, list):
+            target = [i.to(self.device, non_blocking=True) for i in target]
+        else:
+            target = target.to(self.device, non_blocking=True)
+
+        self.optimizer.zero_grad(set_to_none=True)
+
+        output = self.network(data)
+        del data
+        l = self.loss(output, target)
+
+        # the following is needed for online evaluation. Fake dice (green line)
+        axes = [0] + list(range(2, output.ndim))
+
+        if self.label_manager.has_regions:
+            predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()
+        else:
+            # no need for softmax
+            output_seg = output.argmax(1)[:, None]
+            predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)
+            predicted_segmentation_onehot.scatter_(1, output_seg, 1)
+            del output_seg
+
+        if self.label_manager.has_ignore_label:
+            if not self.label_manager.has_regions:
+                mask = (target != self.label_manager.ignore_label).float()
+                # CAREFUL that you don't rely on target after this line!
+                target[target == self.label_manager.ignore_label] = 0
+            else:
+                mask = 1 - target[:, -1:]
+                # CAREFUL that you don't rely on target after this line!
+                target = target[:, :-1]
+        else:
+            mask = None
+
+        tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)
+
+        tp_hard = tp.detach().cpu().numpy()
+        fp_hard = fp.detach().cpu().numpy()
+        fn_hard = fn.detach().cpu().numpy()
+        if not self.label_manager.has_regions:
+            # if we train with regions all segmentation heads predict some kind of foreground. In conventional
+            # (softmax) training there needs to be one output for the background. We are not interested in the
+            # background Dice
+            # [1:] in order to remove background
+            tp_hard = tp_hard[1:]
+            fp_hard = fp_hard[1:]
+            fn_hard = fn_hard[1:]
+
+        return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}
+
+    def configure_optimizers(self):
+
+        optimizer = AdamW(self.network.parameters(), lr=self.initial_lr, weight_decay=self.weight_decay, eps=1e-5)
+        scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs, exponent=1.0)
+
+        self.print_to_log_file(f"Using optimizer {optimizer}")
+        self.print_to_log_file(f"Using scheduler {scheduler}")
+
+        return optimizer, scheduler
+
+    def set_deep_supervision_enabled(self, enabled: bool):
+        pass
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py
new file mode 100644
index 0000000..fad1fff
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs.py
@@ -0,0 +1,65 @@
+import torch
+from batchgenerators.utilities.file_and_folder_operations import save_json, join, isfile, load_json
+
+from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
+from torch import
distributed as dist + + +class nnUNetTrainerBenchmark_5epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + assert self.fold == 0, "It makes absolutely no sense to specify a certain fold. Stick with 0 so that we can parse the results." + self.disable_checkpointing = True + self.num_epochs = 5 + assert torch.cuda.is_available(), "This only works on GPU" + self.crashed_with_runtime_error = False + + def perform_actual_validation(self, save_probabilities: bool = False): + pass + + def save_checkpoint(self, filename: str) -> None: + # do not trust people to remember that self.disable_checkpointing must be True for this trainer + pass + + def run_training(self): + try: + super().run_training() + except RuntimeError: + self.crashed_with_runtime_error = True + + def on_train_end(self): + super().on_train_end() + + if not self.is_ddp or self.local_rank == 0: + torch_version = torch.__version__ + cudnn_version = torch.backends.cudnn.version() + gpu_name = torch.cuda.get_device_name() + if self.crashed_with_runtime_error: + fastest_epoch = 'Not enough VRAM!' + else: + epoch_times = [i - j for i, j in zip(self.logger.my_fantastic_logging['epoch_end_timestamps'], + self.logger.my_fantastic_logging['epoch_start_timestamps'])] + fastest_epoch = min(epoch_times) + + if self.is_ddp: + num_gpus = dist.get_world_size() + else: + num_gpus = 1 + + benchmark_result_file = join(self.output_folder, 'benchmark_result.json') + if isfile(benchmark_result_file): + old_results = load_json(benchmark_result_file) + else: + old_results = {} + # generate some unique key + my_key = f"{cudnn_version}__{torch_version.replace(' ', '')}__{gpu_name.replace(' ', '')}__gpus_{num_gpus}" + old_results[my_key] = { + 'torch_version': torch_version, + 'cudnn_version': cudnn_version, + 'gpu_name': gpu_name, + 'fastest_epoch': fastest_epoch, + 'num_gpus': num_gpus, + } + save_json(old_results, + join(self.output_folder, 'benchmark_result.json')) diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py new file mode 100644 index 0000000..e7de92c --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/benchmarking/nnUNetTrainerBenchmark_5epochs_noDataLoading.py @@ -0,0 +1,65 @@ +import torch + +from nnunetv2.training.nnUNetTrainer.variants.benchmarking.nnUNetTrainerBenchmark_5epochs import ( + nnUNetTrainerBenchmark_5epochs, +) +from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels + + +class nnUNetTrainerBenchmark_5epochs_noDataLoading(nnUNetTrainerBenchmark_5epochs): + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device("cuda"), + ): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self._set_batch_size_and_oversample() + num_input_channels = determine_num_input_channels( + self.plans_manager, self.configuration_manager, self.dataset_json + ) + patch_size = self.configuration_manager.patch_size + dummy_data = torch.rand((self.batch_size, num_input_channels, *patch_size), device=self.device) + if 
self.enable_deep_supervision:
+            dummy_target = [
+                torch.round(
+                    torch.rand((self.batch_size, 1, *[int(i * j) for i, j in zip(patch_size, k)]), device=self.device)
+                    * max(self.label_manager.all_labels)
+                )
+                for k in self._get_deep_supervision_scales()
+            ]
+        else:
+            raise NotImplementedError("This trainer only supports training with deep supervision enabled")
+        self.dummy_batch = {"data": dummy_data, "target": dummy_target}
+
+    def get_dataloaders(self):
+        return None, None
+
+    def run_training(self):
+        try:
+            self.on_train_start()
+
+            for epoch in range(self.current_epoch, self.num_epochs):
+                self.on_epoch_start()
+
+                self.on_train_epoch_start()
+                train_outputs = []
+                for batch_id in range(self.num_iterations_per_epoch):
+                    train_outputs.append(self.train_step(self.dummy_batch))
+                self.on_train_epoch_end(train_outputs)
+
+                with torch.no_grad():
+                    self.on_validation_epoch_start()
+                    val_outputs = []
+                    for batch_id in range(self.num_val_iterations_per_epoch):
+                        val_outputs.append(self.validation_step(self.dummy_batch))
+                    self.on_validation_epoch_end(val_outputs)
+
+                self.on_epoch_end()
+
+            self.on_train_end()
+        except RuntimeError:
+            self.crashed_with_runtime_error = True
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDA5.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDA5.py
new file mode 100644
index 0000000..7250fb8
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDA5.py
@@ -0,0 +1,422 @@
+from typing import List, Union, Tuple
+
+import numpy as np
+import torch
+from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter
+from batchgenerators.transforms.abstract_transforms import AbstractTransform, Compose
+from batchgenerators.transforms.color_transforms import BrightnessTransform, ContrastAugmentationTransform, \
+    GammaTransform
+from batchgenerators.transforms.local_transforms import BrightnessGradientAdditiveTransform, LocalGammaTransform
+from batchgenerators.transforms.noise_transforms import MedianFilterTransform, GaussianBlurTransform, \
+    GaussianNoiseTransform, BlankRectangleTransform, SharpeningTransform
+from batchgenerators.transforms.resample_transforms import SimulateLowResolutionTransform
+from batchgenerators.transforms.spatial_transforms import SpatialTransform, Rot90Transform, TransposeAxesTransform, \
+    MirrorTransform
+from batchgenerators.transforms.utility_transforms import OneOfTransform, RemoveLabelTransform, RenameTransform, \
+    NumpyToTensor
+
+from nnunetv2.configuration import ANISO_THRESHOLD
+from nnunetv2.training.data_augmentation.compute_initial_patch_size import get_patch_size
+from nnunetv2.training.data_augmentation.custom_transforms.cascade_transforms import MoveSegAsOneHotToData, \
+    ApplyRandomBinaryOperatorTransform, RemoveRandomConnectedComponentFromOneHotEncodingTransform
+from nnunetv2.training.data_augmentation.custom_transforms.deep_supervision_donwsampling import \
+    DownsampleSegForDSTransform2
+from nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter import \
+    LimitedLenWrapper
+from nnunetv2.training.data_augmentation.custom_transforms.masking import MaskTransform
+from
nnunetv2.training.data_augmentation.custom_transforms.region_based_training import \ + ConvertSegmentationToRegionsTransform +from nnunetv2.training.data_augmentation.custom_transforms.transforms_for_dummy_2d import Convert3DTo2DTransform, \ + Convert2DTo3DTransform +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA + + +class nnUNetTrainerDA5(nnUNetTrainer): + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + """ + This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it. + """ + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation) + if dim == 2: + do_dummy_2d_data_aug = False + # todo revisit this parametrization + if max(patch_size) / min(patch_size) > 1.5: + rotation_for_DA = { + 'x': (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + else: + rotation_for_DA = { + 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + mirror_axes = (0, 1) + elif dim == 3: + # todo this is not ideal. We could also have patch_size (64, 16, 128) in which case a full 180deg 2d rot would be bad + # order of the axes is determined by spacing, not image size + do_dummy_2d_data_aug = (max(patch_size) / patch_size[0]) > ANISO_THRESHOLD + if do_dummy_2d_data_aug: + # why do we rotate 180 deg here all the time? We should also restrict it + rotation_for_DA = { + 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi), + 'y': (0, 0), + 'z': (0, 0) + } + else: + rotation_for_DA = { + 'x': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + 'y': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + 'z': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi), + } + mirror_axes = (0, 1, 2) + else: + raise RuntimeError() + + # todo this function is stupid. 
It doesn't even use the correct scale range (we keep things as they were in the + # old nnunet for now) + initial_patch_size = get_patch_size(patch_size[-dim:], + *rotation_for_DA.values(), + (0.7, 1.43)) + if do_dummy_2d_data_aug: + initial_patch_size[0] = patch_size[0] + + self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}') + self.inference_allowed_mirroring_axes = mirror_axes + + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + @staticmethod + def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]], + rotation_for_DA: dict, + deep_supervision_scales: Union[List, Tuple, None], + mirror_axes: Tuple[int, ...], + do_dummy_2d_data_aug: bool, + order_resampling_data: int = 3, + order_resampling_seg: int = 1, + border_val_seg: int = -1, + use_mask_for_norm: List[bool] = None, + is_cascaded: bool = False, + foreground_labels: Union[Tuple[int, ...], List[int]] = None, + regions: List[Union[List[int], Tuple[int, ...], int]] = None, + ignore_label: int = None) -> AbstractTransform: + matching_axes = np.array([sum([i == j for j in patch_size]) for i in patch_size]) + valid_axes = list(np.where(matching_axes == np.max(matching_axes))[0]) + + tr_transforms = [] + + if do_dummy_2d_data_aug: + ignore_axes = (0,) + tr_transforms.append(Convert3DTo2DTransform()) + patch_size_spatial = patch_size[1:] + else: + patch_size_spatial = patch_size + ignore_axes = None + + tr_transforms.append( + SpatialTransform( + patch_size_spatial, + patch_center_dist_from_border=None, + do_elastic_deform=False, + do_rotation=True, + angle_x=rotation_for_DA['x'], + angle_y=rotation_for_DA['y'], + angle_z=rotation_for_DA['z'], + p_rot_per_axis=0.5, + do_scale=True, + scale=(0.7, 1.43), + border_mode_data="constant", + border_cval_data=0, + order_data=order_resampling_data, + border_mode_seg="constant", + border_cval_seg=-1, + order_seg=order_resampling_seg, + random_crop=False, + p_el_per_sample=0.2, + p_scale_per_sample=0.2, + p_rot_per_sample=0.4, + independent_scale_for_each_axis=True, + ) + ) + + if do_dummy_2d_data_aug: + tr_transforms.append(Convert2DTo3DTransform()) + + if np.any(matching_axes > 1): + tr_transforms.append( + Rot90Transform( + (0, 1, 2, 3), axes=valid_axes, data_key='data', label_key='seg', p_per_sample=0.5 + ), + ) + + if np.any(matching_axes > 1): + tr_transforms.append( + TransposeAxesTransform(valid_axes, data_key='data', label_key='seg', p_per_sample=0.5) + ) + + tr_transforms.append(OneOfTransform([ + MedianFilterTransform( + (2, 8), + same_for_each_channel=False, + p_per_sample=0.2, + p_per_channel=0.5 + ), + GaussianBlurTransform((0.3, 1.5), + different_sigma_per_channel=True, + p_per_sample=0.2, + p_per_channel=0.5) + ])) + + tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) + + tr_transforms.append(BrightnessTransform(0, + 0.5, + per_channel=True, + p_per_sample=0.1, + p_per_channel=0.5 + ) + ) + + tr_transforms.append(OneOfTransform( + [ + ContrastAugmentationTransform( + contrast_range=(0.5, 2), + preserve_range=True, + per_channel=True, + data_key='data', + p_per_sample=0.2, + p_per_channel=0.5 + ), + ContrastAugmentationTransform( + contrast_range=(0.5, 2), + preserve_range=False, + per_channel=True, + data_key='data', + p_per_sample=0.2, + p_per_channel=0.5 + ), + ] + )) + + tr_transforms.append( + SimulateLowResolutionTransform(zoom_range=(0.25, 1), + per_channel=True, + p_per_channel=0.5, + order_downsample=0, + order_upsample=3, + p_per_sample=0.15, + ignore_axes=ignore_axes + ) + ) + + 
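+        # Editor's note on the matching_axes / valid_axes logic at the top of this method:
+        # Rot90Transform and TransposeAxesTransform above only permute axes whose patch
+        # extents match. A hedged illustration (hypothetical patch size, not taken from a
+        # plans file): patch_size (128, 128, 64) yields matching_axes [2, 2, 1] and
+        # valid_axes [0, 1], so 90-degree rotations and transpositions only swap the two
+        # 128-voxel axes and never mix in the shorter 64-voxel axis.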
tr_transforms.append(
+            GammaTransform((0.7, 1.5), invert_image=True, per_channel=True, retain_stats=True, p_per_sample=0.1))
+        tr_transforms.append(
+            GammaTransform((0.7, 1.5), invert_image=False, per_channel=True, retain_stats=True, p_per_sample=0.1))
+
+        if mirror_axes is not None and len(mirror_axes) > 0:
+            tr_transforms.append(MirrorTransform(mirror_axes))
+
+        tr_transforms.append(
+            BlankRectangleTransform([[max(1, p // 10), p // 3] for p in patch_size],
+                                    rectangle_value=np.mean,
+                                    num_rectangles=(1, 5),
+                                    force_square=False,
+                                    p_per_sample=0.4,
+                                    p_per_channel=0.5
+                                    )
+        )
+
+        tr_transforms.append(
+            BrightnessGradientAdditiveTransform(
+                _brightnessadditive_localgamma_transform_scale,
+                (-0.5, 1.5),
+                max_strength=_brightness_gradient_additive_max_strength,
+                mean_centered=False,
+                same_for_all_channels=False,
+                p_per_sample=0.3,
+                p_per_channel=0.5
+            )
+        )
+
+        tr_transforms.append(
+            LocalGammaTransform(
+                _brightnessadditive_localgamma_transform_scale,
+                (-0.5, 1.5),
+                _local_gamma_gamma,
+                same_for_all_channels=False,
+                p_per_sample=0.3,
+                p_per_channel=0.5
+            )
+        )
+
+        tr_transforms.append(
+            SharpeningTransform(
+                strength=(0.1, 1),
+                same_for_each_channel=False,
+                p_per_sample=0.2,
+                p_per_channel=0.5
+            )
+        )
+
+        if use_mask_for_norm is not None and any(use_mask_for_norm):
+            tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]],
+                                               mask_idx_in_seg=0, set_outside_to=0))
+
+        tr_transforms.append(RemoveLabelTransform(-1, 0))
+
+        if is_cascaded:
+            if ignore_label is not None:
+                raise NotImplementedError('ignore label not yet supported in cascade')
+            assert foreground_labels is not None, 'We need all_labels for cascade augmentations'
+            use_labels = [i for i in foreground_labels if i != 0]
+            tr_transforms.append(MoveSegAsOneHotToData(1, use_labels, 'seg', 'data'))
+            tr_transforms.append(ApplyRandomBinaryOperatorTransform(
+                channel_idx=list(range(-len(use_labels), 0)),
+                p_per_sample=0.4,
+                key="data",
+                strel_size=(1, 8),
+                p_per_label=1))
+            tr_transforms.append(
+                RemoveRandomConnectedComponentFromOneHotEncodingTransform(
+                    channel_idx=list(range(-len(use_labels), 0)),
+                    key="data",
+                    p_per_sample=0.2,
+                    fill_with_other_class_p=0,
+                    dont_do_if_covers_more_than_x_percent=0.15))
+
+        tr_transforms.append(RenameTransform('seg', 'target', True))
+
+        if regions is not None:
+            # the ignore label must also be converted
+            tr_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]
+                                                                       if ignore_label is not None else regions,
+                                                                       'target', 'target'))
+
+        if deep_supervision_scales is not None:
+            tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',
+                                                              output_key='target'))
+        tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))
+        tr_transforms = Compose(tr_transforms)
+        return tr_transforms
+
+
+class nnUNetTrainerDA5ord0(nnUNetTrainerDA5):
+    def get_dataloaders(self):
+        """
+        changed order_resampling_data, order_resampling_seg
+        """
+        # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether
+        # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be
+        patch_size = self.configuration_manager.patch_size
+        dim = len(patch_size)
+
+        # needed for deep supervision: how much do we need to downscale the segmentation targets for the different
+        # outputs?
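+        # Editor's note, a hedged illustration (actual values depend on the plans' pooling
+        # strides): with three stride-2 poolings, _get_deep_supervision_scales() would
+        # return something like [[1, 1, 1], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]], one
+        # relative scale per deep supervision output, and DownsampleSegForDSTransform2
+        # later creates one downsampled target per entry.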
+ deep_supervision_scales = self._get_deep_supervision_scales() + + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + self.configure_rotation_dummyDA_mirroring_and_inital_patch_size() + + # training pipeline + tr_transforms = self.get_training_transforms( + patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, + order_resampling_data=0, order_resampling_seg=0, + use_mask_for_norm=self.configuration_manager.use_mask_for_norm, + is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + # validation pipeline + val_transforms = self.get_validation_transforms(deep_supervision_scales, + is_cascaded=self.is_cascaded, + foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if + self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim) + + allowed_num_processes = get_allowed_n_proc_DA() + if allowed_num_processes == 0: + mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms) + mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms) + else: + mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, + allowed_num_processes, 6, None, True, 0.02) + mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, + max(1, allowed_num_processes // 2), 3, None, True, 0.02) + + return mt_gen_train, mt_gen_val + + +def _brightnessadditive_localgamma_transform_scale(x, y): + return np.exp(np.random.uniform(np.log(x[y] // 6), np.log(x[y]))) + + +def _brightness_gradient_additive_max_strength(_x, _y): + return np.random.uniform(-5, -1) if np.random.uniform() < 0.5 else np.random.uniform(1, 5) + + +def _local_gamma_gamma(): + return np.random.uniform(0.01, 0.8) if np.random.uniform() < 0.5 else np.random.uniform(1.5, 4) + + +class nnUNetTrainerDA5Segord0(nnUNetTrainerDA5): + def get_dataloaders(self): + """ + changed order_resampling_data, order_resampling_seg + """ + # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether + # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + + # needed for deep supervision: how much do we need to downscale the segmentation targets for the different + # outputs? 
+ deep_supervision_scales = self._get_deep_supervision_scales() + + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + self.configure_rotation_dummyDA_mirroring_and_inital_patch_size() + + # training pipeline + tr_transforms = self.get_training_transforms( + patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, + order_resampling_data=3, order_resampling_seg=0, + use_mask_for_norm=self.configuration_manager.use_mask_for_norm, + is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + # validation pipeline + val_transforms = self.get_validation_transforms(deep_supervision_scales, + is_cascaded=self.is_cascaded, + foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if + self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim) + + allowed_num_processes = get_allowed_n_proc_DA() + if allowed_num_processes == 0: + mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms) + mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms) + else: + mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, + allowed_num_processes, 6, None, True, 0.02) + mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, + max(1, allowed_num_processes // 2), 3, None, True, 0.02) + + return mt_gen_train, mt_gen_val + + +class nnUNetTrainerDA5_10epochs(nnUNetTrainerDA5): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 10 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDAOrd0.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDAOrd0.py new file mode 100644 index 0000000..e87ff8f --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerDAOrd0.py @@ -0,0 +1,104 @@ +from batchgenerators.dataloading.single_threaded_augmenter import SingleThreadedAugmenter + +from nnunetv2.training.data_augmentation.custom_transforms.limited_length_multithreaded_augmenter import \ + LimitedLenWrapper +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.default_n_proc_DA import get_allowed_n_proc_DA + + +class nnUNetTrainerDAOrd0(nnUNetTrainer): + def get_dataloaders(self): + """ + changed order_resampling_data, order_resampling_seg + """ + # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether + # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + + # needed for deep supervision: how much do we need to downscale the segmentation targets for the different + # outputs? 
+ deep_supervision_scales = self._get_deep_supervision_scales() + + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + self.configure_rotation_dummyDA_mirroring_and_inital_patch_size() + + # training pipeline + tr_transforms = self.get_training_transforms( + patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, + order_resampling_data=0, order_resampling_seg=0, + use_mask_for_norm=self.configuration_manager.use_mask_for_norm, + is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + # validation pipeline + val_transforms = self.get_validation_transforms(deep_supervision_scales, + is_cascaded=self.is_cascaded, + foreground_labels=self.label_manager.all_labels, + regions=self.label_manager.foreground_regions if + self.label_manager.has_regions else None, + ignore_label=self.label_manager.ignore_label) + + dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim) + + allowed_num_processes = get_allowed_n_proc_DA() + if allowed_num_processes == 0: + mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms) + mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms) + else: + mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, + allowed_num_processes, 6, None, True, 0.02) + mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, + max(1, allowed_num_processes // 2), 3, None, True, 0.02) + + return mt_gen_train, mt_gen_val + + +class nnUNetTrainer_DASegOrd0(nnUNetTrainer): + def get_dataloaders(self): + """ + changed order_resampling_data, order_resampling_seg + """ + # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether + # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + + # needed for deep supervision: how much do we need to downscale the segmentation targets for the different + # outputs? 
+        deep_supervision_scales = self._get_deep_supervision_scales()
+
+        rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \
+            self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()
+
+        # training pipeline
+        tr_transforms = self.get_training_transforms(
+            patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug,
+            order_resampling_data=3, order_resampling_seg=0,
+            use_mask_for_norm=self.configuration_manager.use_mask_for_norm,
+            is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels,
+            regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None,
+            ignore_label=self.label_manager.ignore_label)
+
+        # validation pipeline
+        val_transforms = self.get_validation_transforms(deep_supervision_scales,
+                                                        is_cascaded=self.is_cascaded,
+                                                        foreground_labels=self.label_manager.all_labels,
+                                                        regions=self.label_manager.foreground_regions if
+                                                        self.label_manager.has_regions else None,
+                                                        ignore_label=self.label_manager.ignore_label)
+
+        dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim)
+
+        allowed_num_processes = get_allowed_n_proc_DA()
+        if allowed_num_processes == 0:
+            mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)
+            mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)
+        else:
+            mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms,
+                                             allowed_num_processes, 6, None, True, 0.02)
+            mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms,
+                                           max(1, allowed_num_processes // 2), 3, None, True, 0.02)
+
+        return mt_gen_train, mt_gen_val
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py
new file mode 100644
index 0000000..17f3586
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoDA.py
@@ -0,0 +1,40 @@
+from typing import Union, Tuple, List
+
+from batchgenerators.transforms.abstract_transforms import AbstractTransform
+
+from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
+import numpy as np
+
+
+class nnUNetTrainerNoDA(nnUNetTrainer):
+    @staticmethod
+    def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],
+                                rotation_for_DA: dict,
+                                deep_supervision_scales: Union[List, Tuple, None],
+                                mirror_axes: Tuple[int, ...],
+                                do_dummy_2d_data_aug: bool,
+                                order_resampling_data: int = 1,
+                                order_resampling_seg: int = 0,
+                                border_val_seg: int = -1,
+                                use_mask_for_norm: List[bool] = None,
+                                is_cascaded: bool = False,
+                                foreground_labels: Union[Tuple[int, ...], List[int]] = None,
+                                regions: List[Union[List[int], Tuple[int, ...], int]] = None,
+                                ignore_label: int = None) -> AbstractTransform:
+        return nnUNetTrainer.get_validation_transforms(deep_supervision_scales, is_cascaded, foreground_labels,
+                                                       regions, ignore_label)
+
+    def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int):
+        return super().get_plain_dataloaders(
+            initial_patch_size=self.configuration_manager.patch_size,
+            dim=dim
+        )
+
+    def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):
+        # we need to disable mirroring here so that no mirroring will be applied in inference!
+ rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py new file mode 100644 index 0000000..18ea1ea --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/data_augmentation/nnUNetTrainerNoMirroring.py @@ -0,0 +1,28 @@ +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer + + +class nnUNetTrainerNoMirroring(nnUNetTrainer): + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + +class nnUNetTrainer_onlyMirror01(nnUNetTrainer): + """ + Only mirrors along spatial axes 0 and 1 for 3D and 0 for 2D + """ + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + patch_size = self.configuration_manager.patch_size + dim = len(patch_size) + if dim == 2: + mirror_axes = (0, ) + else: + mirror_axes = (0, 1) + self.inference_allowed_mirroring_axes = mirror_axes + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py new file mode 100644 index 0000000..fdc0fea --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerCELoss.py @@ -0,0 +1,41 @@ +import torch +from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.training.loss.robust_ce_loss import RobustCrossEntropyLoss +import numpy as np + + +class nnUNetTrainerCELoss(nnUNetTrainer): + def _build_loss(self): + assert not self.label_manager.has_regions, "regions not supported by this trainer" + loss = RobustCrossEntropyLoss( + weight=None, ignore_index=self.label_manager.ignore_label if self.label_manager.has_ignore_label else -100 + ) + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + + +class nnUNetTrainerCELoss_5epochs(nnUNetTrainerCELoss): + def __init__( + self, + plans: dict, + configuration: str, + fold: int, + dataset_json: dict, + unpack_dataset: bool = True, + device: torch.device = torch.device("cuda"), + ): + """used for debugging plans etc""" + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 5 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py new file mode 100644 index 0000000..b139286 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerDiceLoss.py @@ -0,0 +1,60 @@ +import numpy as np +import torch + +from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss +from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper +from nnunetv2.training.loss.dice import MemoryEfficientSoftDiceLoss +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.helpers import softmax_helper_dim1 + + +class nnUNetTrainerDiceLoss(nnUNetTrainer): + def _build_loss(self): + loss = MemoryEfficientSoftDiceLoss(**{'batch_dice': self.configuration_manager.batch_dice, + 'do_bg': self.label_manager.has_regions, 'smooth': 1e-5, 'ddp': self.is_ddp}, + apply_nonlin=torch.sigmoid if self.label_manager.has_regions else softmax_helper_dim1) + + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + + +class nnUNetTrainerDiceCELoss_noSmooth(nnUNetTrainer): + def _build_loss(self): + # set smooth to 0 + if self.label_manager.has_regions: + loss = DC_and_BCE_loss({}, + {'batch_dice': self.configuration_manager.batch_dice, + 'do_bg': True, 'smooth': 0, 'ddp': self.is_ddp}, + use_ignore_label=self.label_manager.ignore_label is not None, + dice_class=MemoryEfficientSoftDiceLoss) + else: + loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, + 'smooth': 0, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, + ignore_label=self.label_manager.ignore_label, + dice_class=MemoryEfficientSoftDiceLoss) + + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerTopkLoss.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerTopkLoss.py new file mode 100644 index 0000000..5eff10e --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/loss/nnUNetTrainerTopkLoss.py @@ -0,0 +1,76 @@ +from nnunetv2.training.loss.compound_losses import DC_and_topk_loss +from nnunetv2.training.loss.deep_supervision import DeepSupervisionWrapper +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +import numpy as np +from nnunetv2.training.loss.robust_ce_loss import TopKLoss + + +class nnUNetTrainerTopk10Loss(nnUNetTrainer): + def _build_loss(self): + assert not self.label_manager.has_regions, "regions not supported by this trainer" + loss = TopKLoss( + ignore_index=self.label_manager.ignore_label if self.label_manager.has_ignore_label else -100, k=10 + ) + + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + + +class nnUNetTrainerTopk10LossLS01(nnUNetTrainer): + def _build_loss(self): + assert not self.label_manager.has_regions, "regions not supported by this trainer" + loss = TopKLoss( + ignore_index=self.label_manager.ignore_label if self.label_manager.has_ignore_label else -100, + k=10, + label_smoothing=0.1, + ) + + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss + + +class nnUNetTrainerDiceTopK10Loss(nnUNetTrainer): + def _build_loss(self): + assert not self.label_manager.has_regions, "regions not supported by this trainer" + loss = DC_and_topk_loss( + {"batch_dice": self.configuration_manager.batch_dice, "smooth": 1e-5, "do_bg": False, "ddp": self.is_ddp}, + {"k": 10, "label_smoothing": 0.0}, + weight_ce=1, + weight_dice=1, + ignore_label=self.label_manager.ignore_label, + ) + if self.enable_deep_supervision: + deep_supervision_scales = self._get_deep_supervision_scales() + + # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases + # this gives higher resolution outputs more weight in the loss + weights = np.array([1 / (2**i) for i in range(len(deep_supervision_scales))]) + weights[-1] = 0 + + # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 + weights = weights / weights.sum() + # now wrap the loss + loss = DeepSupervisionWrapper(loss, weights) + return loss diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py new file mode 100644 index 0000000..60455f2 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/lr_schedule/nnUNetTrainerCosAnneal.py @@ -0,0 +1,13 @@ +import torch +from torch.optim.lr_scheduler import CosineAnnealingLR + +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer + + +class nnUNetTrainerCosAnneal(nnUNetTrainer): + def configure_optimizers(self): + optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, + momentum=0.99, nesterov=True) + lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs) + return optimizer, lr_scheduler + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py new file mode 100644 index 0000000..5f6190c --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerBN.py @@ -0,0 +1,73 @@ +from dynamic_network_architectures.architectures.unet import ResidualEncoderUNet, PlainConvUNet +from dynamic_network_architectures.building_blocks.helper import convert_dim_to_conv_op, get_matching_batchnorm +from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0, InitWeights_He +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from torch import nn + + +class nnUNetTrainerBN(nnUNetTrainer): + @staticmethod + def build_network_architecture(plans_manager: PlansManager, + dataset_json, + configuration_manager: ConfigurationManager, + num_input_channels, + enable_deep_supervision: bool = True) -> nn.Module: + num_stages = len(configuration_manager.conv_kernel_sizes) + + dim = len(configuration_manager.conv_kernel_sizes[0]) + conv_op = convert_dim_to_conv_op(dim) + + label_manager = plans_manager.get_label_manager(dataset_json) + + segmentation_network_class_name = configuration_manager.UNet_class_name + mapping = { + 'PlainConvUNet': PlainConvUNet, + 'ResidualEncoderUNet': ResidualEncoderUNet + } + kwargs = { + 'PlainConvUNet': { + 'conv_bias': True, + 'norm_op': get_matching_batchnorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + }, + 'ResidualEncoderUNet': { + 'conv_bias': True, + 'norm_op': get_matching_batchnorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': 
nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True},
+            }
+        }
+        assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \
+                                                                  'is non-standard (maybe your own?). You\'ll have to dive ' \
+                                                                  'into either this ' \
+                                                                  'function (get_network_from_plans) or ' \
+                                                                  'the init of your nnUNetModule to accommodate that.'
+        network_class = mapping[segmentation_network_class_name]
+
+        conv_or_blocks_per_stage = {
+            'n_conv_per_stage'
+            if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder,
+            'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder
+        }
+        # network class name!!
+        model = network_class(
+            input_channels=num_input_channels,
+            n_stages=num_stages,
+            features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,
+                                    configuration_manager.unet_max_num_features) for i in range(num_stages)],
+            conv_op=conv_op,
+            kernel_sizes=configuration_manager.conv_kernel_sizes,
+            strides=configuration_manager.pool_op_kernel_sizes,
+            num_classes=label_manager.num_segmentation_heads,
+            deep_supervision=enable_deep_supervision,
+            **conv_or_blocks_per_stage,
+            **kwargs[segmentation_network_class_name]
+        )
+        model.apply(InitWeights_He(1e-2))
+        if network_class == ResidualEncoderUNet:
+            model.apply(init_last_bn_before_add_to_0)
+        return model
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
new file mode 100644
index 0000000..1152fbe
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
@@ -0,0 +1,16 @@
+from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
+import torch
+
+
+class nnUNetTrainerNoDeepSupervision(nnUNetTrainer):
+    def __init__(
+        self,
+        plans: dict,
+        configuration: str,
+        fold: int,
+        dataset_json: dict,
+        unpack_dataset: bool = True,
+        device: torch.device = torch.device("cuda"),
+    ):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.enable_deep_supervision = False
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py
new file mode 100644
index 0000000..be5a7f4
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdam.py
@@ -0,0 +1,58 @@
+import torch
+from torch.optim import Adam, AdamW
+
+from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler
+from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
+
+
+class nnUNetTrainerAdam(nnUNetTrainer):
+    def configure_optimizers(self):
+        optimizer = AdamW(self.network.parameters(),
+                          lr=self.initial_lr,
+                          weight_decay=self.weight_decay,
+                          amsgrad=True)
+        # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
+        #                             momentum=0.99, nesterov=True)
+        lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)
+        return optimizer, lr_scheduler
+
+
+class
nnUNetTrainerVanillaAdam(nnUNetTrainer): + def configure_optimizers(self): + optimizer = Adam(self.network.parameters(), + lr=self.initial_lr, + weight_decay=self.weight_decay) + # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, + # momentum=0.99, nesterov=True) + lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs) + return optimizer, lr_scheduler + + +class nnUNetTrainerVanillaAdam1en3(nnUNetTrainerVanillaAdam): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.initial_lr = 1e-3 + + +class nnUNetTrainerVanillaAdam3en4(nnUNetTrainerVanillaAdam): + # https://twitter.com/karpathy/status/801621764144971776?lang=en + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.initial_lr = 3e-4 + + +class nnUNetTrainerAdam1en3(nnUNetTrainerAdam): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.initial_lr = 1e-3 + + +class nnUNetTrainerAdam3en4(nnUNetTrainerAdam): + # https://twitter.com/karpathy/status/801621764144971776?lang=en + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.initial_lr = 3e-4 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdan.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdan.py new file mode 100644 index 0000000..8747f47 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/optimizer/nnUNetTrainerAdan.py @@ -0,0 +1,66 @@ +import torch + +from nnunetv2.training.lr_scheduler.polylr import PolyLRScheduler +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer +from torch.optim.lr_scheduler import CosineAnnealingLR +try: + from adan_pytorch import Adan +except ImportError: + Adan = None + + +class nnUNetTrainerAdan(nnUNetTrainer): + def configure_optimizers(self): + if Adan is None: + raise RuntimeError('This trainer requires adan_pytorch to be installed, install with "pip install adan-pytorch"') + optimizer = Adan(self.network.parameters(), + lr=self.initial_lr, + # betas=(0.02, 0.08, 0.01), defaults + weight_decay=self.weight_decay) + # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay, + # momentum=0.99, nesterov=True) + lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs) + return optimizer, lr_scheduler + + +class nnUNetTrainerAdan1en3(nnUNetTrainerAdan): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.initial_lr = 1e-3 + + +class nnUNetTrainerAdan3en4(nnUNetTrainerAdan): + # 
https://twitter.com/karpathy/status/801621764144971776?lang=en
+    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+                 device: torch.device = torch.device('cuda')):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.initial_lr = 3e-4
+
+
+class nnUNetTrainerAdan1en1(nnUNetTrainerAdan):
+    # this trainer makes no sense -> nan!
+    def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+                 device: torch.device = torch.device('cuda')):
+        super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+        self.initial_lr = 1e-1
+
+
+class nnUNetTrainerAdanCosAnneal(nnUNetTrainerAdan):
+    # def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,
+    #              device: torch.device = torch.device('cuda')):
+    #     super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device)
+    #     self.num_epochs = 15
+
+    def configure_optimizers(self):
+        if Adan is None:
+            raise RuntimeError('This trainer requires adan_pytorch to be installed, install with "pip install adan-pytorch"')
+        optimizer = Adan(self.network.parameters(),
+                         lr=self.initial_lr,
+                         # betas=(0.02, 0.08, 0.01), defaults
+                         weight_decay=self.weight_decay)
+        # optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,
+        #                             momentum=0.99, nesterov=True)
+        lr_scheduler = CosineAnnealingLR(optimizer, T_max=self.num_epochs)
+        return optimizer, lr_scheduler
+
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/nnUNetTrainer_probabilisticOversampling.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/nnUNetTrainer_probabilisticOversampling.py
new file mode 100644
index 0000000..89fef48
--- /dev/null
+++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/sampling/nnUNetTrainer_probabilisticOversampling.py
@@ -0,0 +1,76 @@
+from typing import Tuple
+
+import torch
+
+from nnunetv2.training.dataloading.data_loader_2d import nnUNetDataLoader2D
+from nnunetv2.training.dataloading.data_loader_3d import nnUNetDataLoader3D
+from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer
+import numpy as np
+
+
+class nnUNetTrainer_probabilisticOversampling(nnUNetTrainer):
+    """
+    Foreground oversampling happens randomly per sample instead of deterministically for the last 33% of the
+    samples in each batch. Since most trainings happen with batch size 2 and nnU-Net guarantees at least one fg
+    sample per batch, the effective rate can be 50%.
+    Here we compute the actual oversampling percentage used by nnUNetTrainer in order to be as consistent as
+    possible. If we switch to this oversampling we can keep it at a constant 0.33 or whatever.
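+    Worked example for the computation in __init__ below (illustrative numbers, assuming the default
+    oversample_foreground_percent of 0.33 and a batch size of 2): round(2 * (1 - 0.33)) = 1, so sample 0
+    is drawn without forced foreground while sample 1 is forced to contain foreground, giving a mean of
+    0.5 over the batch, i.e. the 50% mentioned above.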
+ """ + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.oversample_foreground_percent = float(np.mean( + [not sample_idx < round(self.configuration_manager.batch_size * (1 - self.oversample_foreground_percent)) + for sample_idx in range(self.configuration_manager.batch_size)])) + self.print_to_log_file(f"self.oversample_foreground_percent {self.oversample_foreground_percent}") + + def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int): + dataset_tr, dataset_val = self.get_tr_and_val_datasets() + + if dim == 2: + dl_tr = nnUNetDataLoader2D(dataset_tr, + self.batch_size, + initial_patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None, probabilistic_oversampling=True) + dl_val = nnUNetDataLoader2D(dataset_val, + self.batch_size, + self.configuration_manager.patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None, probabilistic_oversampling=True) + else: + dl_tr = nnUNetDataLoader3D(dataset_tr, + self.batch_size, + initial_patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None, probabilistic_oversampling=True) + dl_val = nnUNetDataLoader3D(dataset_val, + self.batch_size, + self.configuration_manager.patch_size, + self.configuration_manager.patch_size, + self.label_manager, + oversample_foreground_percent=self.oversample_foreground_percent, + sampling_probabilities=None, pad_sides=None, probabilistic_oversampling=True) + return dl_tr, dl_val + + +class nnUNetTrainer_probabilisticOversampling_033(nnUNetTrainer_probabilisticOversampling): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.oversample_foreground_percent = 0.33 + + +class nnUNetTrainer_probabilisticOversampling_010(nnUNetTrainer_probabilisticOversampling): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.oversample_foreground_percent = 0.1 + + diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs.py new file mode 100644 index 0000000..990ce7e --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs.py @@ -0,0 +1,77 @@ +import torch + +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer + + +class nnUNetTrainer_5epochs(nnUNetTrainer): + def 
__init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + """used for debugging plans etc""" + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 5 + + +class nnUNetTrainer_1epoch(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + """used for debugging plans etc""" + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 1 + + +class nnUNetTrainer_10epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + """used for debugging plans etc""" + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 10 + + +class nnUNetTrainer_20epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 20 + + +class nnUNetTrainer_50epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 50 + + + + +class nnUNetTrainer_250epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 250 + + +class nnUNetTrainer_100epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 100 + +class nnUNetTrainer_2000epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 2000 + + +class nnUNetTrainer_4000epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 4000 + + +class nnUNetTrainer_8000epochs(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 8000 diff --git a/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs_NoMirroring.py b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs_NoMirroring.py new file mode 100644 index 
0000000..c16b885 --- /dev/null +++ b/docker/template/src/nnunetv2/training/nnUNetTrainer/variants/training_length/nnUNetTrainer_Xepochs_NoMirroring.py @@ -0,0 +1,60 @@ +import torch + +from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer + + +class nnUNetTrainer_250epochs_NoMirroring(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 250 + + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + +class nnUNetTrainer_2000epochs_NoMirroring(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 2000 + + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + +class nnUNetTrainer_4000epochs_NoMirroring(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 4000 + + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + + +class nnUNetTrainer_8000epochs_NoMirroring(nnUNetTrainer): + def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True, + device: torch.device = torch.device('cuda')): + super().__init__(plans, configuration, fold, dataset_json, unpack_dataset, device) + self.num_epochs = 8000 + + def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self): + rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \ + super().configure_rotation_dummyDA_mirroring_and_inital_patch_size() + mirror_axes = None + self.inference_allowed_mirroring_axes = None + return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes + diff --git a/docker/template/src/nnunetv2/utilities/__init__.py b/docker/template/src/nnunetv2/utilities/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/utilities/collate_outputs.py b/docker/template/src/nnunetv2/utilities/collate_outputs.py new file mode 100644 index 0000000..c9d6798 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/collate_outputs.py @@ -0,0 +1,24 @@ +from typing import List + +import numpy 
as np
+
+
+def collate_outputs(outputs: List[dict]):
+    """
+    used to collate default train_step and validation_step outputs. If you want something different you will need to
+    extend this function
+
+    we expect outputs to be a list of dictionaries where each dict has the same set of keys
+    """
+    collated = {}
+    for k in outputs[0].keys():
+        if np.isscalar(outputs[0][k]):
+            collated[k] = [o[k] for o in outputs]
+        elif isinstance(outputs[0][k], np.ndarray):
+            collated[k] = np.vstack([o[k][None] for o in outputs])
+        elif isinstance(outputs[0][k], list):
+            collated[k] = [item for o in outputs for item in o[k]]
+        else:
+            raise ValueError(f'Cannot collate input of type {type(outputs[0][k])}. '
+                             f'Modify collate_outputs to add this functionality')
+    return collated
\ No newline at end of file
diff --git a/docker/template/src/nnunetv2/utilities/dataset_name_id_conversion.py b/docker/template/src/nnunetv2/utilities/dataset_name_id_conversion.py
new file mode 100644
index 0000000..29ea58a
--- /dev/null
+++ b/docker/template/src/nnunetv2/utilities/dataset_name_id_conversion.py
@@ -0,0 +1,74 @@
+# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import Union
+
+from nnunetv2.paths import nnUNet_preprocessed, nnUNet_raw, nnUNet_results
+from batchgenerators.utilities.file_and_folder_operations import *
+import numpy as np
+
+
+def find_candidate_datasets(dataset_id: int):
+    startswith = "Dataset%03.0d" % dataset_id
+    if nnUNet_preprocessed is not None and isdir(nnUNet_preprocessed):
+        candidates_preprocessed = subdirs(nnUNet_preprocessed, prefix=startswith, join=False)
+    else:
+        candidates_preprocessed = []
+
+    if nnUNet_raw is not None and isdir(nnUNet_raw):
+        candidates_raw = subdirs(nnUNet_raw, prefix=startswith, join=False)
+    else:
+        candidates_raw = []
+
+    candidates_trained_models = []
+    if nnUNet_results is not None and isdir(nnUNet_results):
+        candidates_trained_models += subdirs(nnUNet_results, prefix=startswith, join=False)
+
+    all_candidates = candidates_preprocessed + candidates_raw + candidates_trained_models
+    unique_candidates = np.unique(all_candidates)
+    return unique_candidates
+
+
+def convert_id_to_dataset_name(dataset_id: int):
+    unique_candidates = find_candidate_datasets(dataset_id)
+    if len(unique_candidates) > 1:
+        raise RuntimeError("More than one dataset name found for dataset id %d. Please correct that. (I looked in the "
+                           "following folders:\n%s\n%s\n%s)" % (dataset_id, nnUNet_raw, nnUNet_preprocessed, nnUNet_results))
+    if len(unique_candidates) == 0:
+        raise RuntimeError(f"Could not find a dataset with the ID {dataset_id}. Make sure the requested dataset ID "
+                           f"exists and that nnU-Net knows where raw and preprocessed data are located "
+                           f"(see Documentation - Installation). 
Here are your currently defined folders:\n" + f"nnUNet_preprocessed={os.environ.get('nnUNet_preprocessed') if os.environ.get('nnUNet_preprocessed') is not None else 'None'}\n" + f"nnUNet_results={os.environ.get('nnUNet_results') if os.environ.get('nnUNet_results') is not None else 'None'}\n" + f"nnUNet_raw={os.environ.get('nnUNet_raw') if os.environ.get('nnUNet_raw') is not None else 'None'}\n" + f"If something is not right, adapt your environment variables.") + return unique_candidates[0] + + +def convert_dataset_name_to_id(dataset_name: str): + assert dataset_name.startswith("Dataset") + dataset_id = int(dataset_name[7:10]) + return dataset_id + + +def maybe_convert_to_dataset_name(dataset_name_or_id: Union[int, str]) -> str: + if isinstance(dataset_name_or_id, str) and dataset_name_or_id.startswith("Dataset"): + return dataset_name_or_id + if isinstance(dataset_name_or_id, str): + try: + dataset_name_or_id = int(dataset_name_or_id) + except ValueError: + raise ValueError("dataset_name_or_id was a string and did not start with 'Dataset' so we tried to " + "convert it to a dataset ID (int). That failed, however. Please give an integer number " + "('1', '2', etc) or a correct dataset name. Your input: %s" % dataset_name_or_id) + return convert_id_to_dataset_name(dataset_name_or_id) diff --git a/docker/template/src/nnunetv2/utilities/ddp_allgather.py b/docker/template/src/nnunetv2/utilities/ddp_allgather.py new file mode 100644 index 0000000..c42b3ef --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/ddp_allgather.py @@ -0,0 +1,49 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
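+# Usage sketch for the AllGatherGrad autograd function defined below (an
+# illustrative, hedged example: it assumes torch.distributed has been initialised,
+# e.g. via init_process_group, that every rank contributes a tensor of identical
+# shape, and that compute_local_metric is a hypothetical helper):
+#
+#     local_metric = compute_local_metric()          # per-rank tensor, requires_grad=True
+#     gathered = AllGatherGrad.apply(local_metric)   # shape (world_size, *local_metric.shape)
+#     loss = gathered.mean()                         # backward() routes gradients to local_metric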
+from typing import Any, Optional, Tuple + +import torch +from torch import distributed + + +def print_if_rank0(*args): + if distributed.get_rank() == 0: + print(*args) + + +class AllGatherGrad(torch.autograd.Function): + # stolen from pytorch lightning + @staticmethod + def forward( + ctx: Any, + tensor: torch.Tensor, + group: Optional["torch.distributed.ProcessGroup"] = None, + ) -> torch.Tensor: + ctx.group = group + + gathered_tensor = [torch.zeros_like(tensor) for _ in range(torch.distributed.get_world_size())] + + torch.distributed.all_gather(gathered_tensor, tensor, group=group) + gathered_tensor = torch.stack(gathered_tensor, dim=0) + + return gathered_tensor + + @staticmethod + def backward(ctx: Any, *grad_output: torch.Tensor) -> Tuple[torch.Tensor, None]: + grad_output = torch.cat(grad_output) + + torch.distributed.all_reduce(grad_output, op=torch.distributed.ReduceOp.SUM, async_op=False, group=ctx.group) + + return grad_output[torch.distributed.get_rank()], None + diff --git a/docker/template/src/nnunetv2/utilities/default_n_proc_DA.py b/docker/template/src/nnunetv2/utilities/default_n_proc_DA.py new file mode 100644 index 0000000..3ecc922 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/default_n_proc_DA.py @@ -0,0 +1,44 @@ +import subprocess +import os + + +def get_allowed_n_proc_DA(): + """ + This function is used to set the number of processes used on different Systems. It is specific to our cluster + infrastructure at DKFZ. You can modify it to suit your needs. Everything is allowed. + + IMPORTANT: if the environment variable nnUNet_n_proc_DA is set it will overwrite anything in this script + (see first line). + + Interpret the output as the number of processes used for data augmentation PER GPU. + + The way it is implemented here is simply a look up table. We know the hostnames, CPU and GPU configurations of our + systems and set the numbers accordingly. For example, a system with 4 GPUs and 48 threads can use 12 threads per + GPU without overloading the CPU (technically 11 because we have a main process as well), so that's what we use. 
+ """ + + if 'nnUNet_n_proc_DA' in os.environ.keys(): + use_this = int(os.environ['nnUNet_n_proc_DA']) + else: + hostname = subprocess.getoutput(['hostname']) + if hostname in ['Fabian', ]: + use_this = 12 + elif hostname in ['hdf19-gpu16', 'hdf19-gpu17', 'hdf19-gpu18', 'hdf19-gpu19', 'e230-AMDworkstation']: + use_this = 16 + elif hostname.startswith('e230-dgx1'): + use_this = 10 + elif hostname.startswith('hdf18-gpu') or hostname.startswith('e132-comp'): + use_this = 16 + elif hostname.startswith('e230-dgx2'): + use_this = 6 + elif hostname.startswith('e230-dgxa100-'): + use_this = 28 + elif hostname.startswith('lsf22-gpu'): + use_this = 28 + elif hostname.startswith('hdf19-gpu') or hostname.startswith('e071-gpu'): + use_this = 12 + else: + use_this = 12 # default value + + use_this = min(use_this, os.cpu_count()) + return use_this diff --git a/docker/template/src/nnunetv2/utilities/file_path_utilities.py b/docker/template/src/nnunetv2/utilities/file_path_utilities.py new file mode 100644 index 0000000..a1c9622 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/file_path_utilities.py @@ -0,0 +1,123 @@ +from multiprocessing import Pool +from typing import Union, Tuple +import numpy as np +from batchgenerators.utilities.file_and_folder_operations import * + +from nnunetv2.configuration import default_num_processes +from nnunetv2.paths import nnUNet_results +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + +def convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration): + return f'{trainer_name}__{plans_identifier}__{configuration}' + + +def convert_identifier_to_trainer_plans_config(identifier: str): + return os.path.basename(identifier).split('__') + + +def get_output_folder(dataset_name_or_id: Union[str, int], trainer_name: str = 'nnUNetTrainer', + plans_identifier: str = 'nnUNetPlans', configuration: str = '3d_fullres', + fold: Union[str, int] = None) -> str: + tmp = join(nnUNet_results, maybe_convert_to_dataset_name(dataset_name_or_id), + convert_trainer_plans_config_to_identifier(trainer_name, plans_identifier, configuration)) + if fold is not None: + tmp = join(tmp, f'fold_{fold}') + return tmp + + +def parse_dataset_trainer_plans_configuration_from_path(path: str): + folders = split_path(path) + # this here can be a little tricky because we are making assumptions. Let's hope this never fails lol + + # safer to make this depend on two conditions, the fold_x and the DatasetXXX + # first let's see if some fold_X is present + fold_x_present = [i.startswith('fold_') for i in folders] + if any(fold_x_present): + idx = fold_x_present.index(True) + # OK now two entries before that there should be DatasetXXX + assert len(folders[:idx]) >= 2, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + if folders[idx - 2].startswith('Dataset'): + split = folders[idx - 1].split('__') + assert len(split) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx - 2], *split + else: + # we can only check for dataset followed by a string that is separable into three strings by splitting with '__' + # look for DatasetXXX + dataset_folder = [i.startswith('Dataset') for i in folders] + if any(dataset_folder): + idx = dataset_folder.index(True) + assert len(folders) >= (idx + 1), 'Bad path, cannot extract what I need. 
Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + split = folders[idx + 1].split('__') + assert len(split) == 3, 'Bad path, cannot extract what I need. Your path needs to be at least ' \ + 'DatasetXXX/MODULE__PLANS__CONFIGURATION for this to work' + return folders[idx], *split + + +def get_ensemble_name(model1_folder, model2_folder, folds: Tuple[int, ...]): + identifier = 'ensemble___' + os.path.basename(model1_folder) + '___' + \ + os.path.basename(model2_folder) + '___' + folds_tuple_to_string(folds) + return identifier + + +def get_ensemble_name_from_d_tr_c(dataset, tr1, p1, c1, tr2, p2, c2, folds: Tuple[int, ...]): + model1_folder = get_output_folder(dataset, tr1, p1, c1) + model2_folder = get_output_folder(dataset, tr2, p2, c2) + + get_ensemble_name(model1_folder, model2_folder, folds) + + +def convert_ensemble_folder_to_model_identifiers_and_folds(ensemble_folder: str): + prefix, *models, folds = os.path.basename(ensemble_folder).split('___') + return models, folds + + +def folds_tuple_to_string(folds: Union[List[int], Tuple[int, ...]]): + s = str(folds[0]) + for f in folds[1:]: + s += f"_{f}" + return s + + +def folds_string_to_tuple(folds_string: str): + folds = folds_string.split('_') + res = [] + for f in folds: + try: + res.append(int(f)) + except ValueError: + res.append(f) + return res + + +def check_workers_alive_and_busy(export_pool: Pool, worker_list: List, results_list: List, allowed_num_queued: int = 0): + """ + + returns True if the number of results that are not ready is greater than the number of available workers + allowed_num_queued + """ + alive = [i.is_alive() for i in worker_list] + if not all(alive): + raise RuntimeError('Some background workers are no longer alive') + + not_ready = [not i.ready() for i in results_list] + if sum(not_ready) >= (len(export_pool._pool) + allowed_num_queued): + return True + return False + + +if __name__ == '__main__': + ### well at this point I could just write tests... + path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + path = 'Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/nnUNetModule__nnUNetPlans__3d_fullres/fold_all' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + try: + path = '/home/fabian/results/nnUNet_remake/Dataset002_Heart/' + print(parse_dataset_trainer_plans_configuration_from_path(path)) + except AssertionError: + print('yayy, assertion works') diff --git a/docker/template/src/nnunetv2/utilities/find_class_by_name.py b/docker/template/src/nnunetv2/utilities/find_class_by_name.py new file mode 100644 index 0000000..a345d99 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/find_class_by_name.py @@ -0,0 +1,24 @@ +import importlib +import pkgutil + +from batchgenerators.utilities.file_and_folder_operations import * + + +def recursive_find_python_class(folder: str, class_name: str, current_module: str): + tr = None + for importer, modname, ispkg in pkgutil.iter_modules([folder]): + # print(modname, ispkg) + if not ispkg: + m = importlib.import_module(current_module + "." + modname) + if hasattr(m, class_name): + tr = getattr(m, class_name) + break + + if tr is None: + for importer, modname, ispkg in pkgutil.iter_modules([folder]): + if ispkg: + next_current_module = current_module + "." 
+ modname + tr = recursive_find_python_class(join(folder, modname), class_name, current_module=next_current_module) + if tr is not None: + break + return tr \ No newline at end of file diff --git a/docker/template/src/nnunetv2/utilities/get_network_from_plans.py b/docker/template/src/nnunetv2/utilities/get_network_from_plans.py new file mode 100644 index 0000000..1dd1dd2 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/get_network_from_plans.py @@ -0,0 +1,77 @@ +from dynamic_network_architectures.architectures.unet import PlainConvUNet, ResidualEncoderUNet +from dynamic_network_architectures.building_blocks.helper import get_matching_instancenorm, convert_dim_to_conv_op +from dynamic_network_architectures.initialization.weight_init import init_last_bn_before_add_to_0 +from nnunetv2.utilities.network_initialization import InitWeights_He +from nnunetv2.utilities.plans_handling.plans_handler import ConfigurationManager, PlansManager +from torch import nn + + +def get_network_from_plans(plans_manager: PlansManager, + dataset_json: dict, + configuration_manager: ConfigurationManager, + num_input_channels: int, + deep_supervision: bool = True): + """ + we may have to change this in the future to accommodate other plans -> network mappings + + num_input_channels can differ depending on whether we do cascade. Its best to make this info available in the + trainer rather than inferring it again from the plans here. + """ + num_stages = len(configuration_manager.conv_kernel_sizes) + + dim = len(configuration_manager.conv_kernel_sizes[0]) + conv_op = convert_dim_to_conv_op(dim) + + label_manager = plans_manager.get_label_manager(dataset_json) + + segmentation_network_class_name = configuration_manager.UNet_class_name + mapping = { + 'PlainConvUNet': PlainConvUNet, + 'ResidualEncoderUNet': ResidualEncoderUNet + } + kwargs = { + 'PlainConvUNet': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + }, + 'ResidualEncoderUNet': { + 'conv_bias': True, + 'norm_op': get_matching_instancenorm(conv_op), + 'norm_op_kwargs': {'eps': 1e-5, 'affine': True}, + 'dropout_op': None, 'dropout_op_kwargs': None, + 'nonlin': nn.LeakyReLU, 'nonlin_kwargs': {'inplace': True}, + } + } + assert segmentation_network_class_name in mapping.keys(), 'The network architecture specified by the plans file ' \ + 'is non-standard (maybe your own?). Yo\'ll have to dive ' \ + 'into either this ' \ + 'function (get_network_from_plans) or ' \ + 'the init of your nnUNetModule to accommodate that.' + network_class = mapping[segmentation_network_class_name] + + conv_or_blocks_per_stage = { + 'n_conv_per_stage' + if network_class != ResidualEncoderUNet else 'n_blocks_per_stage': configuration_manager.n_conv_per_stage_encoder, + 'n_conv_per_stage_decoder': configuration_manager.n_conv_per_stage_decoder + } + # network class name!! 
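+    # For illustration (values are hypothetical, not from a real plans file): with
+    # n_conv_per_stage_encoder = [2, 2, 2], this dict becomes
+    # {'n_conv_per_stage': [2, 2, 2], 'n_conv_per_stage_decoder': [...]} for PlainConvUNet,
+    # while for ResidualEncoderUNet the first key is 'n_blocks_per_stage' because that
+    # architecture counts residual blocks per stage rather than convolutions.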
+    model = network_class(
+        input_channels=num_input_channels,
+        n_stages=num_stages,
+        features_per_stage=[min(configuration_manager.UNet_base_num_features * 2 ** i,
+                                configuration_manager.unet_max_num_features) for i in range(num_stages)],
+        conv_op=conv_op,
+        kernel_sizes=configuration_manager.conv_kernel_sizes,
+        strides=configuration_manager.pool_op_kernel_sizes,
+        num_classes=label_manager.num_segmentation_heads,
+        deep_supervision=deep_supervision,
+        **conv_or_blocks_per_stage,
+        **kwargs[segmentation_network_class_name]
+    )
+    model.apply(InitWeights_He(1e-2))
+    if network_class == ResidualEncoderUNet:
+        model.apply(init_last_bn_before_add_to_0)
+    return model
diff --git a/docker/template/src/nnunetv2/utilities/helpers.py b/docker/template/src/nnunetv2/utilities/helpers.py
new file mode 100644
index 0000000..42448e3
--- /dev/null
+++ b/docker/template/src/nnunetv2/utilities/helpers.py
@@ -0,0 +1,27 @@
+import torch
+
+
+def softmax_helper_dim0(x: torch.Tensor) -> torch.Tensor:
+    return torch.softmax(x, 0)
+
+
+def softmax_helper_dim1(x: torch.Tensor) -> torch.Tensor:
+    return torch.softmax(x, 1)
+
+
+def empty_cache(device: torch.device):
+    if device.type == 'cuda':
+        torch.cuda.empty_cache()
+    elif device.type == 'mps':
+        from torch import mps
+        mps.empty_cache()
+    else:
+        pass
+
+
+class dummy_context(object):
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        pass
diff --git a/docker/template/src/nnunetv2/utilities/json_export.py b/docker/template/src/nnunetv2/utilities/json_export.py
new file mode 100644
index 0000000..5ea463c
--- /dev/null
+++ b/docker/template/src/nnunetv2/utilities/json_export.py
@@ -0,0 +1,59 @@
+from collections.abc import Iterable
+
+import numpy as np
+import torch
+
+
+def recursive_fix_for_json_export(my_dict: dict):
+    # the json module cannot serialize numpy scalar types ('cannot serialize object of type
+    # bool_/int64/float64'), so we convert them to native Python types here
+    keys = list(my_dict.keys())  # cannot iterate over keys() if we change keys....
+    for k in keys:
+        if isinstance(k, (np.int64, np.int32, np.int8, np.uint8)):
+            tmp = my_dict[k]
+            del my_dict[k]
+            my_dict[int(k)] = tmp
+            del tmp
+            k = int(k)
+
+        if isinstance(my_dict[k], dict):
+            recursive_fix_for_json_export(my_dict[k])
+        elif isinstance(my_dict[k], np.ndarray):
+            assert my_dict[k].ndim == 1, 'only 1d arrays are supported'
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=list)
+        elif isinstance(my_dict[k], (np.bool_,)):
+            my_dict[k] = bool(my_dict[k])
+        elif isinstance(my_dict[k], (np.int64, np.int32, np.int8, np.uint8)):
+            my_dict[k] = int(my_dict[k])
+        elif isinstance(my_dict[k], (np.float32, np.float64, np.float16)):
+            my_dict[k] = float(my_dict[k])
+        elif isinstance(my_dict[k], list):
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=type(my_dict[k]))
+        elif isinstance(my_dict[k], tuple):
+            my_dict[k] = fix_types_iterable(my_dict[k], output_type=tuple)
+        elif isinstance(my_dict[k], torch.device):
+            my_dict[k] = str(my_dict[k])
+        else:
+            pass  # pray it can be serialized
+
+
+def fix_types_iterable(iterable, output_type):
+    # this is hacky and will break if you use it for anything outside nnunet. Keep your hands off of it.
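+    # Illustrative call (example values invented for this comment):
+    # fix_types_iterable((np.int64(3), np.float32(0.5)), output_type=tuple) -> (3, 0.5),
+    # i.e. the same structure with plain Python ints/floats that json can serialize.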
+ out = [] + for i in iterable: + if type(i) in (np.int64, np.int32, np.int8, np.uint8): + out.append(int(i)) + elif isinstance(i, dict): + recursive_fix_for_json_export(i) + out.append(i) + elif type(i) in (np.float32, np.float64, np.float16): + out.append(float(i)) + elif type(i) in (np.bool_,): + out.append(bool(i)) + elif isinstance(i, str): + out.append(i) + elif isinstance(i, Iterable): + # print('recursive call on', i, type(i)) + out.append(fix_types_iterable(i, type(i))) + else: + out.append(i) + return output_type(out) diff --git a/docker/template/src/nnunetv2/utilities/label_handling/__init__.py b/docker/template/src/nnunetv2/utilities/label_handling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/utilities/label_handling/label_handling.py b/docker/template/src/nnunetv2/utilities/label_handling/label_handling.py new file mode 100644 index 0000000..58b2513 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/label_handling/label_handling.py @@ -0,0 +1,322 @@ +from __future__ import annotations +from time import time +from typing import Union, List, Tuple, Type + +import numpy as np +import torch +from acvl_utils.cropping_and_padding.bounding_boxes import bounding_box_to_slice +from batchgenerators.utilities.file_and_folder_operations import join + +import nnunetv2 +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.helpers import softmax_helper_dim0 + +from typing import TYPE_CHECKING + +# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/ +if TYPE_CHECKING: + from nnunetv2.utilities.plans_handling.plans_handler import PlansManager, ConfigurationManager + + +class LabelManager(object): + def __init__(self, label_dict: dict, regions_class_order: Union[List[int], None], force_use_labels: bool = False, + inference_nonlin=None): + self._sanity_check(label_dict) + self.label_dict = label_dict + self.regions_class_order = regions_class_order + self._force_use_labels = force_use_labels + + if force_use_labels: + self._has_regions = False + else: + self._has_regions: bool = any( + [isinstance(i, (tuple, list)) and len(i) > 1 for i in self.label_dict.values()]) + + self._ignore_label: Union[None, int] = self._determine_ignore_label() + self._all_labels: List[int] = self._get_all_labels() + + self._regions: Union[None, List[Union[int, Tuple[int, ...]]]] = self._get_regions() + + if self.has_ignore_label: + assert self.ignore_label == max( + self.all_labels) + 1, 'If you use the ignore label it must have the highest ' \ + 'label value! It cannot be 0 or in between other labels. ' \ + 'Sorry bro.' + + if inference_nonlin is None: + self.inference_nonlin = torch.sigmoid if self.has_regions else softmax_helper_dim0 + else: + self.inference_nonlin = inference_nonlin + + def _sanity_check(self, label_dict: dict): + if not 'background' in label_dict.keys(): + raise RuntimeError('Background label not declared (remember that this should be label 0!)') + bg_label = label_dict['background'] + if isinstance(bg_label, (tuple, list)): + raise RuntimeError(f"Background label must be 0. Not a list. Not a tuple. Your background label: {bg_label}") + assert int(bg_label) == 0, f"Background label must be 0. Your background label: {bg_label}" + # not sure if we want to allow regions that contain background. I don't immediately see how this could cause + # problems so we allow it for now. That doesn't mean that this is explicitly supported. 
It could be that this + # just crashes. + + def _get_all_labels(self) -> List[int]: + all_labels = [] + for k, r in self.label_dict.items(): + # ignore label is not going to be used, hence the name. Duh. + if k == 'ignore': + continue + if isinstance(r, (tuple, list)): + for ri in r: + all_labels.append(int(ri)) + else: + all_labels.append(int(r)) + all_labels = list(np.unique(all_labels)) + all_labels.sort() + return all_labels + + def _get_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]: + if not self._has_regions or self._force_use_labels: + return None + else: + assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \ + 'define regions_class_order!' + regions = [] + for k, r in self.label_dict.items(): + # ignore ignore label + if k == 'ignore': + continue + # ignore regions that are background + if (np.isscalar(r) and r == 0) \ + or \ + (isinstance(r, (tuple, list)) and len(np.unique(r)) == 1 and np.unique(r)[0] == 0): + continue + if isinstance(r, list): + r = tuple(r) + regions.append(r) + assert len(self.regions_class_order) == len(regions), 'regions_class_order must have as ' \ + 'many entries as there are ' \ + 'regions' + return regions + + def _determine_ignore_label(self) -> Union[None, int]: + ignore_label = self.label_dict.get('ignore') + if ignore_label is not None: + assert isinstance(ignore_label, int), f'Ignore label has to be an integer. It cannot be a region ' \ + f'(list/tuple). Got {type(ignore_label)}.' + return ignore_label + + @property + def has_regions(self) -> bool: + return self._has_regions + + @property + def has_ignore_label(self) -> bool: + return self.ignore_label is not None + + @property + def all_regions(self) -> Union[None, List[Union[int, Tuple[int, ...]]]]: + return self._regions + + @property + def all_labels(self) -> List[int]: + return self._all_labels + + @property + def ignore_label(self) -> Union[None, int]: + return self._ignore_label + + def apply_inference_nonlin(self, logits: Union[np.ndarray, torch.Tensor]) -> \ + Union[np.ndarray, torch.Tensor]: + """ + logits has to have shape (c, x, y(, z)) where c is the number of classes/regions + """ + if isinstance(logits, np.ndarray): + logits = torch.from_numpy(logits) + + with torch.no_grad(): + # softmax etc is not implemented for half + logits = logits.float() + probabilities = self.inference_nonlin(logits) + + return probabilities + + def convert_probabilities_to_segmentation(self, predicted_probabilities: Union[np.ndarray, torch.Tensor]) -> \ + Union[np.ndarray, torch.Tensor]: + """ + assumes that inference_nonlinearity was already applied! + + predicted_probabilities has to have shape (c, x, y(, z)) where c is the number of classes/regions + """ + if not isinstance(predicted_probabilities, (np.ndarray, torch.Tensor)): + raise RuntimeError(f"Unexpected input type. Expected np.ndarray or torch.Tensor," + f" got {type(predicted_probabilities)}") + + if self.has_regions: + assert self.regions_class_order is not None, 'if region-based training is requested then you need to ' \ + 'define regions_class_order!' + # check correct number of outputs + assert predicted_probabilities.shape[0] == self.num_segmentation_heads, \ + f'unexpected number of channels in predicted_probabilities. Expected {self.num_segmentation_heads}, ' \ + f'got {predicted_probabilities.shape[0]}. Remember that predicted_probabilities should have shape ' \ + f'(c, x, y(, z)).' 
+
+        if self.has_regions:
+            if isinstance(predicted_probabilities, np.ndarray):
+                segmentation = np.zeros(predicted_probabilities.shape[1:], dtype=np.uint16)
+            else:
+                # no uint16 in torch
+                segmentation = torch.zeros(predicted_probabilities.shape[1:], dtype=torch.int16,
+                                           device=predicted_probabilities.device)
+            for i, c in enumerate(self.regions_class_order):
+                segmentation[predicted_probabilities[i] > 0.5] = c
+        else:
+            segmentation = predicted_probabilities.argmax(0)
+
+        return segmentation
+
+    def convert_logits_to_segmentation(self, predicted_logits: Union[np.ndarray, torch.Tensor]) -> \
+            Union[np.ndarray, torch.Tensor]:
+        input_is_numpy = isinstance(predicted_logits, np.ndarray)
+        probabilities = self.apply_inference_nonlin(predicted_logits)
+        if input_is_numpy and isinstance(probabilities, torch.Tensor):
+            probabilities = probabilities.cpu().numpy()
+        return self.convert_probabilities_to_segmentation(probabilities)
+
+    def revert_cropping_on_probabilities(self, predicted_probabilities: Union[torch.Tensor, np.ndarray],
+                                         bbox: List[List[int]],
+                                         original_shape: Union[List[int], Tuple[int, ...]]):
+        """
+        ONLY USE THIS WITH PROBABILITIES, DO NOT USE LOGITS AND DO NOT USE FOR SEGMENTATION MAPS!!!
+
+        predicted_probabilities must be (c, x, y(, z))
+
+        Why do we do this here? If we pad probabilities we need to make sure that convert_logits_to_segmentation
+        correctly returns background in the padded areas. Also we want to be able to look at the padded probabilities
+        and not have strange artifacts.
+        Only LabelManager knows how this needs to be done, so we let it handle this.
+        """
+        # revert cropping
+        probs_reverted_cropping = np.zeros((predicted_probabilities.shape[0], *original_shape),
+                                           dtype=predicted_probabilities.dtype) \
+            if isinstance(predicted_probabilities, np.ndarray) else \
+            torch.zeros((predicted_probabilities.shape[0], *original_shape), dtype=predicted_probabilities.dtype)
+
+        if not self.has_regions:
+            probs_reverted_cropping[0] = 1
+
+        slicer = bounding_box_to_slice(bbox)
+        probs_reverted_cropping[tuple([slice(None)] + list(slicer))] = predicted_probabilities
+        return probs_reverted_cropping
+
+    @staticmethod
+    def filter_background(classes_or_regions: Union[List[int], List[Union[int, Tuple[int, ...]]]]):
+        # admittedly this list comprehension is hard to read; it keeps every class/region that is not pure background
+        return [i for i in classes_or_regions if
+                ((not isinstance(i, (tuple, list))) and i != 0)
+                or
+                (isinstance(i, (tuple, list)) and not (
+                        len(np.unique(i)) == 1 and np.unique(i)[0] == 0))]
+
+    @property
+    def foreground_regions(self):
+        return self.filter_background(self.all_regions)
+
+    @property
+    def foreground_labels(self):
+        return self.filter_background(self.all_labels)
+
+    @property
+    def num_segmentation_heads(self):
+        if self.has_regions:
+            return len(self.foreground_regions)
+        else:
+            return len(self.all_labels)
+
+
+def get_labelmanager_class_from_plans(plans: dict) -> Type[LabelManager]:
+    if 'label_manager' not in plans.keys():
+        print('No label manager specified in plans. Using default: LabelManager')
+        return LabelManager
+    else:
+        labelmanager_class = recursive_find_python_class(join(nnunetv2.__path__[0], "utilities", "label_handling"),
+                                                         plans['label_manager'],
+                                                         current_module="nnunetv2.utilities.label_handling")
+        return labelmanager_class
+
+
+def convert_labelmap_to_one_hot(segmentation: Union[np.ndarray, torch.Tensor],
+                                all_labels: Union[List, torch.Tensor, np.ndarray, tuple],
+                                output_dtype=None) -> Union[np.ndarray, torch.Tensor]:
+    """
+    if output_dtype is None then we use np.uint8/torch.uint8
+    if input is torch.Tensor then output will be on the same device
+
+    np.ndarray is faster than torch.Tensor
+
+    if segmentation is torch.Tensor, this function will be faster if it is LongTensor. If it is something else we have
+    to cast which takes time.
+
+    IMPORTANT: This function only works properly if your labels are consecutive integers, so something like 0, 1, 2, 3, ...
+    DO NOT use it with 0, 32, 123, 255, ... or similar (remap your labels first)
+    """
+    if isinstance(segmentation, torch.Tensor):
+        result = torch.zeros((len(all_labels), *segmentation.shape),
+                             dtype=output_dtype if output_dtype is not None else torch.uint8,
+                             device=segmentation.device)
+        # variant 1, 2x faster than 2
+        result.scatter_(0, segmentation[None].long(), 1)  # scatter_ requires the index tensor to be of dtype long
+        # variant 2, slower than 1
+        # for i, l in enumerate(all_labels):
+        #     result[i] = segmentation == l
+    else:
+        result = np.zeros((len(all_labels), *segmentation.shape),
+                          dtype=output_dtype if output_dtype is not None else np.uint8)
+        # variant 1, fastest in my testing
+        for i, l in enumerate(all_labels):
+            result[i] = segmentation == l
+        # variant 2 takes about twice as long, so we don't use it
+        # result = np.eye(len(all_labels))[segmentation].transpose((3, 0, 1, 2))
+    return result
+
+
+def determine_num_input_channels(plans_manager: PlansManager,
+                                 configuration_or_config_manager: Union[str, ConfigurationManager],
+                                 dataset_json: dict) -> int:
+    if isinstance(configuration_or_config_manager, str):
+        config_manager = plans_manager.get_configuration(configuration_or_config_manager)
+    else:
+        config_manager = configuration_or_config_manager
+
+    label_manager = plans_manager.get_label_manager(dataset_json)
+    num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])
+
+    # cascade has a different number of input channels
+    if config_manager.previous_stage_name is not None:
+        num_label_inputs = len(label_manager.foreground_labels)
+        num_input_channels = num_modalities + num_label_inputs
+    else:
+        num_input_channels = num_modalities
+    return num_input_channels
+
+
+if __name__ == '__main__':
+    # this code used to be able to differentiate variant 1 and 2 to measure time.
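+    # Quick sanity example (values invented for this comment): for
+    # seg = np.array([[0, 2], [1, 0]]) and all_labels = [0, 1, 2],
+    # convert_labelmap_to_one_hot returns a (3, 2, 2) array whose channel i is the
+    # binary mask (seg == i). The code below only benchmarks runtime on random data.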
+ num_labels = 7 + seg = np.random.randint(0, num_labels, size=(256, 256, 256), dtype=np.uint8) + seg_torch = torch.from_numpy(seg) + st = time() + onehot_npy = convert_labelmap_to_one_hot(seg, np.arange(num_labels)) + time_1 = time() + onehot_npy2 = convert_labelmap_to_one_hot(seg, np.arange(num_labels)) + time_2 = time() + onehot_torch = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels)) + time_torch = time() + onehot_torch2 = convert_labelmap_to_one_hot(seg_torch, np.arange(num_labels)) + time_torch2 = time() + print( + f'np: {time_1 - st}, np2: {time_2 - time_1}, torch: {time_torch - time_2}, torch2: {time_torch2 - time_torch}') + onehot_torch = onehot_torch.numpy() + onehot_torch2 = onehot_torch2.numpy() + print(np.all(onehot_torch == onehot_npy)) + print(np.all(onehot_torch2 == onehot_npy)) diff --git a/docker/template/src/nnunetv2/utilities/network_initialization.py b/docker/template/src/nnunetv2/utilities/network_initialization.py new file mode 100644 index 0000000..1ead271 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/network_initialization.py @@ -0,0 +1,12 @@ +from torch import nn + + +class InitWeights_He(object): + def __init__(self, neg_slope=1e-2): + self.neg_slope = neg_slope + + def __call__(self, module): + if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): + module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) + if module.bias is not None: + module.bias = nn.init.constant_(module.bias, 0) diff --git a/docker/template/src/nnunetv2/utilities/overlay_plots.py b/docker/template/src/nnunetv2/utilities/overlay_plots.py new file mode 100644 index 0000000..66a3b67 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/overlay_plots.py @@ -0,0 +1,273 @@ +# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
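+# Usage sketch for the overlay utilities below (hedged example; the dataset name and
+# output folder are placeholders and nnUNet_raw must be configured):
+#
+#     generate_overlays_from_raw('Dataset002_Heart', '/tmp/overlays',
+#                                num_processes=4, channel_idx=0, overlay_intensity=0.6)
+#
+# For every training case this selects the slice with the most foreground and writes
+# one PNG; the same functionality is exposed on the command line through
+# entry_point_generate_overlay() at the bottom of this file.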
+import multiprocessing +from typing import Tuple, Union + +import numpy as np +import pandas as pd +from batchgenerators.utilities.file_and_folder_operations import * +from nnunetv2.configuration import default_num_processes +from nnunetv2.imageio.base_reader_writer import BaseReaderWriter +from nnunetv2.imageio.reader_writer_registry import determine_reader_writer_from_dataset_json +from nnunetv2.paths import nnUNet_raw, nnUNet_preprocessed +from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name +from nnunetv2.utilities.utils import get_filenames_of_train_images_and_targets + +color_cycle = ( + "000000", + "4363d8", + "f58231", + "3cb44b", + "e6194B", + "911eb4", + "ffe119", + "bfef45", + "42d4f4", + "f032e6", + "000075", + "9A6324", + "808000", + "800000", + "469990", +) + + +def hex_to_rgb(hex: str): + assert len(hex) == 6 + return tuple(int(hex[i:i + 2], 16) for i in (0, 2, 4)) + + +def generate_overlay(input_image: np.ndarray, segmentation: np.ndarray, mapping: dict = None, + color_cycle: Tuple[str, ...] = color_cycle, + overlay_intensity: float = 0.6): + """ + image can be 2d greyscale or 2d RGB (color channel in last dimension!) + + Segmentation must be label map of same shape as image (w/o color channels) + + mapping can be label_id -> idx_in_cycle or None + + returned image is scaled to [0, 255] (uint8)!!! + """ + # create a copy of image + image = np.copy(input_image) + + if image.ndim == 2: + image = np.tile(image[:, :, None], (1, 1, 3)) + elif image.ndim == 3: + if image.shape[2] == 1: + image = np.tile(image, (1, 1, 3)) + else: + raise RuntimeError(f'if 3d image is given the last dimension must be the color channels (3 channels). ' + f'Only 2D images are supported. Your image shape: {image.shape}') + else: + raise RuntimeError("unexpected image shape. only 2D images and 2D images with color channels (color in " + "last dimension) are supported") + + # rescale image to [0, 255] + image = image - image.min() + image = image / image.max() * 255 + + # create output + if mapping is None: + uniques = np.sort(pd.unique(segmentation.ravel())) # np.unique(segmentation) + mapping = {i: c for c, i in enumerate(uniques)} + + for l in mapping.keys(): + image[segmentation == l] += overlay_intensity * np.array(hex_to_rgb(color_cycle[mapping[l]])) + + # rescale result to [0, 255] + image = image / image.max() * 255 + return image.astype(np.uint8) + + +def select_slice_to_plot(image: np.ndarray, segmentation: np.ndarray) -> int: + """ + image and segmentation are expected to be 3D + + selects the slice with the largest amount of fg (regardless of label) + + we give image so that we can easily replace this function if needed + """ + fg_mask = segmentation != 0 + fg_per_slice = fg_mask.sum((1, 2)) + selected_slice = int(np.argmax(fg_per_slice)) + return selected_slice + + +def select_slice_to_plot2(image: np.ndarray, segmentation: np.ndarray) -> int: + """ + image and segmentation are expected to be 3D (or 1, x, y) + + selects the slice with the largest amount of fg (how much percent of each class are in each slice? 
pick slice + with highest avg percent) + + we give image so that we can easily replace this function if needed + """ + classes = [i for i in np.sort(pd.unique(segmentation.ravel())) if i != 0] + fg_per_slice = np.zeros((image.shape[0], len(classes))) + for i, c in enumerate(classes): + fg_mask = segmentation == c + fg_per_slice[:, i] = fg_mask.sum((1, 2)) + fg_per_slice[:, i] /= fg_per_slice.sum() + fg_per_slice = fg_per_slice.mean(1) + return int(np.argmax(fg_per_slice)) + + +def plot_overlay(image_file: str, segmentation_file: str, image_reader_writer: BaseReaderWriter, output_file: str, + overlay_intensity: float = 0.6): + import matplotlib.pyplot as plt + + image, props = image_reader_writer.read_images((image_file, )) + image = image[0] + seg, props_seg = image_reader_writer.read_seg(segmentation_file) + seg = seg[0] + + assert image.shape == seg.shape, "image and seg do not have the same shape: %s, %s" % ( + image_file, segmentation_file) + + assert image.ndim == 3, 'only 3D images/segs are supported' + + selected_slice = select_slice_to_plot2(image, seg) + # print(image.shape, selected_slice) + + overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity) + + plt.imsave(output_file, overlay) + + +def plot_overlay_preprocessed(case_file: str, output_file: str, overlay_intensity: float = 0.6, channel_idx=0): + import matplotlib.pyplot as plt + data = np.load(case_file)['data'] + seg = np.load(case_file)['seg'][0] + + assert channel_idx < (data.shape[0]), 'This dataset only supports channel index up to %d' % (data.shape[0] - 1) + + image = data[channel_idx] + seg[seg < 0] = 0 + + selected_slice = select_slice_to_plot2(image, seg) + + overlay = generate_overlay(image[selected_slice], seg[selected_slice], overlay_intensity=overlay_intensity) + + plt.imsave(output_file, overlay) + + +def multiprocessing_plot_overlay(list_of_image_files, list_of_seg_files, image_reader_writer, + list_of_output_files, overlay_intensity, + num_processes=8): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay, zip( + list_of_image_files, list_of_seg_files, [image_reader_writer] * len(list_of_output_files), + list_of_output_files, [overlay_intensity] * len(list_of_output_files) + )) + r.get() + + +def multiprocessing_plot_overlay_preprocessed(list_of_case_files, list_of_output_files, overlay_intensity, + num_processes=8, channel_idx=0): + with multiprocessing.get_context("spawn").Pool(num_processes) as p: + r = p.starmap_async(plot_overlay_preprocessed, zip( + list_of_case_files, list_of_output_files, [overlay_intensity] * len(list_of_output_files), + [channel_idx] * len(list_of_output_files) + )) + r.get() + + +def generate_overlays_from_raw(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_raw, dataset_name) + dataset_json = load_json(join(folder, 'dataset.json')) + dataset = get_filenames_of_train_images_and_targets(folder, dataset_json) + + image_files = [v['images'][channel_idx] for v in dataset.values()] + seg_files = [v['label'] for v in dataset.values()] + + assert all([isfile(i) for i in image_files]) + assert all([isfile(i) for i in seg_files]) + + maybe_mkdir_p(output_folder) + output_files = [join(output_folder, i + '.png') for i in dataset.keys()] + + image_reader_writer = 
determine_reader_writer_from_dataset_json(dataset_json, image_files[0])() + multiprocessing_plot_overlay(image_files, seg_files, image_reader_writer, output_files, overlay_intensity, num_processes) + + +def generate_overlays_from_preprocessed(dataset_name_or_id: Union[int, str], output_folder: str, + num_processes: int = 8, channel_idx: int = 0, + configuration: str = None, + plans_identifier: str = 'nnUNetPlans', + overlay_intensity: float = 0.6): + dataset_name = maybe_convert_to_dataset_name(dataset_name_or_id) + folder = join(nnUNet_preprocessed, dataset_name) + if not isdir(folder): raise RuntimeError("run preprocessing for that task first") + + plans = load_json(join(folder, plans_identifier + '.json')) + if configuration is None: + if '3d_fullres' in plans['configurations'].keys(): + configuration = '3d_fullres' + else: + configuration = '2d' + data_identifier = plans['configurations'][configuration]["data_identifier"] + preprocessed_folder = join(folder, data_identifier) + + if not isdir(preprocessed_folder): + raise RuntimeError(f"Preprocessed data folder for configuration {configuration} of plans identifier " + f"{plans_identifier} ({dataset_name}) does not exist. Run preprocessing for this " + f"configuration first!") + + identifiers = [i[:-4] for i in subfiles(preprocessed_folder, suffix='.npz', join=False)] + + output_files = [join(output_folder, i + '.png') for i in identifiers] + image_files = [join(preprocessed_folder, i + ".npz") for i in identifiers] + + maybe_mkdir_p(output_folder) + multiprocessing_plot_overlay_preprocessed(image_files, output_files, overlay_intensity=overlay_intensity, + num_processes=num_processes, channel_idx=channel_idx) + + +def entry_point_generate_overlay(): + import argparse + parser = argparse.ArgumentParser("Plots png overlays of the slice with the most foreground. Note that this " + "disregards spacing information!") + parser.add_argument('-d', type=str, help="Dataset name or id", required=True) + parser.add_argument('-o', type=str, help="output folder", required=True) + parser.add_argument('-np', type=int, default=default_num_processes, required=False, + help=f"number of processes used. Default: {default_num_processes}") + parser.add_argument('-channel_idx', type=int, default=0, required=False, + help="channel index used (0 = _0000). Default: 0") + parser.add_argument('--use_raw', action='store_true', required=False, help="if set then we use raw data. else " + "we use preprocessed") + parser.add_argument('-p', type=str, required=False, default='nnUNetPlans', + help='plans identifier. Only used if --use_raw is not set! Default: nnUNetPlans') + parser.add_argument('-c', type=str, required=False, default=None, + help='configuration name. Only used if --use_raw is not set! Default: None = ' + '3d_fullres if available, else 2d') + parser.add_argument('-overlay_intensity', type=float, required=False, default=0.6, + help='overlay intensity. 
Higher = brighter/less transparent') + + + args = parser.parse_args() + + if args.use_raw: + generate_overlays_from_raw(args.d, args.o, args.np, args.channel_idx, + overlay_intensity=args.overlay_intensity) + else: + generate_overlays_from_preprocessed(args.d, args.o, args.np, args.channel_idx, args.c, args.p, + overlay_intensity=args.overlay_intensity) + + +if __name__ == '__main__': + entry_point_generate_overlay() \ No newline at end of file diff --git a/docker/template/src/nnunetv2/utilities/plans_handling/__init__.py b/docker/template/src/nnunetv2/utilities/plans_handling/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/docker/template/src/nnunetv2/utilities/plans_handling/plans_handler.py b/docker/template/src/nnunetv2/utilities/plans_handling/plans_handler.py new file mode 100644 index 0000000..6c39fd1 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/plans_handling/plans_handler.py @@ -0,0 +1,307 @@ +from __future__ import annotations + +import dynamic_network_architectures +from copy import deepcopy +from functools import lru_cache, partial +from typing import Union, Tuple, List, Type, Callable + +import numpy as np +import torch + +from nnunetv2.preprocessing.resampling.utils import recursive_find_resampling_fn_by_name +from torch import nn + +import nnunetv2 +from batchgenerators.utilities.file_and_folder_operations import load_json, join + +from nnunetv2.imageio.reader_writer_registry import recursive_find_reader_writer_by_name +from nnunetv2.utilities.find_class_by_name import recursive_find_python_class +from nnunetv2.utilities.label_handling.label_handling import get_labelmanager_class_from_plans + + +# see https://adamj.eu/tech/2021/05/13/python-type-hints-how-to-fix-circular-imports/ +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from nnunetv2.utilities.label_handling.label_handling import LabelManager + from nnunetv2.imageio.base_reader_writer import BaseReaderWriter + from nnunetv2.preprocessing.preprocessors.default_preprocessor import DefaultPreprocessor + from nnunetv2.experiment_planning.experiment_planners.default_experiment_planner import ExperimentPlanner + + +class ConfigurationManager(object): + def __init__(self, configuration_dict: dict): + self.configuration = configuration_dict + + def __repr__(self): + return self.configuration.__repr__() + + @property + def data_identifier(self) -> str: + return self.configuration['data_identifier'] + + @property + def preprocessor_name(self) -> str: + return self.configuration['preprocessor_name'] + + @property + @lru_cache(maxsize=1) + def preprocessor_class(self) -> Type[DefaultPreprocessor]: + preprocessor_class = recursive_find_python_class(join(nnunetv2.__path__[0], "preprocessing"), + self.preprocessor_name, + current_module="nnunetv2.preprocessing") + return preprocessor_class + + @property + def batch_size(self) -> int: + return self.configuration['batch_size'] + + @property + def patch_size(self) -> List[int]: + return self.configuration['patch_size'] + + @property + def median_image_size_in_voxels(self) -> List[int]: + return self.configuration['median_image_size_in_voxels'] + + @property + def spacing(self) -> List[float]: + return self.configuration['spacing'] + + @property + def normalization_schemes(self) -> List[str]: + return self.configuration['normalization_schemes'] + + @property + def use_mask_for_norm(self) -> List[bool]: + return self.configuration['use_mask_for_norm'] + + @property + def UNet_class_name(self) -> str: + return 
self.configuration['UNet_class_name'] + + @property + @lru_cache(maxsize=1) + def UNet_class(self) -> Type[nn.Module]: + unet_class = recursive_find_python_class(join(dynamic_network_architectures.__path__[0], "architectures"), + self.UNet_class_name, + current_module="dynamic_network_architectures.architectures") + if unet_class is None: + raise RuntimeError('The network architecture specified by the plans file ' + 'is non-standard (maybe your own?). Fix this by not using ' + 'ConfigurationManager.UNet_class to instantiate ' + 'it (probably just overwrite build_network_architecture of your trainer.') + return unet_class + + @property + def UNet_base_num_features(self) -> int: + return self.configuration['UNet_base_num_features'] + + @property + def n_conv_per_stage_encoder(self) -> List[int]: + return self.configuration['n_conv_per_stage_encoder'] + + @property + def n_conv_per_stage_decoder(self) -> List[int]: + return self.configuration['n_conv_per_stage_decoder'] + + @property + def num_pool_per_axis(self) -> List[int]: + return self.configuration['num_pool_per_axis'] + + @property + def pool_op_kernel_sizes(self) -> List[List[int]]: + return self.configuration['pool_op_kernel_sizes'] + + @property + def conv_kernel_sizes(self) -> List[List[int]]: + return self.configuration['conv_kernel_sizes'] + + @property + def unet_max_num_features(self) -> int: + return self.configuration['unet_max_num_features'] + + @property + @lru_cache(maxsize=1) + def resampling_fn_data(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_data']) + fn = partial(fn, **self.configuration['resampling_fn_data_kwargs']) + return fn + + @property + @lru_cache(maxsize=1) + def resampling_fn_probabilities(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_probabilities']) + fn = partial(fn, **self.configuration['resampling_fn_probabilities_kwargs']) + return fn + + @property + @lru_cache(maxsize=1) + def resampling_fn_seg(self) -> Callable[ + [Union[torch.Tensor, np.ndarray], + Union[Tuple[int, ...], List[int], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray], + Union[Tuple[float, ...], List[float], np.ndarray] + ], + Union[torch.Tensor, np.ndarray]]: + fn = recursive_find_resampling_fn_by_name(self.configuration['resampling_fn_seg']) + fn = partial(fn, **self.configuration['resampling_fn_seg_kwargs']) + return fn + + @property + def batch_dice(self) -> bool: + return self.configuration['batch_dice'] + + @property + def next_stage_names(self) -> Union[List[str], None]: + ret = self.configuration.get('next_stage') + if ret is not None: + if isinstance(ret, str): + ret = [ret] + return ret + + @property + def previous_stage_name(self) -> Union[str, None]: + return self.configuration.get('previous_stage') + + +class PlansManager(object): + def __init__(self, plans_file_or_dict: Union[str, dict]): + """ + Why do we need this? 
+ 1) resolve inheritance in configurations + 2) expose otherwise annoying stuff like getting the label manager or IO class from a string + 3) clearly expose the things that are in the plans instead of hiding them in a dict + 4) cache frequently used lookups + + This class does not prevent you from going wild. You can still use the plans directly if you prefer + (PlansManager.plans['key']) + """ + self.plans = plans_file_or_dict if isinstance(plans_file_or_dict, dict) else load_json(plans_file_or_dict) + + def __repr__(self): + return self.plans.__repr__() + + def _internal_resolve_configuration_inheritance(self, configuration_name: str, + visited: Tuple[str, ...] = None) -> dict: + if configuration_name not in self.plans['configurations'].keys(): + raise ValueError(f'The configuration {configuration_name} does not exist in the plans I have. Valid ' + f'configuration names are {list(self.plans["configurations"].keys())}.') + configuration = deepcopy(self.plans['configurations'][configuration_name]) + if 'inherits_from' in configuration: + parent_config_name = configuration['inherits_from'] + + if visited is None: + visited = (configuration_name,) + else: + if parent_config_name in visited: + raise RuntimeError(f"Circular dependency detected. The following configurations were visited " + f"while solving inheritance (in that order!): {visited}. " + f"Current configuration: {configuration_name}. Its parent configuration " + f"is {parent_config_name}.") + visited = (*visited, configuration_name) + + base_config = self._internal_resolve_configuration_inheritance(parent_config_name, visited) + base_config.update(configuration) + configuration = base_config + return configuration + + @lru_cache(maxsize=10) + def get_configuration(self, configuration_name: str): + if configuration_name not in self.plans['configurations'].keys(): + raise RuntimeError(f"Requested configuration {configuration_name} not found in plans. 
" + f"Available configurations: {list(self.plans['configurations'].keys())}") + + configuration_dict = self._internal_resolve_configuration_inheritance(configuration_name) + return ConfigurationManager(configuration_dict) + + @property + def dataset_name(self) -> str: + return self.plans['dataset_name'] + + @property + def plans_name(self) -> str: + return self.plans['plans_name'] + + @property + def original_median_spacing_after_transp(self) -> List[float]: + return self.plans['original_median_spacing_after_transp'] + + @property + def original_median_shape_after_transp(self) -> List[float]: + return self.plans['original_median_shape_after_transp'] + + @property + @lru_cache(maxsize=1) + def image_reader_writer_class(self) -> Type[BaseReaderWriter]: + return recursive_find_reader_writer_by_name(self.plans['image_reader_writer']) + + @property + def transpose_forward(self) -> List[int]: + return self.plans['transpose_forward'] + + @property + def transpose_backward(self) -> List[int]: + return self.plans['transpose_backward'] + + @property + def available_configurations(self) -> List[str]: + return list(self.plans['configurations'].keys()) + + @property + @lru_cache(maxsize=1) + def experiment_planner_class(self) -> Type[ExperimentPlanner]: + planner_name = self.experiment_planner_name + experiment_planner = recursive_find_python_class(join(nnunetv2.__path__[0], "experiment_planning"), + planner_name, + current_module="nnunetv2.experiment_planning") + return experiment_planner + + @property + def experiment_planner_name(self) -> str: + return self.plans['experiment_planner_used'] + + @property + @lru_cache(maxsize=1) + def label_manager_class(self) -> Type[LabelManager]: + return get_labelmanager_class_from_plans(self.plans) + + def get_label_manager(self, dataset_json: dict, **kwargs) -> LabelManager: + return self.label_manager_class(label_dict=dataset_json['labels'], + regions_class_order=dataset_json.get('regions_class_order'), + **kwargs) + + @property + def foreground_intensity_properties_per_channel(self) -> dict: + if 'foreground_intensity_properties_per_channel' not in self.plans.keys(): + if 'foreground_intensity_properties_by_modality' in self.plans.keys(): + return self.plans['foreground_intensity_properties_by_modality'] + return self.plans['foreground_intensity_properties_per_channel'] + + +if __name__ == '__main__': + from nnunetv2.paths import nnUNet_preprocessed + from nnunetv2.utilities.dataset_name_id_conversion import maybe_convert_to_dataset_name + + plans = load_json(join(nnUNet_preprocessed, maybe_convert_to_dataset_name(3), 'nnUNetPlans.json')) + # build new configuration that inherits from 3d_fullres + plans['configurations']['3d_fullres_bs4'] = { + 'batch_size': 4, + 'inherits_from': '3d_fullres' + } + # now get plans and configuration managers + plans_manager = PlansManager(plans) + configuration_manager = plans_manager.get_configuration('3d_fullres_bs4') + print(configuration_manager) # look for batch size 4 diff --git a/docker/template/src/nnunetv2/utilities/utils.py b/docker/template/src/nnunetv2/utilities/utils.py new file mode 100644 index 0000000..b0c16a2 --- /dev/null +++ b/docker/template/src/nnunetv2/utilities/utils.py @@ -0,0 +1,69 @@ +# Copyright 2021 HIP Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center +# (DKFZ), Heidelberg, Germany +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os.path +from functools import lru_cache +from typing import Union + +from batchgenerators.utilities.file_and_folder_operations import * +import numpy as np +import re + +from nnunetv2.paths import nnUNet_raw + + +def get_identifiers_from_splitted_dataset_folder(folder: str, file_ending: str): + files = subfiles(folder, suffix=file_ending, join=False) + # all files have a 4 digit channel index (_XXXX) + crop = len(file_ending) + 5 + files = [i[:-crop] for i in files] + # only unique image ids + files = np.unique(files) + return files + + +def create_lists_from_splitted_dataset_folder(folder: str, file_ending: str, identifiers: List[str] = None) -> List[ + List[str]]: + """ + does not rely on dataset.json + """ + if identifiers is None: + identifiers = get_identifiers_from_splitted_dataset_folder(folder, file_ending) + files = subfiles(folder, suffix=file_ending, join=False, sort=True) + list_of_lists = [] + for f in identifiers: + p = re.compile(re.escape(f) + r"_\d\d\d\d" + re.escape(file_ending)) + list_of_lists.append([join(folder, i) for i in files if p.fullmatch(i)]) + return list_of_lists + + +def get_filenames_of_train_images_and_targets(raw_dataset_folder: str, dataset_json: dict = None): + if dataset_json is None: + dataset_json = load_json(join(raw_dataset_folder, 'dataset.json')) + + if 'dataset' in dataset_json.keys(): + dataset = dataset_json['dataset'] + for k in dataset.keys(): + dataset[k]['label'] = os.path.abspath(join(raw_dataset_folder, dataset[k]['label'])) if not os.path.isabs(dataset[k]['label']) else dataset[k]['label'] + dataset[k]['images'] = [os.path.abspath(join(raw_dataset_folder, i)) if not os.path.isabs(i) else i for i in dataset[k]['images']] + else: + identifiers = get_identifiers_from_splitted_dataset_folder(join(raw_dataset_folder, 'imagesTr'), dataset_json['file_ending']) + images = create_lists_from_splitted_dataset_folder(join(raw_dataset_folder, 'imagesTr'), dataset_json['file_ending'], identifiers) + segs = [join(raw_dataset_folder, 'labelsTr', i + dataset_json['file_ending']) for i in identifiers] + dataset = {i: {'images': im, 'label': se} for i, im, se in zip(identifiers, images, segs)} + return dataset + + +if __name__ == '__main__': + print(get_filenames_of_train_images_and_targets(join(nnUNet_raw, 'Dataset002_Heart'))) diff --git a/docker/template/src/run.sh b/docker/template/src/run.sh new file mode 100644 index 0000000..2ec089a --- /dev/null +++ b/docker/template/src/run.sh @@ -0,0 +1,5 @@ +#!/bin/bash +# $1 is the csv file containing sample identifiers of test images +# $2 is the input path where test images are located +# $3 is the output path where predicted masks will be stored as images. +python main.py "$1" "$2" "$3" \ No newline at end of file diff --git a/docker/template/src/setup.py b/docker/template/src/setup.py new file mode 100644 index 0000000..d5af773 --- /dev/null +++ b/docker/template/src/setup.py @@ -0,0 +1,67 @@ +from setuptools import setup, find_namespace_packages + +setup(name='nnunetv2', + packages=find_namespace_packages(include=["nnunetv2", "nnunetv2.*"]), + version='2.1.1', + description='nnU-Net. 
Framework for out-of-the-box biomedical image segmentation.', + url='https://github.com/MIC-DKFZ/nnUNet', + author='Helmholtz Imaging Applied Computer Vision Lab, Division of Medical Image Computing, German Cancer Research Center', + author_email='f.isensee@dkfz-heidelberg.de', + license='Apache License Version 2.0, January 2004', + python_requires=">=3.10", + install_requires=[ + "torch>=2.0.0", + "acvl-utils>=0.2", + "dynamic-network-architectures>=0.2", + "tqdm", + "mamba-ssm==1.2.0.post1", + "dicom2nifti", + "gdown", + "medpy", + "scipy", + "batchgenerators>=0.25", + "numpy", + "scikit-learn", + "scikit-image>=0.19.3", + "SimpleITK>=2.2.1", + "pandas", + "graphviz", + 'tifffile', + 'requests', + "nibabel", + "matplotlib", + "seaborn", + "imagecodecs", + "yacs", + "monai==1.3.0", + "opencv-python" + ], + entry_points={ + 'console_scripts': [ + 'nnUNetv2_plan_and_preprocess = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_and_preprocess_entry', # api available + 'nnUNetv2_extract_fingerprint = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:extract_fingerprint_entry', # api available + 'nnUNetv2_plan_experiment = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:plan_experiment_entry', # api available + 'nnUNetv2_preprocess = nnunetv2.experiment_planning.plan_and_preprocess_entrypoints:preprocess_entry', # api available + 'nnUNetv2_train = nnunetv2.run.run_training:run_training_entry', # api available + 'nnUNetv2_predict_from_modelfolder = nnunetv2.inference.predict_from_raw_data:predict_entry_point_modelfolder', # api available + 'nnUNetv2_predict = nnunetv2.inference.predict_from_raw_data:predict_entry_point', # api available + 'nnUNetv2_convert_old_nnUNet_dataset = nnunetv2.dataset_conversion.convert_raw_dataset_from_old_nnunet_format:convert_entry_point', # api available + 'nnUNetv2_find_best_configuration = nnunetv2.evaluation.find_best_configuration:find_best_configuration_entry_point', # api available + 'nnUNetv2_determine_postprocessing = nnunetv2.postprocessing.remove_connected_components:entry_point_determine_postprocessing_folder', # api available + 'nnUNetv2_apply_postprocessing = nnunetv2.postprocessing.remove_connected_components:entry_point_apply_postprocessing', # api available + 'nnUNetv2_ensemble = nnunetv2.ensembling.ensemble:entry_point_ensemble_folders', # api available + 'nnUNetv2_accumulate_crossval_results = nnunetv2.evaluation.find_best_configuration:accumulate_crossval_results_entry_point', # api available + 'nnUNetv2_plot_overlay_pngs = nnunetv2.utilities.overlay_plots:entry_point_generate_overlay', # api available + 'nnUNetv2_download_pretrained_model_by_url = nnunetv2.model_sharing.entry_points:download_by_url', # api available + 'nnUNetv2_install_pretrained_model_from_zip = nnunetv2.model_sharing.entry_points:install_from_zip_entry_point', # api available + 'nnUNetv2_export_model_to_zip = nnunetv2.model_sharing.entry_points:export_pretrained_model_entry', # api available + 'nnUNetv2_move_plans_between_datasets = nnunetv2.experiment_planning.plans_for_pretraining.move_plans_between_datasets:entry_point_move_plans_between_datasets', # api available + 'nnUNetv2_evaluate_folder = nnunetv2.evaluation.evaluate_predictions:evaluate_folder_entry_point', # api available + 'nnUNetv2_evaluate_simple = nnunetv2.evaluation.evaluate_predictions:evaluate_simple_entry_point', # api available + 'nnUNetv2_convert_MSD_dataset = nnunetv2.dataset_conversion.convert_MSD_dataset:entry_point' # api available + ], + 
}, + keywords=['deep learning', 'image segmentation', 'medical image analysis', + 'medical image segmentation', 'nnU-Net', 'nnunet'] + )
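
For readers adapting this template, a minimal usage sketch of the plans-inheritance mechanism implemented in plans_handler.py above. The plans path and the derived configuration name '3d_fullres_bs2' are hypothetical, and the sketch assumes the plans file defines a '3d_fullres' configuration:

# Hedged sketch: derive a configuration via 'inherits_from' and read the resolved
# values through the typed ConfigurationManager properties.
from batchgenerators.utilities.file_and_folder_operations import load_json
from nnunetv2.utilities.plans_handling.plans_handler import PlansManager

plans = load_json('/path/to/nnUNetPlans.json')   # hypothetical location
plans['configurations']['3d_fullres_bs2'] = {
    'batch_size': 2,
    'inherits_from': '3d_fullres',               # assumes this configuration exists
}
pm = PlansManager(plans)
cm = pm.get_configuration('3d_fullres_bs2')      # resolved result is cached via lru_cache
print(cm.batch_size)                             # 2, overridden by the child configuration
print(cm.patch_size)                             # inherited unchanged from '3d_fullres'
print(pm.available_configurations)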
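
Likewise, a short sketch (folder and case names are hypothetical) of the _XXXX channel-index filename convention that get_identifiers_from_splitted_dataset_folder and create_lists_from_splitted_dataset_folder in utils.py rely on:

# Hedged sketch: suppose /data/imagesTr contains
#   la_003_0000.nii.gz, la_003_0001.nii.gz, la_004_0000.nii.gz
# i.e. <case identifier>_<4-digit channel index><file ending>.
from nnunetv2.utilities.utils import get_identifiers_from_splitted_dataset_folder, \
    create_lists_from_splitted_dataset_folder

ids = get_identifiers_from_splitted_dataset_folder('/data/imagesTr', '.nii.gz')
# numpy array of unique case identifiers: ['la_003', 'la_004']
lists = create_lists_from_splitted_dataset_folder('/data/imagesTr', '.nii.gz', ids)
# one list of channel files per case:
# [['/data/imagesTr/la_003_0000.nii.gz', '/data/imagesTr/la_003_0001.nii.gz'],
#  ['/data/imagesTr/la_004_0000.nii.gz']]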