From 531bffdfc41f6c55c9320c55639610859f7dcc10 Mon Sep 17 00:00:00 2001 From: lithr1 <1102340779@qq.com> Date: Sun, 7 Apr 2024 14:30:24 +0800 Subject: [PATCH 1/3] sgmse --- bins/sgmse/inference.py | 75 ++ bins/sgmse/preprocess.py | 53 ++ bins/sgmse/train_sgmse.py | 87 +++ config/sgmse.json | 42 ++ egs/sgmse/README.md | 99 +++ egs/sgmse/dereverberation/exp_config.json | 69 ++ egs/sgmse/dereverberation/run.sh | 97 +++ env.sh | 1 + imgs/sgmse/diffusion_process.png | Bin 0 -> 243841 bytes models/base/base_trainer.py | 6 +- models/sgmse/dereverberation/__init__.py | 0 .../sgmse/dereverberation/dereverberation.py | 25 + .../dereverberation_Trainer.py | 190 +++++ .../dereverberation_dataset.py | 107 +++ .../dereverberation_inference.py | 75 ++ modules/sgmse/__init__.py | 5 + modules/sgmse/dcunet.py | 627 +++++++++++++++++ modules/sgmse/ncsnpp.py | 419 +++++++++++ modules/sgmse/ncsnpp_utils/layers.py | 662 ++++++++++++++++++ modules/sgmse/ncsnpp_utils/layerspp.py | 274 ++++++++ modules/sgmse/ncsnpp_utils/normalization.py | 215 ++++++ modules/sgmse/ncsnpp_utils/op/__init__.py | 2 + modules/sgmse/ncsnpp_utils/op/fused_act.py | 97 +++ .../sgmse/ncsnpp_utils/op/fused_bias_act.cpp | 21 + .../ncsnpp_utils/op/fused_bias_act_kernel.cu | 99 +++ modules/sgmse/ncsnpp_utils/op/upfirdn2d.cpp | 23 + modules/sgmse/ncsnpp_utils/op/upfirdn2d.py | 200 ++++++ .../sgmse/ncsnpp_utils/op/upfirdn2d_kernel.cu | 369 ++++++++++ .../sgmse/ncsnpp_utils/up_or_down_sampling.py | 257 +++++++ modules/sgmse/ncsnpp_utils/utils.py | 189 +++++ modules/sgmse/sampling/__init__.py | 139 ++++ modules/sgmse/sampling/correctors.py | 96 +++ modules/sgmse/sampling/predictors.py | 76 ++ modules/sgmse/sdes.py | 307 ++++++++ modules/sgmse/shared.py | 123 ++++ preprocessors/wsj0reverb.py | 130 ++++ 36 files changed, 5254 insertions(+), 2 deletions(-) create mode 100644 bins/sgmse/inference.py create mode 100644 bins/sgmse/preprocess.py create mode 100644 bins/sgmse/train_sgmse.py create mode 100644 config/sgmse.json 
create mode 100644 egs/sgmse/README.md create mode 100644 egs/sgmse/dereverberation/exp_config.json create mode 100644 egs/sgmse/dereverberation/run.sh create mode 100644 imgs/sgmse/diffusion_process.png create mode 100644 models/sgmse/dereverberation/__init__.py create mode 100644 models/sgmse/dereverberation/dereverberation.py create mode 100644 models/sgmse/dereverberation/dereverberation_Trainer.py create mode 100644 models/sgmse/dereverberation/dereverberation_dataset.py create mode 100644 models/sgmse/dereverberation/dereverberation_inference.py create mode 100644 modules/sgmse/__init__.py create mode 100644 modules/sgmse/dcunet.py create mode 100644 modules/sgmse/ncsnpp.py create mode 100644 modules/sgmse/ncsnpp_utils/layers.py create mode 100644 modules/sgmse/ncsnpp_utils/layerspp.py create mode 100644 modules/sgmse/ncsnpp_utils/normalization.py create mode 100644 modules/sgmse/ncsnpp_utils/op/__init__.py create mode 100644 modules/sgmse/ncsnpp_utils/op/fused_act.py create mode 100644 modules/sgmse/ncsnpp_utils/op/fused_bias_act.cpp create mode 100644 modules/sgmse/ncsnpp_utils/op/fused_bias_act_kernel.cu create mode 100644 modules/sgmse/ncsnpp_utils/op/upfirdn2d.cpp create mode 100644 modules/sgmse/ncsnpp_utils/op/upfirdn2d.py create mode 100644 modules/sgmse/ncsnpp_utils/op/upfirdn2d_kernel.cu create mode 100644 modules/sgmse/ncsnpp_utils/up_or_down_sampling.py create mode 100644 modules/sgmse/ncsnpp_utils/utils.py create mode 100644 modules/sgmse/sampling/__init__.py create mode 100644 modules/sgmse/sampling/correctors.py create mode 100644 modules/sgmse/sampling/predictors.py create mode 100644 modules/sgmse/sdes.py create mode 100644 modules/sgmse/shared.py create mode 100644 preprocessors/wsj0reverb.py diff --git a/bins/sgmse/inference.py b/bins/sgmse/inference.py new file mode 100644 index 00000000..9e5e2a60 --- /dev/null +++ b/bins/sgmse/inference.py @@ -0,0 +1,75 @@ +# Copyright (c) 2023 Amphion. 
+# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import argparse +from argparse import ArgumentParser +import os + +from models.sgmse.dereverberation.dereverberation_inference import DereverberationInference +from utils.util import save_config, load_model_config, load_config +import numpy as np +import torch + + +def build_inference(args, cfg): + supported_inference = { + "dereverberation": DereverberationInference, + } + + inference_class = supported_inference[cfg.model_type] + inference = inference_class(args, cfg) + return inference + + +def build_parser(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "--config", + type=str, + required=True, + help="JSON/YAML file for configurations.", + ) + parser.add_argument( + "--checkpoint_path", + type=str, + ) + parser.add_argument("--test_dir", type=str, required=True, + help='Directory containing the test data (must have subdirectory noisy/)') + parser.add_argument("--corrector_steps", type=int, default=1, help="Number of corrector steps") + parser.add_argument( + "--output_dir", + type=str, + default=None, + help="Output dir for saving generated results", + ) + parser.add_argument("--snr", type=float, default=0.33, help="SNR value for (annealed) Langevin dynmaics.") + parser.add_argument("--N", type=int, default=50, help="Number of reverse steps") + parser.add_argument("--local_rank", default=0, type=int) + return parser + + +def main(): + # Parse arguments + args = build_parser().parse_args() + # args, infer_type = formulate_parser(args) + + # Parse config + cfg = load_config(args.config) + if torch.cuda.is_available(): + args.local_rank = torch.device("cuda") + else: + args.local_rank = torch.device("cpu") + print("args: ", args) + + # Build inference + inferencer = build_inference(args, cfg) + + # Run inference + inferencer.inference() + + +if __name__ == "__main__": + main() diff --git a/bins/sgmse/preprocess.py 
b/bins/sgmse/preprocess.py new file mode 100644 index 00000000..22e1e860 --- /dev/null +++ b/bins/sgmse/preprocess.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import faulthandler + +faulthandler.enable() +import os +import argparse +import json +from multiprocessing import cpu_count +from utils.util import load_config +from preprocessors.processor import preprocess_dataset + + +def preprocess(cfg): + """Proprocess raw data of single or multiple datasets (in cfg.dataset) + + Args: + cfg (dict): dictionary that stores configurations + """ + # Specify the output root path to save the processed data + output_path = cfg.preprocess.processed_dir + os.makedirs(output_path, exist_ok=True) + + ## Split train and test sets + for dataset in cfg.dataset: + print("Preprocess {}...".format(dataset)) + + preprocess_dataset( + dataset, + cfg.dataset_path[dataset], + output_path, + cfg.preprocess, + cfg.task_type, + is_custom_dataset=dataset in cfg.use_custom_dataset, + ) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", default="config.json", help="json files for configurations." + ) + parser.add_argument("--num_workers", type=int, default=int(cpu_count())) + args = parser.parse_args() + cfg = load_config(args.config) + preprocess(cfg) + + +if __name__ == "__main__": + main() diff --git a/bins/sgmse/train_sgmse.py b/bins/sgmse/train_sgmse.py new file mode 100644 index 00000000..11a7a004 --- /dev/null +++ b/bins/sgmse/train_sgmse.py @@ -0,0 +1,87 @@ +# Copyright (c) 2023 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import argparse +import os +import torch +from models.sgmse.dereverberation.dereverberation_Trainer import DereverberationTrainer + +from utils.util import load_config + + +def build_trainer(args, cfg): + supported_trainer = { + "dereverberation": DereverberationTrainer, + } + + trainer_class = supported_trainer[cfg.model_type] + trainer = trainer_class(args, cfg) + return trainer + + +def cuda_relevant(deterministic=False): + torch.cuda.empty_cache() + # TF32 on Ampere and above + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.enabled = True + torch.backends.cudnn.allow_tf32 = True + # Deterministic + torch.backends.cudnn.deterministic = deterministic + torch.backends.cudnn.benchmark = not deterministic + torch.use_deterministic_algorithms(deterministic) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + default="config.json", + help="json files for configurations.", + required=True, + ) + parser.add_argument( + "--num_workers", type=int, default=4, help="Number of dataloader workers." 
+ ) + parser.add_argument( + "--exp_name", + type=str, + default="exp_name", + help="A specific name to note the experiment", + required=True, + ) + parser.add_argument( + "--log_level", default="warning", help="logging level (debug, info, warning)" + ) + parser.add_argument("--stdout_interval", default=5, type=int) + parser.add_argument("--local_rank", default=0, type=int) + args = parser.parse_args() + cfg = load_config(args.config) + cfg.exp_name = args.exp_name + args.log_dir = os.path.join(cfg.log_dir, args.exp_name) + os.makedirs(args.log_dir, exist_ok=True) + # Data Augmentation + if cfg.preprocess.data_augment: + new_datasets_list = [] + for dataset in cfg.preprocess.data_augment: + new_datasets = [ + # f"{dataset}_pitch_shift", + # f"{dataset}_formant_shift", + f"{dataset}_equalizer", + f"{dataset}_time_stretch", + ] + new_datasets_list.extend(new_datasets) + cfg.dataset.extend(new_datasets_list) + + # CUDA settings + cuda_relevant() + + # Build trainer + trainer = build_trainer(args, cfg) + + trainer.train() + + +if __name__ == "__main__": + main() diff --git a/config/sgmse.json b/config/sgmse.json new file mode 100644 index 00000000..000eb3ab --- /dev/null +++ b/config/sgmse.json @@ -0,0 +1,42 @@ +{ + "base_config": "config/base.json", + "dataset": [ + "wsj0reverb" + ], + "task_type": "sgmse", + "preprocess": { + "dummy": false, + "num_frames":256, + "normalize": "noisy", + "hop_length": 128, + "n_fft": 510, + "spec_abs_exponent": 0.5, + "spec_factor": 0.15, + "use_spkid": false, + "use_uv": false, + "use_frame_pitch": false, + "use_phone_pitch": false, + "use_frame_energy": false, + "use_phone_energy": false, + "use_mel": false, + "use_audio": false, + "use_label": false, + "use_one_hot": false + }, + "model": { + "sgmse": { + "backbone": "ncsnpp", + "sde": "ouve", + + "gpus": 1 + } + }, + "train": { + "batch_size": 8, + "lr": 1e-4, + "ema_decay": 0.999, + "t_eps": 3e-2, + "num_eval_files": 20 + } + +} \ No newline at end of file diff --git 
a/egs/sgmse/README.md b/egs/sgmse/README.md new file mode 100644 index 00000000..ed46552a --- /dev/null +++ b/egs/sgmse/README.md @@ -0,0 +1,99 @@ +# Amphion Speech Enhancement and Dereverberation with Diffusion-based Generative Models Recipe + + +
+
+ +
+
+ +This repository contains the official PyTorch implementations for the 2023 papers: +- Julius Richter, Simon Welker, Jean-Marie Lemercier, Bunlong Lay, Timo Gerkmann. [*"Speech Enhancement and Dereverberation with Diffusion-Based Generative Models"*](https://ieeexplore.ieee.org/abstract/document/10149431), IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 31, pp. 2351-2364, 2023. + + +You can use any sgmse architecture with any dataset you want. There are three steps in total: + +1. Data preparation +2. Training +3. Inference + + +> **NOTE:** You need to run every command of this recipe in the `Amphion` root path: +> ```bash +> cd Amphion +> ``` + +## 1. Data Preparation + +You can train the vocoder with any datasets. Amphion's supported open-source datasets are detailed [here](../../../datasets/README.md). + +### Configuration + +Specify the dataset path in `exp_config_base.json`. Note that you can change the `dataset` list to use your preferred datasets. + +```json +"dataset": [ + "wsj0reverb" + ], + "dataset_path": { + // TODO: Fill in your dataset path + "wsj0reverb": "" + }, +"preprocess": { + "processed_dir": "", + "sample_rate": 16000 + }, +``` + +## 2. Training + +### Configuration + +We provide the default hyparameters in the `exp_config_base.json`. They can work on single NVIDIA-24g GPU. You can adjust them based on you GPU machines. + +```json + "train": { + // TODO: Fill in your checkpoint path + "checkpoint": "", + "adam": { + "lr": 1e-4 + }, + "ddp": false, + "batch_size": 8, + "epochs": 200000, + "save_checkpoints_steps": 800, + "save_summary_steps": 1000, + "max_steps": 1000000, + "ema_decay": 0.999, + "valid_interval": 800, + "t_eps": 3e-2, + "num_eval_files": 20 + + } +} +``` + +### Run + +Run the `run.sh` as the training stage (set `--stage 2`). + +```bash +sh egs/sgmse/dereverberation/run.sh --stage 2 +``` + +> **NOTE:** The `CUDA_VISIBLE_DEVICES` is set as `"0"` in default. 
You can change it when running `run.sh` by specifying such as `--gpu "0,1,2,3"`. + +## 3. Inference + +### Run + +Run the `run.sh` as the training stage (set `--stage 3`) + +```bash +sh egs/sgmse/dereverberation/run.sh --stage 3 + --checkpoint_path [your path] + --test_dir [your path] + --output_dir [your path] + +``` + diff --git a/egs/sgmse/dereverberation/exp_config.json b/egs/sgmse/dereverberation/exp_config.json new file mode 100644 index 00000000..cf5c4872 --- /dev/null +++ b/egs/sgmse/dereverberation/exp_config.json @@ -0,0 +1,69 @@ +{ + "base_config": "config/sgmse.json", + "model_type": "dereverberation", + "dataset": [ + "wsj0reverb" + ], + "dataset_path": { + // TODO: Fill in your dataset path + "wsj0reverb": "" + }, + "log_dir": "", + "preprocess": { + "processed_dir": "", + "sample_rate": 16000 + }, + "model": { + "sgmse": { + "backbone": "ncsnpp", + "sde": "ouve", + "ncsnpp": { + "scale_by_sigma": true, + "nonlinearity": "swish", + "nf": 128, + "ch_mult": [1, 1, 2, 2, 2, 2, 2], + "num_res_blocks": 2, + "attn_resolutions": [16], + "resamp_with_conv": true, + "conditional": true, + "fir": true, + "fir_kernel": [1, 3, 3, 1], + "skip_rescale": true, + "resblock_type": "biggan", + "progressive": "output_skip", + "progressive_input": "input_skip", + "progressive_combine": "sum", + "init_scale": 0.0, + "fourier_scale": 16, + "image_size": 256, + "embedding_type": "fourier", + "dropout": 0.0, + "centered": true + }, + "ouve": { + "theta": 1.5, + "sigma_min": 0.05, + "sigma_max": 0.5, + "N": 1000 + }, + "gpus": 1 + } + }, + "train": { + "checkpoint": "", + "adam": { + "lr": 1e-4 + }, + "ddp": false, + "batch_size": 8, + "epochs": 200000, + "save_checkpoints_steps": 800, + "save_summary_steps": 1000, + "max_steps": 1000000, + "ema_decay": 0.999, + "valid_interval": 800, + "t_eps": 3e-2, + "num_eval_files": 20 + + } +} diff --git a/egs/sgmse/dereverberation/run.sh b/egs/sgmse/dereverberation/run.sh new file mode 100644 index 00000000..b1aa00f4 --- /dev/null +++ 
b/egs/sgmse/dereverberation/run.sh @@ -0,0 +1,97 @@ +# Copyright (c) 2023 Amphion. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +######## Build Experiment Environment ########### +exp_dir=$(cd `dirname $0`; pwd) +work_dir=$(dirname $(dirname $(dirname $exp_dir))) + + +export WORK_DIR=$work_dir +export PYTHONPATH=$work_dir +export PYTHONIOENCODING=UTF-8 +export PYTORCH_CUDA_ALLOC_CONF="max_split_size_mb:100" + +######## Parse the Given Parameters from the Commond ########### +options=$(getopt -o c:n:s --long gpu:,config:,name:,stage:,checkpoint:,resume_type:,main_process_port:,infer_mode:,infer_datasets:,infer_feature_dir:,infer_audio_dir:,infer_expt_dir:,infer_output_dir:,checkpoint_path:,test_dir:,output_dir: -- "$@") +eval set -- "$options" +export CUDA_VISIBLE_DEVICES="0" +while true; do + case $1 in + # Experimental Configuration File + -c | --config) shift; exp_config=$1 ; shift ;; + # Experimental Name + -n | --name) shift; exp_name=$1 ; shift ;; + # Running Stage + -s | --stage) shift; running_stage=$1 ; shift ;; + # Visible GPU machines. The default value is "0". + --gpu) shift; gpu=$1 ; shift ;; + --checkpoint_path) shift; checkpoint_path=$1 ; shift ;; + --test_dir) shift; test_dir=$1 ; shift ;; + --output_dir) shift; output_dir=$1 ; shift ;; + # [Only for Training] The specific checkpoint path that you want to resume from. 
+ --checkpoint) shift; checkpoint=$1 ; shift ;; + # [Only for Traiing] `main_process_port` for multi gpu training + --main_process_port) shift; main_process_port=$1 ; shift ;; + + # [Only for Inference] The inference mode + --infer_mode) shift; infer_mode=$1 ; shift ;; + # [Only for Inference] The inferenced datasets + --infer_datasets) shift; infer_datasets=$1 ; shift ;; + # [Only for Inference] The feature dir for inference + --infer_feature_dir) shift; infer_feature_dir=$1 ; shift ;; + # [Only for Inference] The audio dir for inference + --infer_audio_dir) shift; infer_audio_dir=$1 ; shift ;; + # [Only for Inference] The experiment dir. The value is like "[Your path to save logs and checkpoints]/[YourExptName]" + --infer_expt_dir) shift; infer_expt_dir=$1 ; shift ;; + # [Only for Inference] The output dir to save inferred audios. Its default value is "$expt_dir/result" + --infer_output_dir) shift; infer_output_dir=$1 ; shift ;; + + --) shift ; break ;; + *) echo "Invalid option: $1" exit 1 ;; + esac +done + + +### Value check ### +if [ -z "$running_stage" ]; then + echo "[Error] Please specify the running stage" + exit 1 +fi + +if [ -z "$exp_config" ]; then + exp_config="${exp_dir}"/exp_config.json +fi +echo "Exprimental Configuration File: $exp_config" + +if [ -z "$gpu" ]; then + gpu="0" +fi + +if [ -z "$main_process_port" ]; then + main_process_port=29500 +fi +echo "Main Process Port: $main_process_port" + +######## Features Extraction ########### +if [ $running_stage -eq 1 ]; then + CUDA_VISIBLE_DEVICES=$gpu python "${work_dir}"/bins/sgmse/preprocess.py \ + --config $exp_config \ + --num_workers 8 +fi +######## Training ########### +if [ $running_stage -eq 2 ]; then + CUDA_VISIBLE_DEVICES=$gpu python "${work_dir}"/bins/sgmse/train_sgmse.py \ + --config "$exp_config" \ + --exp_name "$exp_name" \ + --log_level info + fi + +if [ $running_stage -eq 3 ]; then + CUDA_VISIBLE_DEVICES=$gpu python "${work_dir}"/bins/sgmse/inference.py \ + --config=$exp_config \ + 
--checkpoint_path=$checkpoint_path \ + --test_dir="$test_dir" \ + --output_dir=$output_dir + fi \ No newline at end of file diff --git a/env.sh b/env.sh index 10ef7ff1..1722c4ae 100644 --- a/env.sh +++ b/env.sh @@ -28,5 +28,6 @@ pip install phonemizer==3.2.1 pypinyin==0.48.0 pip install black==24.1.1 +pip install torch-ema ninja # Uninstall nvidia-cublas-cu11 if there exist some bugs about CUDA version # pip uninstall nvidia-cublas-cu11 diff --git a/imgs/sgmse/diffusion_process.png b/imgs/sgmse/diffusion_process.png new file mode 100644 index 0000000000000000000000000000000000000000..6f8a6db0e6bbfd8c58d37ca94eaeafe0a7844fa7 GIT binary patch literal 243841 zcmX_nb3kO@|99I~TeI12v)j#^H{14R+itUs&9-g3HruxC?YVvS`+Me}nW?$w9=y*> zXC_!iN*E4=4FUrLgA)}IkOKn)?*Ie)KnM*5{KYYC@CWb&VaF$`01Z6cq4k5nz`lTq z3h*j8r=GN_n`;Os?>~lqZ)l|-BB2zkr_iRDmz)J2hlfg%%!c(iabew-I#nvNS~QLm z-f;5cS|26BL;cmPcKG?9E1$m9 zhW0t`QlWyDe`fvHKtP&Qk%F$JWkgdGo1u{r>nw>=jts>&cJ|?e0}4GoJ)s(~Za=Qt z-x(QJ%B7EwkF+dXPmCFZO3_tlnX0_2A}=ETW|)`XK)D^0Gjw zf^&Au$aH#c?Q3Aq2Xdpu*}<6CyH*d9)i4(~D$ zxL?pxz1;V98z|!p)|fm zV5rQ@eGd;0{EO?7oQhjcIy%4f1|n9;ToG`;cT!ihc6a%HVzb`p{Fvxy$ zjiW&dn!K%~sHo@@-SU5{&@-k7jB;pb$YDueU*9h{ICy|e#K_1fDK%AtyjEC9=y!6m zuDSV1RqD0qRuN)Ho?P(Ikf@=d;hcx(&LEPO*Xyk$R|~?3|7VQvE>|Rpbeg7KcT9>j z@2iao>ttkT7y$IDZry7&3!0IUQ9??p{2D@9LgK{PXf*<(D|8+EWXlI9HdV2cwei@BSl`4M8rXOa=_*M#Tab_2ln{P})e?~n7)-*Hhn=00#ao-8y! 
ztZffO+U*QRThtz)@7g9ctht7M#-Ib^Xwb#_Z-=Oik9_S|K~In_HgeeJEFzyB3*@zI z_z;;qhLbS9?{dHDl!3YVnKnf#gL}1}^3Z^5n6u~K?bB%@@Hpns+kwj0Xm!2}MvW82 z6pAq+O5&)CU64&*A5)DDzIbIPtK*_LZ@q>GCI-UMaD4b*5x9CxNuGKX6u6Qb{92&j zYG9P%xGd+E7+b3dMIo26zB^IPoJHZZ+xx?!uK#c6$kMa24rK8MdY^rEpotf2eYqMr zojjC^v}Ua#?zI9o=G))bQ>+kaxv=xsi=&c?dh)K*-1PYW=3bf+r*Hf5dLIi9Z`H7* zUZ}vXN`}Jva!XK1NT`0fG*DN#_r)=v(e1QuzFF)6JeAv{886+Yz##G8?z6{88$pwn zlypeD@K^dRRL#O0gVMNs=sIq-Z~b4#2Yu#lZ4;>vH0L}Acckh5SVFvKYrm^R=qwVf)J~t)1#Ji#n^!K)tl1vcrk|TV& zDr?>mmgn<8e}DaAvheu9fwB6_*yyOkuG7gHkM5o+WiGY$N+gVzqm`Pd zp@RJb1M`x$m;?m-FOSzL?R47h=A6Ipy8!4m8cSV73f_W)>N&NqUtXnvJr^_zY*Ux7 zam;&2OG~pr82Eb;$g=`WJy%ap&*|)8bDb`hBfL7+6gB}tfeKAIP7=Mj6e~O$Hf(@f zZ0&NNZdJ+juhn3_tO@kn)uYVg;mW|VfZL1B9$<;Lw*j003Z;n4y&A>qM?l(8Z?7Mc zBS20?BrwW;Wr0lbVtE|w@F9J=&rQvX?qMzdxm@r(XKo&5F5~+E8~38{ z0JTEh1D$LH+DgbstqK6*Wg-8Lv%aeS{{AJ=Pka0OEoV*Z4qFX&a-#sKU&$C~0!_27 zIqOYZ6i8?60o?~n<#dSwP^5JEZ=-T7hxG;+FfTN8bPTMlM!glA!S5KXTh&-}PdH>A z#RS`@8(mdZ#o{$1$YOTi945A82ceAIe@%UB2X=6*UDBZ zIPh2jHeOg*z!NS97-I?RE3mE4reh4nhNy{FW#^8;*kU@bCo9 zB#RWbCbIZXt6Atm-S_@r-JP$G4-P`VmqkMKCSTT%ESXFNDlTiKObbCLto`wfd2gtj z!|8%sVFHhvi3+cWK(7g4IgAsZ_fo9Z>bWw;k3W8bKe%6WJC9Di_S6ChV#XUEfS$7v zOXrZomr+Rxi4~iU%S#)Nzb}st8MI%&9s^{dNrS$;OsCaaC@d_@oC#F-&DE8y+Z&_N z*a0wo4*My;e;apvx&dJ9_Tj<#`E_PyrrFN%tRtx{@vF9FhFi@xzz);(Tx{b-@&)zg zdUE{>3%>!_QN4c+?aQV^4^@BiuyR}*JDzxS#dK@1?O;E@20j7&vtB$0&?UF9b%w&$Hvd+h04xX zM1UD?z*#C|!cE(!MFpK+ldWgfR7s3Ap}94z_S~Kndmli& zEM)+TTkExqub74(Doo3=?s30b4>q=c{j}<|J}Vr7Yx3q* zu^makQyz)SmaPr2z6U_o4&YBZedmVZ(Onk~6O^9kw@2>IWF4~{E}P?~ru8dZax+DGlds-~%Y<7F)>pV@|>J}vDd$Vf`vWp>Y z2(BGxGnbQ3bIb>=<)YhdSi#l@<`S%~I~aXkiJbN^VY zn-xoRu@EO_f5lMhmu{8wxGn2c(Li%}xwu0@?=>6(0?$=7pa3VdD9bhIt^ww?1=t&q z3m)S`7UWW?8@>qK8vwnvKJKO9@_433^8Eb`P&{WPSDML+#1TbLY-evTMDwhP);&8w znig32F~LPnA#ON$coqZ2tCX5!tGbIZ52Is#S7`?P)KCuF^`nMlk&%&%fEooB=*+pD zqpxmts2>o>KaXv^5PASYcy@OZi*+V}? 
zidFkiq~{H>mdkGZ-A>ad16^I8zkmM*QuSQiTs0bfh>VCRPbpNOia#qs5+N(%65&1; z2qTT1VJT0@t7El4W9F8aDcn%A#u^fPx7+CL~PLQfQ?meI9HF~P; z>l2hp{obYhI;*{E^LD@PmEq3ql_Da=?e_M1PoVbl_hp~Qt8g1GU_i3KsM0QX%M$!b z$A9zT;d^Xs?7RPgw2ZO%X=+-0nj~63`sdGzL;oAV_se=ILHT1Ri1#Co5i^`3#CMtZ z)Qk6?956$k{(PJ5Ml%>7$gvEb@yQ(F<+K=p1eQ5<)mNRUX=rJaAOm5YNMQN@u68^- zA*B5>WdxWE@D%#Gy4kPhDQRg2wzjs@?3z^ZEO@&j?CDOff4MA(iG^M)H>UE$-wADA z%PkxrHEX~l0lsBmD3*Mg5D7aL4mF)}4 zWlHWFHV%$iXxf+s;JpB)0Wc4M4_o;Ll2THDavKHUxY_k^^Al1Bf?G#D)m=lgSCz`n zR$nOK(;B#1hStNyO|1;ppACNg+{l{J#%lHrJluG>BzU6-r1hynaAfw!)SXv692`AU zQ&UY)El}tH=vcZbkx~w_Of?;?7-Ei}qe$8MqGMnXxGajUtpk{P4RDbhU82i&IBTQ* zA@aKvc)mN`9*ib#z;oK{_HS`@B=9c-+|Aym*#R~?LFGN;%iqi@!<609cg-{Kn=D*5 zMz3L-V^x&M9cA*R(i}Bbd*6!HWT&3!C$Oy?Jjaugl3KJ{q|UAXbH!)n^Q4D=NSdNQ z`-98N%LB~OpywL9GBxDVP<^iS(1qZ6QAhXX=K{VTqWTigkk@L`$djx;8!4ZYy?yPf z2{kGRFkc~nosp%A{|;CeJpLe-tIU$IRq3QvOzIT}l(m?U+rmC8B6|dU&cVy=fnQ1s z-N4hb4kCau&LmnZ&$|HB7{6KhRb(xT2nba}>=}~q9qVxRq?!GZ29|=GyPZypY09p_&Lw;OEeeSDq1aFdG}r! zJnQvY*H4xP+P<5-BIy22?vD%r6}4K(Cb>y|Z^Id)nEmrdLVd#$H36rsO*j(Yz|pb3 zt#Bl*QN8uNXDWPT(JAfWwAIE*E&0I{s4H`6B{yJsC=y59?_1V28^(bmW4Ah2w`%Ka zvfFR8-t4vz$H##UP`B@ovPc*uy!>myV&8zqVn}$vX;s~$M^}Vqw8YV6;zi zFUX~DS-0bc&66uS{jh$->;a&{3lETGMAV(?+uFtedbC|QjgJYbFYUeZ_?6ELrK>&k z^xES+-~ygEwS4ZS%x@dgg4W)BZ3Y*xQ;+XN`S97tZEg-G9jLo4NP_P@W~6G}{N*@@ z?CTypR4&;#I7V=+TDyW#$d~xohpwWc{5b8?uge)9}N} zoEhKAvc~~vxq<6oH~p4MgDS+1ggOy;v29q-JESGZ{>^9cTz)8YB3EBcLunW!{%uy8$Mk%InP&9*?~j zFp3A~=SG&6maDy~fJ|^$+Zubl2ZY_p$%zHyA_rGj+By%6o67_+bSd`ZTVK%H*qv+1 z+B)!ANqJFA8m}6U+Bv5gP$Yk@BR4_wQqeBPov{xnLv^})VV)->z-|M=%`Kmq9uuEgRIDUCyHtgSft}rTy=H$Oa5U-Z zmeRREzH|y>jDr(L;vlo{>)`JGlAiu&r^)*9YIhmCu*E`r;h=5>lQB7Hqc`EXj{b7k zp2bIWQIy38X!`E*o*y~!I}1zy%Zs}k1>@y#I*?yvl^-gdDevE>rlz)z_HfbD1Cp-S zB3v(~+$86_NHv4~SqAiFubnm^H*c^0BrzVBFTX1~76u-k@s*X8X3Vv+sUlfGq7yjB z?B?63`Z|e?kCObO7p?m-mlO1HI>Bxr%rXm>G_!w6?BZA;-aGE`MB%2|16vE>@XR>#hR1^cLAtzu6oF?XNk5=eFpq#y4f#2B^B@wF9XG%9n6cHeZkt6E-q3_w-DI!GkFZp)K@5D?lWR57ZW$*I##z zg8i$j>LCcdZ=6~7Aw6AN>=4NOThb 
zv(X7;hV_ftfjM>#O_uml|3VIHv${Tg!rzt#uCC3ex2xux$(A`KTpg9%>nlKQc%S4!`%H2dSPPDV=SIy?yF}Ee! zcaxt+j44rC@$VXcz{%Be#N>FK#j(vY_G``GpJb6L7VjM*6w8)}kzj{q7{ zD%Dupmc!vlVZI;Ye=(hOmdpkaTv&YIWMf;@f_f1()X~Y}ssO1y<*Eamq%Rz_9*%aL z=Co!~@Y!w3<+!Zjo%Sb^xl9Z#ao&@@${HZ*K9yaSD|!dVbmI>i0MEMK7;^|N_x1`X z@87NtMj&=ri-VB^VyK#yDnkKKc$4Y5fTLc54`cl=9ImVZ@)PaEzom@m52Ct2j0=E3 zL*FBi2}gV$SfSs-P~$2Q_>nMTpyhEpOFs2c$yC*|vl`#{pVu>d?N0^>#YrFY5aJ<0gVv<7h(N z`@pQ)ZqW%m9|$LsEALsou35>(=HIijfCBFfK+1AHpVYjZuK%T-P^;M(j;DTCw~IC!kP2#A# z2juTSUhNQp%XU?_RMprx_`a>^AoSebS9%|?-&f@VH8vOdIQ4}E$SnXH1mq!+uPg7& zyA=`pD9=G!(v!9@xrM=!p~wb62_SdaoG&)?xSX0aEJN3Nmp_-;6LK^O|L<_&FL1bU z`qB}MfvHC13@}Nwktz_C0cp%$y?YOklW^$X)<5eEgs%I&r#@73Kwgi9gR>0;Q;|0s zz}5nU`5KvPNBLUne-#^WMugObL*9u71UCKsfFLrkx4-$-dw6m}18hs+!{yVw*3_T- zrkU;!ijX?2_tz8w{sUTj0phY9ASXgV#b^Zv4Tu?Mz!q4ESFtcKbOUNQ?11^-S%Wf= z{GBx;uK$drES+zK1SW9e~jSnD*UP0>}>R@OM5gOFac?{j1rD)i|*{2mEE>u7w56e^FGy@N`Rb zf!qrjXG91yaPnj6-2GqB>bt2UdNbVJ-R76_j(OMy6$0BM6N1E z$LBdqhC&A5POhQ?n}^PS0qy*Fnt*kc3KQQw9dLq#0pwi3F#`<^P3?4ER~aVG_TsVS@O**P6Pry8B<36yHqJ&i(#U1d|sX9Yp77phwx5tLd5Q# zDqE4mO#3iI@G>+T4gMmqaEL7>8@X)78P^V_VcKU1aF}j2w+i)T_L|_W$t4sd=dE$BAKFKH|${z7Go z_r~X|8Lx{OS598{+;7*~FIF$-Z+Gk4DkeUKd|YZ|`)5KRoL~N^=oF*3PgaACZQlK{ zF1Xa<0w~J&h{5bxrdtDwTZ01$lzgkb$ShoA_=w;|kns=L>1m`&grkKKMh3xg(zzb4 zrK<|t$Bjzp!JY`XN6QML8f}8hxe+#RmLrV&l@t4epTz6l=q(1fc|>*~#RP z#DrD4c@Lo}A;dn@Y;FvqR)KzqP<2Uaa0q5_g%Rm8zktSvbw^M=HOQv8DXAj~lrQuV z%HxHNC25F!D#eJkc*TPC46GHfbvOiM;iUCV=~5`IUi1X7@F1~I8o zV0Tf~-*{tEY98^z2+ayxs?{bNleGs=DqwB5|U0V~BXDaMh zW_z3V3NRSs@*Z6CK#f1T4i97}=!Mk=>4o2^;u}veq ze45&=34fKu=r*e0)BIH zG?MT41)7qI1-&)_wZ@gO3Y0-II;>g{v;c~@n*Jb)S*2nK8CvlXRCo1YsHTeCH)A*% z5HxKDQa^-tD=QnZUlZEa z#098!Iv&nDMrYoxcx<=4es69`^Y9-2e&wQk(j1l4j;Yrb`^Dcs=`I_t3dbim)o({q zk9fn5T!zL*zGa9)WS}QL*n%t!;>&x?ZWo&y`*y(Qez^XVd13`NVQ>iiqtY)K{<&XS zr9!5W)fEpyRzZkJBcrhKD)NyZ%b7iJ#Tj>Y7S{DP#iED|t1RVV2xTO>%T_nA?p2>O zhVG&GxGCKf_zxWP5edvf)@h zu&`o-v|$3|d{p=|Ayn06VNkX9Bvj;G~Hx3vu!iLWdJ4^c<4Gk;6^WI!OH;phB+-vuxczS2;;7%%ytChiyJOqtkU 
z#WeecY!2R|T#z=zg}X$xl*79lsS>r*8Xi=hkE}v7$8TzL{OqyRD!L?gU7AR2=3-p7 z7%#`9cR|xTxBCyl><#X{Ot zyFy6FIv|08rsAI;l72DTS_6>Xb>ju2ZZ?P(8W{oiQfWM~i zXmiVVU7=>sxxAbt53stC=KZLV33jG8imqnLh~vL1AMrN2Oyfg=cz~W#3OZ4AZ;axD zjA^$YJU@qdI4p3fPS_KZzdl{_)}$QuYW?s$MMuqznsIGYw}GKtLjh@e%I)r^j8cRa zy30iUYX`>zjk`5k=ZnE6^yuIZUux!*ugaQQM{tkTmNnSWxJ6i3U-&MJIy!p2o-7b< zn@85%V^_z`3OmU|@s+WWz8u^`yfy6*R~n-Ne zh64RKM_?&@VsT?cdOH+?&qIq2Ik=?JTGAvfWK*aeS>xhnkeQ)o=}@!9{6hg{CuysO zNEzy?A1(d(Ql0xy9kbPkFUnl~X!Ta`7UOlf{u;$>;6RR_=X``ZqbsQ#%5l=LxL>yI zR|z(W9HvGn6U01}lA1#*r)~gESVe=ed?h1>)CZT{_-jtb%nC0WdWB@QO3UeQ-F5wl zvYDe9BC^cE4s=-~sZ^{U=@*&aOtox6Z;oNf*?sU(}$LeS@?;*+4yItNqSeemxjoP*c&XBHyVG` z7%h*V(@eP}0w!E8#I@FsR0Dew<|>e?BaxwRfr_`A%rX~~PI8eb7KB`yn61RgNB7}{vtvlqePGvN|+8}?TIc{{RlJcLM8^}2aRj$^# zndR8fCA_)d)!l>^P0n&#(4lc-8={F7y+#k&VhONVIN6xhRHN1HQWAdR*a9NC*a|a} zR4W@U&uk3(CPz>nl%ILV2>k_fQAakBsVA9TaYIEu#bsj~hjJ(N#lweFHFXv|t8y+@n{%Y z9jS9}-`a9%@tLK>TgS&gP}|yKfT#Qhqn4`4R)Pt>D9U!|wcK|xAIU~$Ewql+lFdX< z916izR1_Wx3Oh3(EtQEeux-vjRpmUDl`mO|QXU+lfm{6&_Hn8j11GV#99dw#(ob;M zR%V!Yn2Kvjuc9q6Afyr5{e#-pAS!8{gz75TAyJ%QM?CMeErs6XO z%7<*jXeae?NlF@XYC(DGqpCihc=sr@+yi1WYVHbQcjEm^3HEGU(BJO_7hU0qp}1Ld zaX!C)r%-H8n!#-i;P1**B8!;XbdKheebN7BFnun{T?P`19$$+SKk%JF7jNH_)-H^4 ztbVaojIGa_w}C;sF)|g9MOZ%=szf#(ZWm@N@0tzLTnzG`#bx8jp8q@G5#%K zq>d6}PxX?mV_eD)QhmBjFyfv9yk-nVzuR63wIS$vI^^~jYX>UOr&JsV+s(sBA>SG| zkV+~3-ikXEXDq)I>&H6jkfab9bdjTl?+e|id)wMr^j$Z3iyH{`+b$$l8B_d<^Z8m# zdF41LH{BC^k+$kkOZ(7=0sr=3@^$^tOm0q6D~{Wj|H8`y{!PR5!b(eKqV}=x7WXUc z(?izYRwCW)_x!KQ`AE0h12S+{CU~DqJ61GcNh^S0`nl|@cqxj0L+Ge_M%g;qoL`Nx z_>91l^ln z{{FSg>bbR5rKr%q}!u->s`~6elx`o*F=c-~EY2O2kCZ>;*%K zmy0g&P*{f)sqzWJrcTiZn(|pWSkc?asH#$yo$u!~w6cvwVrp6r?mCImhiFS5g+_)z z$q;XBPlc|UD`Qk%jU4=P-Qc-%8luiAbb2I8*TF$`_8y8bOGk%qkqmJvQ z?gb?aC@A`B4G5ZrmRzEln)>G#B0>v>bs|xkkC1)ok2L$=~O}Nh$cP7$S7~=peN;>?~`4abmT;f)EP>m;Ne1!D%#JkeX(=hBaQXRr`XDK@4e06 zsY&jxLmd^l7)_@MU&unHa?Hb}vKc(bHo&!D{9RoXAcH{4b{OMCOC{@P)~Ao#CpS%` zXLdwI*1wmy3$=>~C67GrlcgZRRefQ8U`N}&7~74`Qa`f-`w;?t#Pg3WqP3!?-ekp; 
znbBdLj#GcZygw#yE=ilE06+7?;+0~u+T{6?dk$+RRu8KiU-kmAk+_`Fp8|&rHB`IR z3Y005km}O@5_h?d?wXtLfEJnB>c8C3YEV|@?L^*}ZEacC5jIVkf2kX70DT}(c88Kj=PH-3z^Pub ze{@hKEDhpWq^)wL@zkuiIHvx zfTF@DV<~1E(OCn*pM5*v zbc~`xFV@6)EH=gE(_h?)f-G+A+KLT)RZ+bx`qw!u)I$|N3js1DCH4pR21MCuLhU)o zh)N_%!8vsIrY*$A5+>{r>0u-^o*r^iitjAiPay`fgm?UY#}}umkvAKWZ@s(9^TdBI zm#9iELE>ArIloL^m@)Q4KOUa8Frl_gLok!XCD&4}t5@ENj~Ai*&_dd|1tt2xFJ3EN z8vz#D`J36x$*qE!K7Ng?=&Flr`WMckw;v}W+~_`Ygf zMC3gFQjwT`;##>z246&UF=1mo6k~!~{<$?lL}e2{1M?AH6aNzRXK^@c)9AQa-)(&v z_AhbV0RY_|z+56d-BQ(j`!il!y()KZ)l~@NEVu9D~e)X}O+C!Qo72iVzQ#0)@o|!^GJ% z^2S{vc+W|dvd4oIy^%w{l;`M6^y8Bpi)?-GOt7SufJkwz6ev~G8boz5=it|*Me%2z z(&8Z`jQ<&r@5YK%@Pof1(>|!8BuHgr2=EDBi1NJ+6YpU z3kPRV3Q;VJT4>alAfW_1>so}V# zF^DG=x>T(UBAB6U9RIl~v&+t_k@xd7Ygp19n`sMoR7FP7fx#w%*UejO=8Fus+7D1< z7*IbJB59VIMHN+X*~ zWk%=TX4kq(av)qw0S9zy`d7z6I@SR?)=2;GW5Jmn{O*O#E!!Y+w6AwNl{T1v2HfSs zFFpFaV|Q)qevy8>%Y{fZlZ1eOZZ!fzHnGBm6j6F(b$6_|D*sX29^E1=B(u@;dzTes zwYa$Pc6)@6S_s-K*qn*sz|E@)5M|*zkwZ!cqshi&2<+OhHdR?h8*kIvqs42i3PqPf zO#E0pe-iQq`hEmy8$HCHKf7NE3XG0VgsJE7^QfQX49p3LO+qu=%#w-yy0 zDMqyZynN<2a9kI;79{N%4cHoy>%DHmT^|&7Z$+*C8oQmRk^I)GR&c_)xE{OP;sLK* z4FBow`^lPt9hW+JJ<0kC+9p+(k>Y2b1b0P;rZt6=QJ8>F8rVoQb0kN@-DNs`QC3sGxo zcc!38+!!~SLgXj~Bdx7*M|rd@qT(sjCE4^#TxgP8qan*h&KGq z6QX%kvTmBT2>z~Zhwd_%P;cNY8mSa2KKc?0X27-2xs%dxs_Vh@+3wVQO(O!4f<{L^ z!Yh|tEYC8v$RY7yLO)cWeM_}@Fc7`ulMY!jGI#=43F9=q1mlkwV0HaiJ}XZpo$n%^`pVe~xAg_(*av7emF=z0U1j!wTtGBF!Po?a) z$^l|~!^sr8kXbmvEKClzDQ#({pg2v414}uE&uL{qof97kpR$da!GVB#y{iuGhZSsi zR6b%qU(~x~+F+x3NZR?eWivv|;M2>clIarK`LQ zO?+uhLO}3`N}mLMiEE%ovaytnfsm~f*Q6xFP=((_&{z=KU{a5ZpOK<&ELaFB_U_5} z0GSfhtL7V{k|3OE_8^NkFNz{Gs&v94^I54%(AV4)%unc0NyC*(EeoHfBG@Q+q8M!& zXMpyc3LiKRGEw~j`aF)`TXj_S0i+X)?ok$nZ-NqvIN~TPD#n8w=@xV#J_kQ093o6ctw^km+)9|3$`WB5VSD)| zTN~^WqQgR0_@o2Z^Cd_I$>UPSD| zLei^jZ|T_P!ssk-^knuA2@;dUwl{*YGWKQQM|SkTLN-KF+*LUQgbl`2+s0+1DXHWuW61c4=rn9q3MTo~L_%i- zd5h#B^8VFTJCn1(M zhJ;(}jD>Gwh$QDEnq?O<{`ASe0xhpFKoY)|heR5|8GM8n z5?a=bm>ulResp^zPRPrdMo`f}B9rp{ 
zE&Liuz@|@Qowvt2t7KHTK^Y*&A_>`19CuK2?cQXXr3=Y-C8$`*K`pzql~1rO@}T61 zfwi1Bj<#YlL78E*lD=jKlqX;m+NE? z5so^0qger5B_v^Jh-o~Ru~fm$8sbJI(RN{{rm_F?A#MiEqCQ^A`tdJPf^y10A99h! z7(-m7-ejXzTvhc-YP889JRgzhxsSaRKPJyrZ<=Dbm8-BHTX%9PK7sSp%i4gHMO?Wg zOVPwQG^~6o_=v!31E|BJ_G?&Pwf_8|+H;-Wo<3G>=Taau61Rt7^;n4gRSF>ub;H3j zth92&JZ^-RrR1(*a-mDr(_YHY7?2kr&Ny{4sy362@7Yk?xcAwV@TSSfmE@pKQ^?hf zW){yjA{Sj8O7pX{iu(kOY&c1|{m=r7_9|@S<=fmsY)zw@?Su8K0ke#|Dw@01H?j~2 z7hyTme9U2fM#S&v&Iz57{0Y=PAI5XApY(B9v`h0B;zMM`YG`Oc5xP}d7ZPf+Ad)Jm zdAS=mLVlMF?bX2x6V+;DeavANnAM<^cEI%!< z@`G0nxx!^>rUbokD=9`xMj5e5W)*F7PwFJtgg;;;5kr2H$R%?GxBz;!4{BP$>1M+z3t~3Y-@5n_*J|}z!9BIyrKPJ9rGi2paN7dQQn&QY)$5w zV#)@34Q(UjlJ>oYD|6I_(p$%Bpw<0tL)C4=^CiMp$%g(%P#LTkcEHhZskw(4nPM33 zIsu||Y{D-SIDPIXU>B5`0s{(ZIWgS@P{F-5b za{B-TG%Z{0`6j=A7zhKW{?f-!Z(B=Oe(^XK5*ii74Ix;+hZ#=Y@1P@-k<5IXlu|H^ zVnQv)PiXi99*)KVbk^ab0+;@7`Zhnm1m0X25_Gf5FGTY`QYtK{EpHMOHa*iSfuXQ0 z_IyJ@3^PtV7Shy%pa(5ALK<*EkqDNPiE4rDMQhO@_}nk@{4ArNQwu5o#QlM#&lVWb z>PGeL}6SMM1%~Z0_jvuKRDPwPty%bLiML9oJwXnK_n^u zPSXDa%GDP&D<3S(n zr&O3Tv-1%7-A}&Plipv&B$ZrP=zfuksy2#9RjCShtLJJ`&d`-3tJmzM5as8Q5VPZ+vXxpIaorXGns4O{1z)w<8!u^JJA~{hO z&8>myFm{zviC(f#MAYO9E3IY=hG6@)UWO?Ac2zT{Tm6s8TWd@6` z^;vnLcm+wj>$vLe9Dfdj$qM;5I`5WNhQ)1nAN4Mm!T;~O0DYhN=^!k`V8gc!@?2WY z9&f%fol7te_neookq3Tyx#9h&4TIhykWVxpzT^49ADw`~*QLa_nS{uKU?Mxt zb*x+>d(>UZ%gpLuRqK3m{#?ag zgAO4xwBS@pADnoKTwX-3mrH!uJ0l}DeNFnlRxo?rOW6K4(}-(j^~(}%wG*yU=iS}R zp8S*Shk9Xs@MxZ7V-;oGnw__IT}PB3#xCMGdAUosy+T&6WxYxT&i2WLe;3=ad_&ox zO8^FHI8&f|c(7GcCFxuJuJrkjBPOrQrmbC;DwB^W92;vt>{6ZF+eRK{1^b!qzZqjC z{YWF)H~KYRL2@h<4dX0BQl1F8_W29}xkct`IX*iSxg!Dm>rx7u_? 
zqehM>1-h$cvk^hO@59KE4e3=#t*mF^6|5t)qvy*%uPkJ4EJB@c&n3UyT0F15c(@h+ zJ#|>mXN|WiJ!h`h&A6&e1Z(ZOYhRNZhBXVMC_<|K^oQP6*VLtty8tKPcJEKjGP^F} z4*o%V0vi}tA<{EruvV4Jv=t>x?!*^YLsle_n?3eRvHpny&D2DT_Z%K% zD`+?Qae?)%rZ;L2ujl>KuD*`e)_bjr$5-jML!RsVjt4Zgeb^cLH17h+_R7R63WXh0x{py%8BRysl<^&{3CXVe8m1M7l6%8F7T7vWa`^!j4DTY*O zO+!V%5oKWMH$YAo1Gz9%v1ON5%(|x#0{60q7^(4ebfLnCf?VWGl%|b=V|y1;q)U#C zyJGGx`g9RI12JNkcEl9uf&&6WHReE1@OYePnd%7>J!BI$gd}jO=8ie0V2X}+85(j2L5FQsqZ{nk6ATzAY0KwgCh}AH2D^fIcc(%UfGF(#o zgiF1Ws~-cK7V;yEp^q9Hda(Cbs`%KGrupC*zz0#5?!Kd3x-W%uq=tIx0lPoJuVLL7=L5vnp$2v4bvEew= zG(jP4OWS#zmsHMB1j;^z8UBW2I#476G zn7M0~tpVY=TSDb6xl#;F;L15(RuLm4UG&_|4QJzym$m0*x8kFb@|`F#$@9VMDSJ?R+R$j}CY zQVfQIM>EGdY+3j%A5R6fw_MbM`-6xQfhK6aJ8f8Zn%}J(mK~JRkZa9k1WnQ$$v~zc z$_hz8@KHjNtKNeCopHx!D`e>3xRen1rx zJ{n|_Y;(z9{5mHa2o4`8o^1`Qjo_m3{PX6LXLZM)Ka6DB5rZI;f`@~aS)+*v8lTuX ziBAC&JRg6kcrk;@_1NjWbhsdSy6ef$or`oa^1*-mect==eW1_L`U4>7ZGiN)9`v;= z;Pv+Wf#1I=W$0gx-?H7e$tZfgs}S;)=JyqdOz{;+e8c><2Z>5(tS$)A5vAV8o@Kq^ z>;_D}B3Ji7H-$YreWR_7uYh)MM0<~zCaYhI^H>3rZG4A;6Ef$;nwT7|9^F&KkzzOT`gChwj%mGRoGUe#}mQo#1#RbdM zlBp6GYot;HU!hWlR(b#2;1N>x?MZ==KEsNVKua4|gcPu9h!hr^My%aSM)@h3C}~4Y z7k3~dI%TM&@u8-2Gh&M5X3S8X5b#9lsnd?NyFf^R)CbgQwU>?zky=xv$=+nQrHgA4 zjwY>0B64p2h#&*k;Y);&WO9m&4Iz3emOYM7j!X~Hrl56OVr-eaC$wRW(h8|El4}9O zs{Xo9!m!6o)jb?7LQ5PqK?JlgbX16>i9YVHXGAV1xRmHpLyn;ij>~QX$ukg#l=g&1 zY{*nX#}f|BgiHSdMUJ7wn2r=PR$)ae>YkkxkwRuzF`!Kjoiqx~X?B-!I^il@G1Z5R zwM3$st1(HGUkw_-(sa=8yZ$&jbp@!5~Bkm3{XTa>WU;4GD4v> zJ_SbNkab)kM1cwg6>APv$u3sRc|jE$>bRjaC!A*Iq@-wL!!9haBIi^evvil-FHh)1 zPL~=?DwxPqlsX|v2^iMyg3^q+qaU)CE84i@P@b_%Rj*f;0}6GYOe!w?HJh*_7YCfE zGcutOvY?GSwxK1b*bmAqr^Q3+5J+g|{(=K}mnwLMreuw0E3PRh8LC6(e#3jI3*v1VmK@$VfM|^_M{WF%`9ipsR`!%^SbV=Y- z#t|p9zt4H zC>~}3R5Sec$y!%nk zqgwFQ;(}lPJmX>6aGnKPXUPX!cI%9x7UU}OexWI~XRacT-Ht<3GPRoHX+staAK1Wk zYxqAuJ>a9UeBu^@~t*Xgs`O4A;b+>y*bwn@V^1J@l~68 zuzUlI_k(~}m;E+q{Wb~8w+%*a%N@RrF@C*$p(hmju#a|%o=tbbEIdZ*KEADs6+?Zp z2cspWIzgczK@q2Hx$fs1l<5-(Vn2vcQWK&hGb4QLX#5%_ER*a3bzD&SYn0Fo^bsKi 
zw8}^+(1;BJ#n2qmge6_5kV2E`f-Y{5N}=~rXSJV`>k(2|l1yyE)jk{lltj4B?>~Sh%pd^ql!zq)SzO8r$wL;QsU_u{U97j zux5yk8AE$Y73Wxyp@gMkP8TaC>KuV4;&5?`h^aRPlHo){zj)9gOW_8CfN6nczAvEn5@ zv`8tja)6PRm;!BT@Ki(*$jt;L6jEAz>~pDI3i#fCPDp`myk?@#h~BXh4TtG2B56kA zfQ{c_g{JY27&|gC+)t}I-m~WbxF9*v5}zEUA0b13e=||C3xSv#j&(*R$E@Rqkvb!| zj)oOlyn{_s8rg8HCtUiDni-Y}m0yu$LPtZzHD~HRMky}SbCOJil#?^Shkzhar~xVN zS%2x7`xk7xIVa{_slc`8!{znQ%#DAb@HHEMmpm~v&4y(i9V)E4F_4t=k;@p7&81AyD+nEuQ$dJO2G3e7MsF?W$g%~gqV>n> z-)uRwmiH%$%Uu9pa%ZYJA4q<)id?i6yRPJNm$+^n6H{_lB<5}6y6wo*fSnHrVHg-m z6B7oncwE)A(Xwh2KRwh)?fB1MCrWMjs%m&v*A!wvD>HsM zk0d;e7d$8pW1V8a=UFD30t z@}4|n-G2+yc7orpBUKkTE(DpM!I2P5OyuWR86Tf^6w=dl zIg5}pu^#V-T(^!te@}5*M4r6JD5B@ZBIj-y_>bSe;`2qpUq2pmoCQ7@cl`MWTmH+V zA-|nx+#N(DlB$FEi^OLOuu?J7iLdJ&N4DgmN__Xo^6%~*@sF35=WWCJK#&Q~oh#kRi5_})I|x9tymx7e+9H>++6V7zWmA|aK=$ezhj zLZf9x)vfmCv-rkgUTcbPX>D)!jp$_)ua`HO>l-$@mqFzFW~Ls16X??-`o8P+zr741 zMu(INA+lZu@hU~4-~Uw$QrdUljqxqs0>a`}555VWzj<%oUcQal|F+-v>^KP@Tb?(M z39)BIrH~xu_YoqeP8&pkBqAX-xES%NVW>_SrbB#c*v2g(IIJuf>N6VFblnaa6frqU zd%!>+(&rdtSef^%J(3U`5WP0HahtyQg=nc=FH;z0ClvaCotrU`8Cvu}EyO-uBSc5= zo-#k6Zs!^ZqcX{rrJ|0Hir(=U5CyKz%U8Buiq)>fAMM&g&KpQsr zBpBKwT-?wF&)D3f6BWDe8Z9%t>nO~WI?NENPr)tZ6fG2z9yBT}wOjRi>C`8fME1ox zXvi2M3}~_4KP#S?96@-hINO^KBT1%So|AweaIx2FM?5hFN^wA@hg5Mv7q`8vA?$FW z!i5T}%RcoYX)-Znqb~5F`a z2^N7%p7VT4N{O9!EYgB4TXJE!4ofmd9O@Gal~V}Ik(sg$HFICH@M|mu5A63@rxi*j zLX3>W0R<&mC^n%VgoNmcbQI#0E^QghAyZB<2$pfifx1VXc3gxfH0hWmz*5kp3Y9eT zHj<)2%bb;1l4H4a&*;n%F-WW|IZ9`=VoWA2E8o)KY0?%UC0ga2ns*p!gN~7{A8^&p z$yA?^VnjkSprWFoqmVg;9JA^cEc_Gx)%eGlRC3krsAGrsFcC+9q!b0{gc6!roa0lC zP6LGYT(~(qzXRZIDOicfymfpu&RK=X(6m%Ra3GI4A7?DPNJ++nf#Bf*2$gs`3#>!S z(<(r}gYQg*YJw`@R`9N=Ejoc{=FRIAY2r3^q$UINChT1_BB4^fclttE^V%ADb zh!iq%*>>E?Ejy)AGO`OTAzAL0hOg@tm(DS+HSZTg4l+UPzXJH9L(j6YtX=;s{p1La zvyR`cC2cfJR7su;5k(>4(PoYinoBn$!y>)puAT5`>sWis`2Y}#LPaVU@qoth{-opm zBgtPsReZjJkOV`Om^)AH6F;4nBuaj>bWE(lO2NiiG>Y>QmR;n!akO3%Riwkyd3fGz zxNZ$y4BR&b_e#q@trs9lJ{W4oR#BS3*R`Vyk;-{?G!#(~#Q+GL=7xbW6k6eu;kto? 
zEb+&Ol1J+ii%xNv1uh%-a@%JImqzf#&aw$x##)h!jAevf9r?2}Pp%SAYRSe&23j&( zB>rM%8R$Mr?=Ta*KME{6!DpMuRRbRkp-COv5cs$dJl!hp79N3M)j{P9?+z0ebz<2` z&dR{bB_3Zz+MOhqP#PF(&4+h7r1yOO(vni*lV!vu%_Qp!KMA2wqGRm?U4X2Oh!m-U z;L$3Osl;{(AB-eRA34txBhz~&hNfqL2Qs1Y{NxDE>H(2JrXs&vNNTURGi>?6Sz_S^ zT-JhLZ&oyF#Pd4iei3Ore6~`M5+QZ`;-%#zhk0EPQ)J$1HrF{PSw|)nPb$mr*PaJu zZ!xR9WST{Yg0YS$6#Cw;7g*Io@r$n`X_=F&z@qUCOeB(6cCYb_{x*Q~&DZ{GGKOyhOm6~Iu@??} z6MDUUo*Qt@Td#{(;4h`bO|>>s1d;ZKNxZIj)*s8Nf>Ixm5_D?Yn|f|Avrzl!vD-^8 zDR@&xuxEH)#jJ@}*K>1iZ$^^QS30}dkDDY1MM~loD1IGvcEc2>eS$>VZ*v2}_xv?`H8QZQy<#?+~%O`AOvT4LphAt$tH+ZWDIhbYy*S-q?2O>YAh z-Pah|v&Jzbq6)ZFGtuYxOH1pwsFY!BL5iMC59#xhbsq~T=_|HT(}flzC4veg z3$|TFQi550!NNbK@+(H!Azf-2h$D7xPHx9kaZVC}NMi2RI67347-86@1y%|gx4`Km zN~_p~4Q-cUFx0W7O*KP~Q9^TQPgulj#&W`C_mV?(kFz3Yl7_5fy`P_PSYdj2vgjbRLARh#VnsWK!~?TQk*D7XFGXNsjFS&$=aZzvNg=d)<1-saOz`XWrF> z6ll_lwclcuU?>VkW{i`9fd-?;`=@Ki3MClPE4znRX zz{Xd!cm{IJ%B|S=Ifum|V>x2(C0(qk<7#ihG@t}hYP6Ir-IkaBDIJcHJf=&Yi5ycf zp^66&F5vL5b;?O9BAM?yTK}tbJU(7}&Ma!HsqgaF$ zYd-_vaeK|nrb1iCB$NDZ=D6&x`PT=>oaCBJXr{*T56cCEtYEdxxo#mQ!M!qP-b8BO zuuB_eFZ&sUX>M3{J!AA>013@i?Kv+7%<9C>ckEitqNy3^f}xW9bjypE7a2=~I;z;be{eUthwmx#%c|IBo-YE@# zvsj{pVxZToeTkMiLp5MxG?M;3*cXdXOpRdcBCD?DUmaO48buow&#H!1n(;?t%Zv4h zmGiV>!}rT0zN%U#Rx>t&%SMvxnkW(jZP~a;E)Ave{CW}bc)ouSF`I(H9m{zkc=Tn& zr7@Af*WHX~ts@gh+|5h6U}$3hefsq_k;y)WOzV!eYY6OkI5^^Ivtgiyd_OsiNK{_( zqE;Lh9o7aKmK@6?3ga+c;;L<#lo2fqFKUg8j*Zuh3=A~f9|x9Q7X>Mg zoECxOLF6nK?40DK>L|Tp;uJsr(*gdg6^n&p;TJkT?(b%gA_Cv)ETZ$jNS_r8$;{GpuRw_m%}rUWk<*PenZu*dl`XHkOV@6JpleD(0lWm-fG7;+V`4)O(_v+_w4R#4f73P zf1Sr0_CQ|kndlC<$^4CP^j;$NS;XvR5N|N&Z{Mri_ww7A-f!Ff+kXG1B*rqr$BLa_ zkclBykFlv_<)5*L*T|$u`>}`(IhLZ2A`=dgH0yNHOC}O@VoQpG=pu>GOB674p~9yY zFIu$9@u8(KN0j=AZMy7>JqSrIM@S5_?r|#S9smF!07*naR4b&CeG!QmspEn^`_}Y{ z1XN7)8BR1v6j>S*l*7fE5Ic6Pdr5^kM9YjIJdfJH=gi(`8@5Q%7s}WP-^XvI9Wi+{ zB?5s_1tB=*=@KCXt=m$_1GLaIVS~~+1w$+uMrTxEPALyrcF*v>W-RZrOIsRt`=S*! 
zxfwH12N+6}G_3p!Qiv3C!iJVEZs<}?6b&iDpg5y-JH&oAKnaBw2CXz>bIdL+DMb%D zgi;t?(6}|EZeJ=CJPoDUPd#@%BQ7MQ=y9}IT~g=?D=vu1VN8LJ6;_l)0g|2xX#5hZ zheRR<+ZQ~Mpy*5B-nZ{C5M!o#KovWlZl2ME9TRiLR2|`Pbg88hD>Rx+DoQjyMKU>L z!-91k8LJ6}(x`Aot~4VZA&sbAMV%HDa)3)6+tzc>-s3@WmqpjI^c{^4%)<*ZhFD65 zR$^1mRNdh^ydck})M-cSEr({p($7%)XG+NlO^BT9dswM3GLZtTe1nfI4vkEaK1YjHS#*}JsI2W603bu}KY0YM9C)Ni?|R7{~*Hfkk{tBTK5-5tE>$LCD0RI;Bg2 zZCX;tjsYXa@-FLmMMgm+v589pc@I*R1vDmR)TuYs5hXMCoJQpkEJi7$5RAl_MG_QZ z#5yievSlELEaQq?9C6)5hVqbsJ_2A9Jb}o2#fVHN4ot}|!ch_FX!vqva8YnvY`L2s z^0ew`qT?j*^^&7O;`7~>KOK$8q~bU0HI>(#Sb>p&Om*x`9tlCkzk^^Hn8zgoe%r~0#_85b}YRSxbtgdMjWHNFu zJLGQRXi~$^=NsM|PIx%Vk=r5P8EIy%;Ma2vYRA|r4y@p+9dd5_&-aZBeBI2j3^~uI zTsLr9Za5vcRDQrj4%oPaixML;I??jUxf>uzWm8{B9{2XOcEVX074g!1K%1 zjNdGdIL-{q7m4c>80f@*c;NWwr6=FrW#iSfl0X74@}NVLz5al%p1O1!!#SQ z>591_~owUkALVndS?0T>qDMw`r=gY>d2#I&dO;r1tZhP z-+g=_csNKrUs}GnC@GC25J=HrWk&$=((!JBVJ%sC$J%9F#RY$IQu5WjM5)9F=WCvS zHsGr3XmGq&4Y{a#Q?!*GAt@FYs{BXeIU-c1@2j=({mbn#m_Addu8?gHJdU@-we+g*4DSfyp6p_AxdEeFt ze;XjX(J0*LZNH7}?0>Y%dZ$0oh7DD>Bt+jcu86)OTkJ<&w`CY_N(bIN|C?7BU$-c| zW=wk~_eP(a_Kfwd>q-HH+Oxc2AC=a-*l#oC{nj_v*QZeQ=@R|aa{u>DIp?_V&u_bb zf9T%azM^j%^t{aqLzj9ZQEtbi7!eMvE}4q^?9vQm&%$MD&;v$4BU59n81G9FE2?h3 zFCw9F^pRXseV-yt*mB(iWg;Va2GWpc6T)6UoA$*#tQisoRLwPo9;4(w)(o>}>7nv{ zb*aLRAPI~Z5b$6W4LbxdvFG?$k??e>Ata9`qqI|Yu|vvUrzp`>Zidw*xg1f46;>4p zdME!jT%oi;i;_UT#PhsfmJz$ zKEuVmp)5tJxS-^KCamZ}MIpxf;w=5lfmVvjRTO$e6E_s*urK!EULuXeh~9MQV$%;0 zlEkNqp*o|WppJe1@5Wy+u!n@$5b?NBGg4E0tjX1oIxI*ck*N`x9pYq*LKA?(jF7_6 z`1L+Itw1V`PZe5PN}Y4zE;yE&ZK^2JfCu(H0)e5<80w6r-!iLL7)I1-MaPh1c}ATo zTG3I(B_nx+N}7r-tw?lHk-7nOYAM8|XWx@S35}78g`X3)ASJZ1<08I5Aj#E$136*S zRXA1%G=(^1-9d*!ri_j2@F;enkLxQ%!Bm|xu?B(Xt7c0^Mjc%5ZcmOZWwcltR$R6_ zCY)i_n5nqO+Rt#JSI%DgYj!L-k$2Ha5-4y!XQXpF8jQ5$!tk5zXE++}*aIwvb((V| zAFvX03;KqjZLgTb6!~mwo1Irtvsu3v)q}o!64I(KdiY+rHVnn74^}br(lrcUAo~1d#)g;*)dB>bkhXr1g z7*S$mL6$5owRCbHK%WdRYKKmeWvjW9MgCz`@&7#2v~fo%BDu2MFCLPj;JnCjA;U#@ zy7e6Ak{rwD>wpo0QXQg&VA=M7>d{*8;Y9JBLBWwW#FTinmiQ=nC%Z@E9qUFiRj}*? 
z1rvUFps2$D1)L8fpDi;Ujy+HQKhoa(M~>{y_WRr|)?8~ZF5PVQrj|6LnWynAezsxw z`G>Iq|KR`BfMFOg1~8sS4<5}TP3uW&sTY#XrO4W{a*2%XZvG*v$Rb6}`@umFnU!%P zGHyg>#<}O7?>SqAz_BM3tq)97K_UgI+TkFx{IZSQ&vJ}Ve6t266LwS0x0{|%%Y@x5 z^5vz+MTe1shsB7c75wW}M;FgIPVaHAh`eYb2tz4zjEpD|`TYt$j}s( zWW>abd0KZQB!nOsCz|~t@^mBE^ns5ik~0Er815sVkEXv!e*#<6qg$N@O6c=tti=@V6JCW7GBkndu7T< z_msx)+~(XFTapCMD$VJ}pvB;We6QGL>jeLD?wKTTFM}$D(V5Wm@}0fl9pZw z(&LOpm(mAM=LE#TQm_z)FQ%TKpXa37^TT;fA|t=MEcpBLikZ&&p$eQ^MePM&%q_3l z8PC3V#9p%Q6~|@IqALjAb3C$8OSV1imOXdJ5WJvX3N}l{Wi#CG@8^=Gm9$au^<`kT z8IdZFi;~V7&ijV_Jf$S^SNkz{NovH+#nPBAj^k>x5RL9s<><= zg>?LIs<~_xBlZ|8us)(CG)>M$WBBSSQmDvrX7KTd#(VZs&+ji2jL>{G3w%1S`Q{?! zA1*u(#|b~08SbQ!RWH#B?iQART}=3(h>UdPS)+MSUI%kb1KVJDc9QXO=}3g)gOM8U z+a~hsE6K*D>=y#ntjXJiJ_J74@0sYx$0A~V!lPgHbY2poq|k3v1^SPJqTAr;UGVfS zqx&vn{4R@pd(#^|@W0XK7Jz-9gyN=&xCYQ~lHG1?f*{05XRB)&L%^t%-fan?zh-|m zG4v2`1N)n2HyYzN`r6k_^IOMv`?#+Ej$x1`2z|Zw+V3|G_GY}L4ZbO0d;7e_>r*A- z@MH7FB2BzL-W%Juw>$n{0=w@X?|-SSc(NT39Z?s3mddbF?x&>o+RdUaj-61_b+jxCs*S^ zqstnM7;eJCpd~~B(F<}tp|wNtS|rfMHEEcVsxg&6rSn^SuuRMWA_$@o*Pf>>ZCFz# zJEY_+`xgi$NaTp#HwYo|ShoEVlV!AFiy=+|G zV>xE+mvnB;s^4IAhD;1br;ODBMwN7NeQoD9ShFQEqYgvNv5pC~UlE*Vn%@~dWx*4I zMMQ}Z5rrXyp0#~N8cR@;Qtn}6%~an(1;tf-L2Fl}(qn|i#x1EXaG|5I8;r_Gbb*Z< zTDPS%Q!MFqvR%SN91_Iv{Zi3z<(3r5;2nJPoiQS(5T31Dpv9aJBUQMfr)DC@BqGC! 
zjZS}bs}XHV~w#4$p8&f61=Dk+&$#T9MXk}zQ=4(LV4 zTo23M;fs>7u0+lWVY6<0w!l`e0e|lCj*u3p?o5MNb+_5}DJmWLF&^@k~`h z)q8^M>0*lyf>aokwz$|Z6Jw_G5LC}I+fi|eWB}rGy+bAx=XOa5VvwlBiaz#40e!TX zpvY806ME{{flwq;^TIu)6hml=(T37b2sp0X8biT0uF2(?PHef1J((}jWY#R*sDKCxEhC7?ZZ6sQEuTybe}7r=gGtH|1ZQz;u4xRVQ-1p1)ZvgO+*GLd7p&XJiZ z2u~D}g{#?>b5MhK>3JJi^@3NmVqz5gS>&V+Xz6Ku;8|_yeB^_2m;nmWv6opc>Wqls zv>vAC#|B>Ymck6frAJ%K!aBT{{QA=K@!YXjSpIcYT@Qx$a8Y4Y;Hq!QbdC_P>>XVY z>?T7@^E?TZ+9QNwZ9Tu;JjY1EPo{I8w`;D>43#xl8oDU)F}-$48vJ_4#?ZJ(?EgCyfHEtu#LA;SHUWjA-6*}*DKA^3W2I38P8{V-QNGl9b*@Zr?6 z^H5Mf_H=fTRR|(BIJUjvU%sERn+E3Nj+dR{$z{X{O=%p3POwo@TTky4yYq^NQ^7^Z z_;xksVPV-zBQAOpCHT#`CRc)odp(<8@n9;bD#^%rKHKS#Lhx)0FKWZ_#G*vt!nQP( z5bW@G}DO*kw( z=bd6UI2O`**t*D<%NijDOF%}E`QDZU~xJw{^I;G^l@|AAKKdc(dl+b?b19>Yzm z_sL$~E{HVJkWe6zAPgoc=|W9yuP`bhkt0kj2{O>TivF7Q8Kt|Zn9%w)Avk)wMM%Xc zIi$9i_~>wS*wE7ZhD2p}-Q$F%cQsN(Y-}(T2rud4ka(7v5hJ-v%Nm4727^&4MwKKY z9}NAYMhHzGYeI#oRjNvWo6 z?InpCF;P2oeocsxlrf#G@U|a(e&v{v+NWVfsb*N;5%Fx|z)lAj2*G1yM!_6U&&r?R z!x|$Kl2}r+AW=C)$+A6RobF>(L9TXip{Ms-QZ*q}6EHAS2Y4B2SmUVSx-TL^7dF=+ z28vV^?5O)}{5b-_RPJ-?o($j4=ou$Rq;ka8T~O*dO6EianVu0uz=kc}OIo)^3QfTj z7dl$EVq%UlYOvVKMaI4y^U?>bi8>&zvg(5-H`ikS^0f`jXc2i)a=RoiB%%0H3hA1r4WQubceQ>nmlnqyGoJR@G z6)lYijUf?=T{)$t<4!iPC>Pxsqc~-*=NKtjyMc{&K`=J38;2W8Arw2Qpz=`1nkER+ zkT79N5Df_hP4M76sK_=h5m8c#f+I8Mq<=+iw;Uu7sKS;P?Q>@47%c^b8nI*cIkzne zg;WVLN^GoIx>HhJk_v_Pl0*wOc7Y@z;>eZ23CEpe%*t&^=ZiQo`J zLZD_WM+kP=^npZ5eCX(-q>V6^2LSwNoT2tIo^2JoY0s0kVol9|GvCFiH7OI?xS_XG zgj_IEGb*d8e9!$nXO@hpt>^S&vQO3Ex)*g#@7S^cXG{xam(be$6X2L z((}TW{Q6vC%bd_>)nz0EE9~5N*rPegT5rX97y})G0@w^|=DbIiZ zKO}tpOGmeCm?p5B^-Q$EhQR0ZhF(b;FWJ8@xO|rI<-&pJ`CyXJx_~tvDFY|9WTI1= z-f*%C>}8gpov*oDN*<(#RL&6Sc`!Cywvt&g3^bosiXx9#X<2lVNg{ZBWmtBai5|i! 
z_D$q@9ob^}VpLEX!LARy>euY)8RwnlH>-vZMYv;2n#uNSM29IG(mHiIrb(S z{_$kQ^R_3IhTSwl3Bk8b!^3&cU*DN9|Fb*%{oe~pB^dAY+!KPcCbI4WI&}Q`4ydAF zSx5F#7#Yj6TJo|DOpRnGjr{tmr}2`H#~CM$=W)H|CwJhx^OQt-B7@sW9BC|p%6qn?E z`+5U>2kC;kr4#1$wr~38Z@qSN{%@YY8}A(X*+#8DyMAwOQaC&Yfq^xX=-GWW)TkJ_h9)eaLm$B@(c3XkNYbc*wajUAZR9>^%|CFzr$9N*S*h6`kOO1+# zC@o9ZB8B9t7nm>u)>L6b;!C>F(D*f1@fCNHj~J_xQaN6>Tcm_#STPs-w861uO`$bo zO3qzP8!Kc?*o0GZrX0%$w4$Yt73X$A8YXnqSUN%kq{0TvMC@?to>P%xeTz{gO<3a) z93}hg>M7eW*!tMmFp`G^eHWsl5CvCUa3t;^LW;-o$}JG&)N#v{J$Cg0S}BYx2~<>J zgNu$*j%Z^`8d4C750m@6bf;8)MK0%f6xX6%%8L8}W9~{Y?#tB|EJx1v3013e)@tpUPksN9c zJ}k@-%FsDZyPi69?C3d0rub_nb&^I_y<+1R?CV2Z6cpO=$<*?TOGO^1ti9!L{8>R@pjLGLt=8pTCtc`%av zpg5%Qfq&^L_EX5UWn~kTP}CN_y2v@+tN8WH4I`uYbZ)uY4pIphBZ(4xd8sJGbYOXt z$Y~qsf~R&h&~eeETx}&)FIn3_VFXXR4O1i7+LZ4$8`g_CBPp?`5Rpt1OXCGmfGu z4};9Yn4X=9;5TQQpMRN>Bk4oJ=X1?`Z27PS8x$JYEgWAjA`d2!uUCp6?g@Unx5IAX zIp4yvfiLzw`^uBZz%Le>qa>m8u(#8q5wtZt+oU{M4Z&NJQQ)$Z+%Ix2I|;61(KkFS zb3UJV&bpirj~ua(WG_k%M*-UlR&B=IL?-i=FHbd9-;#=w+6sRDJmb<@{`Su`-#+(z zcN*ExJh>V1zy746y=wV~zk3C~qmUUtcp&JO9T&Y}l4ug$@w`s?+aEZ7QwJ8p@S<{j zv*~z{7kp59x;VIl7^ATf#))S)$$4_2IIZD%6PPE%ckS@Fq(759S`};^Obq}gm)6sTy>GG*Ic0v-~Nbv028 zZW~%9h!AM~dPp&o8W|HBe}Rh~P28f%hJvbiiKfIwOCKsUDPwVr(Zx{YMmN}w(V^s^ z`&44kQwynRLxqrvU_DMq#%2%87B70P`Xz}hFtQ*gqoE?I;WH9K579G84p@g%Vh?M7 z$~r(}SEOc4uLo&_kP1RT=%JA8wELQIaxhqd36R=c2T=5Akup+;^sMPaO=dD^ ziOwN{iw!9WxtRmp6OzuYSu{^s zixZSi@u6p`kI40ys0AwIND0=rXp=z<*DiyCLe6laM<7Yn2#7RpMW)9XGPYsy`uios zF!H|Q6sPfI}-o6Dzu~Wmg{Ify=O< z6oyZVKcS5s4GkkPW-lILNLcwbsYvi4VAO=UnlUmd2?sPTk~7CBg^n3PwoLVejo&g) zN>2NsaH4mHvCf&xyC@+D!qSC?AOeoTmTK7#zot7$$yL|0wvurS_~@xw6J5cX;7XFhVoYDV?{B;v7Ye50aTa8g#q@5>i&-f-1gbq%u;G z<-NpqFikL@%j(kHe9QIR}ZP3d8J*j|Let78gASsb&>5|GYTm zX*ER)Pbvj_$&^(uP%`q_Sg^H_Yk0a%*{(Go<$|v_7mW0XK6FeH!i# z>n`xCrAEiVk0u&ETVZ&Fygr7aiN#>D0!bnCM7x=+$OHJVK9wp>ijq|Yb9lL2tsZzF9 zu(AplhOn23b|@A2=Y{7e%?VA1au9vwfBzQlWrn*ufvDiC(~<|HilegS%aelh*7N5( zftPKAUN^Q_WU{uFNmr%JOy4grW1oqO1Ju%!V;ZEKo6U(j% 
zT-J(TTqJxvl6*WiXzB3R5$f<7{N4t5?=qq93J~4}e18l)zi(gge!cIzjPlL1+xA^I zZ0Svln|0z=yIb!ohInJa5c#P@z~}_y49?d;M9P#9n%Dh^x8wpsn4w44R%k+AgU{DM z`S$sE8z{$k9mH{Sgz-A);&$8XLb#iMA_i^>mj>B_WUy_!2GG}3o8TI($7?x+;EmnaY5&xsvQuEWbb&Cj@T?CkPt0Ea<})trRZy!!S^4j7*RSlt_lpmne`bA<`od)L}^>2k_}! zO>GyH=^hjIaMu!T8cF4Oc5+i*eS7Wn8e zD#g)ZWP!r4ZO_qZj`b}PK~#=ZjVZv31naBgy@rs&?-bMPjq4WoXbr+U(I49W1CSDPJPa{@19EO}e)FfKqLf}dN zm>o4^*X$7)vGxlZQ85y8QVCXcSP_QvPoSk4yl#0xLP;tnNFkZaJubo%Ui1sJNSUhx zn%J^tG4we&dTIjIa9w@PRoHMZ$vI$`Tx*1#v9XSso-on|>tPgfF2aJ1?}3_LbkyOJ ziI}mC4Y^E-(vd68L}bijN#h-T?9mv;dMM^Qaf?AGEm|ajf-wk9>jIgWfKaUc>EH?? zQ}*?k#`j#=4IdW!d>1x!zGdN0Kq$Q5CzBKAa>6oPvIz?S4w8HHMMdyKs-h2?=XJ-( zNcJ*A?IM?*XD9U>Wrjs7uPFgZ8yu%?$A?8S_{b3u25M=g<*&*+vd&*-LW{`=D5*s3WiDc125_fsD8U~n31dcMpSLcS>1uohK z+pihxBXTijN0;p7mM>VMcA5T58_Z&y-`Z@TYqU z*Ey`$%u~U)Rf|CLqrHy(7S?^u#U|%JFI#e*@X^Gf0=!x#TuHd*64t_X;dFqjP&f1c%wjDp1L?{A{jYy$*zV#qHTI3uSJ@+!tZ!QeE zl>FDn9bcXo^d8Qez>E3~Ce`AGNqrB~`-ed2U5UiMwY<+A;u>1sFu0Lh>)_o;-uFc( z3o-oAy$N@Cdk^9@YdaXW7it$RazY%yi_saK8~lEi&>#)oJ8t9z@tQ3eY|11;7^KjI z*k1$TcIb5HIo7Zj~>)Y)7n{{uW zyKw9A-D~^Dw9N0W+2Y2 zhvdoRdMdpqlQR-Ecnfd31yU+p=$Yvqq!b_(Gd;zIj+UB)f|veVY^bkYI|Pm%XBP+r zN@$``D3Rj)C8gM&FkK#6 zv`m@lI|LDtGGS{^alXY!jq@G5<_?+IrScbaZcV9nXyRZAm&qAj*pSE(Su*9Sd4}}^ zgI($|K6oztV_fJ-WyVL@7bGELWmlv^GiH}4R&)-W>NA34mA!u3Abt@t;(qm%CXhTm$Ln<^n z&hgRXMNfVmiR?wkfq6g{$2cDuX~j4ZiK+L6kN3bxGyn&vq=2D$O^m$ilXK zJ{|)p0?(h%k6Cn*ZRq)S>uGIdKTEkY^89kSLSgue9nH@!Uy^db+A3NX_;3V$>_}zG z<8>I~iDx}$9lWd+9hN(J&ODKPUj@G3T7I~*q3TompgGF+X&uO@DWzmpYu2^kv>Djt zPvZA&I64aaU8@0J-)HYuO)HvG$3PoAe7Of-Mt?{Pft`Q#(dv)|-=ln%#gW6tl_9shc% zdC@)LzIlk0aDUXW^^rTJV_7Q-?bzCkkrtdanq?o^^n!J-xv(qN{fJM-kyJ`z9P{za z@!`0n2n~AyF4~;q(sSN;etw$cyri;*XU#eLSw^N)uB@P&ME?4Nl9O{u-3eZ;pbHg7 z6kPStg~(6tSdMI@aXA-u#r*?ltAb4(neRYu5{lNdwwB*rXuiAB+!?_v(M*$o3yzTq zr1FjKLH{9}`xcbsZGd-sc^8=eTYBL4?c;q@?C;y}8+P~o?QR~gLEVjHKnS!<5l9^E zQ0x}LNB0_thCgswxLvpxU$d}L+_0waL@58kamcq{k8k(>-ECsDf3Uxs-rlg?11o$J z=NxYV-Wyp3uiM>-N^YOOH)s9b^LBgr!vT%BwpM%hy4l!(hzQ|n!;0QjIA_s1!N@@l 
zp=3g5YlO-$Iz>cDCZ=rtCF}MCA01LCobMn8_RAlT$dWFu=vd>UqYE{TmQqDrXesm# zj+#V`5X#WlD>}bL3IPH#J)sF#Sl3dTJzQ*17@Thgy{tCZuPI#U1|M7!TDPJOXUy{t zS=bkB-5M|(AZ0Aw?PPr5)$hhK$7bTR;Fy*3xrf?m6M2qQtk~h4FYw%MClCgdz!Gt#g5F3 zQNq&)g9|OC+@}r|iSEecj6OD$dWXg@aG@pDB}!y8e)HOTO$b4vi$M?Ud!$LZ@=uXc zai~9`W=)|CF#M48ev6WZh$BxXq``rdL&!()Xq6zvz;ucbxe80X=phIiS)tresN!N^ zU(YB=sltXQ?KvxZPA2!5#=9UKiE*e<&l|vXjkObPkQ=UOMNZwbXvgSnZMM$fAEi?NWMjUUn%G$@AuSO!O{C z$wzomATG-eahQ^I4 zlE6%R^r&Z)lzhJxJlSsfzD~%~j1ehWo{{Sjt6nlqV9|k$fr}PaeamULU}naAKF&Ch zf>4i{=NW6iCPvBHcZ`){W;DO6w*2&{Ejx80$UG zWjyXSOf>voCk$70HP6Iol!ak|e3 zWx_b=_-c`|wvn$Y%S3_DfumxC2nnqXv@X!aju*9L(J6?Mk0yaa6ufjj&$?4Cd&R!F zPwPFc7j!Nn2rMm34fI}dyw`9{$Irf<(geXL(?PoN&1S<+I)Lso-?OJCoO{dT{)GK- zkJd@bq~b?OfkCjpZ@H)?Wet~|;D<*&moF60)}C<^_^3=!Do~~t+opW|e9XbYhB1b3 zI!A34JDEl+Xo6>6SYE6pd(v@v-tx<*8N1`i$8$qI4LH5!a0h<PqXVUWJqmt8Q@MX2+ax>vhl2dAf-3lIk6Ocqsx+Ol0 zIjK|r!v`J9P0v3r8bXot=Tk+hBqyEWSI;DOv%uqx;HNv9Y3lgm-kSgPoo3Z2{-+<- zESD)?o+V61j(`8bnv;v1yTuzaPv4&Ge;1s+0Y>iwt~c%8)$QIaw+ph~-tOI4-amZr z8zA|%#Nut1^L^m@h7E-nB_Z^5aYZU;h$uK_P5#J_W{b=y}kvm;~RGNjZbjA4&Ar`ythI2kYDvWyn}aB-QLHr-(LR)1P^Q8 zcdcHFJ#KA%``W&_MlN*RND7R~Q9_YrJGihV6(vD9`miBWB~d`k4dNx_(2R3tJ-guz8aW0>e8y09Hw zB)rCohR$yg5ppr2^5;zT0hXSvThRIysVdR3pr-}`QW;bjfTWg##ZMx$A@xcq5P{6h zkV23#LLkv1Ll9{N)OPS#6|p5^F)_o*jxyQBhXySSQKWQihJcaSUl*V`wA8p@Y5WEo zIwtY}L_`XK55woe2Tv2W*X(Y=CS1~l6^Y47{hd-2Gx?Ss}^h+7N9!=vRgkzzr?;W>B01rOZ&Gpido*kL;>BK_pgjMM?@Q zN5m8<#f(0M8#{-j2@N$X)}3QlJ)uoCF*x?r9V`Xs@|3MlBpIncF?Xr$b zjGPcfG1xp(&4oWBml?IUER-0g;Yy;TVih!6j0p5tkuy~fvB@Cil6jn>Qh`E|sWF>y z!oGOG!|b~hQX{0H_XXE+K%F&CVO64VCAl7$q1Sy+sWT=vQaQyJ%?c|N|NgXPEDCfr z;_rUl@p7%H`U&?ljgpeB>)FPNnI4+MMp`n}hK(!9`VJozPd6SV;k}9Dv!%vLL7f_^ zpvk3WrX{O3Qfk3iCw$2Hd>z@#EmjI9(s0p1CJi4IyEn1Eit{#LrD9>G?C8YbT{)&k z@VxR|Hxsl>d^i?-G)}a>V5S5=zwFrr&%U-i)DYs}X_N?#TgTVc&|sXa5l4N+>e_Lb z<;;!c!AS5`H6zONzP1_A<>y zD+pdOR)PmJONc`wp_YnK=J?H(AXf>O688&D6LOBXYp&fn3p?WFb-`F_j+=_}*6_G2 
z0mVh*$#vi$mmo@@1rJJq2vDdYUU}&^vF%ED^Cs49N8+%T34rX0{mPj};u9w6l+FB}cX6U;JLj`P%aGvgE)@j1>InFtF-NUThRQrQmz{2i#AV zxpmA`qM*abp1M%XWX`B_^buZM=d61HBGIP?BNt>!PzRWno|BE`=!+SLS%j$hlLsAR zB~hj1lRuRFum49&?{i!atC+B(L>>z+SCLPz6NMhwzI&zPy3Y~JnHL?OZ(9y>&BqxW z7LGsK86mZ0-bC2i$wGT!?+HKiyD6W8HiXi zUQ|45BV#)5h+U>C^7Aer`!WC3;Uhv4Tz872THuzN`E<)@I&gb0`^b+EBz0$4Rv9C5 ze)2NnYQaaPV-s?YnwFQ1rj3!Gy>R@a{lF=jnauGzak+wDUP>}sL$F+~EvwGHrDb&D z-K4c|tK|MJad%r7e_O@)+w2kVinVuB#NPdRQ-!?|W8$sn_wKfG5SBNGjmaZ1jN>j6 z@C0hPL`h3bfuVWsmenF9UNxt^3X+JgFRW_VXXtJWKi|C;4{leqP!`g$wS;`R8wd+gs;z30~Wy&AV~d(5}RBfq;HH(?6dP36|nhYF)| zGCjuomdan@lS5LWF;Y{U2-N-pBQr8F=CXTE zu192QLQg~G&v9W(vL12s<=2M>DJ2R+p=Wr}qh&^$R>T;XWQW7K30@0KVGN}jc&=Ss zqh*0ghE6PL*>E5pa@Q_6@mD~^rEajjBuT=PgrW|YNFj;ABQUJP5lB5Sc+-kZmb7j| z6W5e-j#ie&4UJ(t*&|ln3qo{cdPYi-F062|CD#+IDiI-IiAaHv99ET7 z{sKh8g`V1P27w$kf*O|^LU6jLxGSOnEpsMffse2W z9a3ggY+1)E7R*p-5VT`CoKwvfCpvNzweJT(O`xTvVo4P)=u$mY#uFd~nHbShQRopQ zhf0FE-6h7rHqaqhB|H7*Z0ZO7!egJl>{PVS<<9S z5D}8%vTxZ@Gd5y{r)42#Y*IxaPPP>H`vz&PybMiZxbQ zrn008TSU?&H~c^$Z$2@VTrqGw@CcC*M)w_+koCQ2h^V(TI! zE&0&S*|>tLQ+%=zJh|U<*1%CMSoa`rKI@=}KpTd}Qi0;(MA3VR7Kyv1;G*+%$$^BG z%Q){FjOm%G2`}o7i%zi$ju+jA``HdDNm@T7q=g`eakv&)6z>-rBVpw{N+i}UF*T7d zE6qm}i%~gEfGibkyu%8`-4d2Aaoz>SDhCpm?wZHt4ws!|?L3=Uao$}}hxb6hK^8d+ z4Ft_u?-|=Gd>WC2U||jKjUy|sxNZ}-@C4RX~SQ3*?p}#DEhPhRQUSfjbpFdvHtvp5UxacPQ>>`p&`2KEW zW&=J*CVAwKMu|WAaLd`b=6tEyvx%h>990pCW~2q5E*t*kdo`ITXkE^Zm3*`pnA(i< zO~OaXy~CEj{5sKii?@ouIFsDV6<=N(+R!4EOZc^wdjvlbe8Lk36;vuq{vA+k6PjMjHWyuUeCZ;jhKc8+h_9KLPx^7ie?llS?vKl@kwyFdRc ze)H3FayiGxA$2X6Q$q4=-35t&lA0I;xtbzGBBX#-C4F3j5RCO6g<8f-s-lqyz6mo$`;9^HXViX4rix(+IBaezp^6wirI<4kduXXyyEB~MV)cmDSGd@*$R1!~PN8C5$Q;hn{6dCdzUPY zh&o~Al$p3o6|PY!BNB)yVnjivC-~U1?XN(l0ppfDN?2T|iNaCnIf+E;uLp0v%K7jA z^?ysm@jw4h{~HNKPsL2n(IO*7iA^OH*LYe;l0LRHX~Tipr3x!HZ0XYGwe82n0gfJs z1RRIv4hl&U8Y2`QPs<7;G<$lNFZB~S3^`3)v5i+4C0K#< zTCrg}@SepCpE|azI8b+Q2o0VjA~TayXlPQ;sXt~DS{wnB=JX-4j4Ohu8OxGfW(3)? 
z@fD?<0FbMkTpBhk>7yVcM+ifq6qXS_1}@VDJ8I6EJ7ya%@gxibJ!D_)ap7NZ?q1L* z!+t(y>1)>MoT(Vo#ekPRK{$krjAYK%pR#B7s8dZJTl(b4nKBj=uERNAdKTgUE#cA~ zQ_9gzKth0+W5}^db0=Q_0UsrIi;|s8)5ex4J7Tb`(;1h|S3`8W)YQqNMPkQ}aIuFZ zsM)g6Gqe&|3J{5pe(;F@@=yPOFaPbYdHL*$RoZ}zoHdTeV}nK01vsi5J_Q_pn4l#1 z&QwA&v_A28+VQMvP${PkiSJD(Y@KB5z_Dd7x2&AtqOAx~ai_=_8_CIaV%rH7}C>R1Q!!0ZKQG_r028?6w=U#NaF)8LeH8uMRY2; zXeA0sNSH4PW7#V1P_ zG*8B!XH7%vC0|!?;Tn#cj{ErtBLy)juDZzc=7_s?pFg>4$o0V2R!ClUN9=rej}JaR z>Xj%$OFlCg+fY$RW9B8x2X zoq5kyr${03{z7tZ?C{z#*OBYagG@Nl@Zn^{dyATNA9%VJY&yeb@95LeMDL=Z@{Z@# zGaj0fyh_}kMV!;@jy=ghzm+`O8jfp=5sB~Ijf{-o(%0-6fs2B>yFFDe_|?mtUtMTk zde5{_6q!TYC6_H6$~^i?Z&k?LQLEUJ1Jk06mG?B+!B(%tJ3_+ zj__(R|LrR9o1d@j3tE?ycEPT=k4g%U8=^I1H&pD1gxKCR7zti^g@rV)1*no4QVk~Q z7`vM$IQ`a9ZylL$P7H3wPiTa&H@}NWX@-Qk8!xd^1w*{Cys113cuVAeTc!Km#}EI1|H=FO4}bQr_;-K)ANl1^zr=->%AE}n(Gud#q(BHs)2%7= zE;7IdxnjrA7%$N|CAFN6DNfT;oE^)ZV#SYHdgiWQjhsgw&%|M#M2! zhqPhK#-9TMB_#`ahng*I*brmF)7{t|6qy>Ku*}4q*00(6i{Z?q#HS7yhvc{*1r1BQXvt+xn`$QdkS?sr^>{$F2@6O<>M*(>qrk|VCT;Md zp$_YTA_9!nJ_%1BEBaU=wWJdjTYo{JW`mGVJwnRYK`BO#5F*jUB_Z`lr77$jBO-*3 zHg*HHjf^I)KtS8CF|t5NfeMy7UgK|KjQdpM(~#OHL}F_15OEYtQBqU8B}$sXK7v4t z4Cg9Zzd|d6k@;{g`Vgy1{=0wu&l&5SKmT9;ij{BC!jLH_WJX8BNq9y|kv;hUgQZJ7 zN*X#G62;12Fc(^0eLZgzF zpHN(#6Js`{z@;rOx^qO#*)0Qt92XT^-;pWHW$*c_-m;Dr4#!;0*fAsSmyjvJBn$lG z#~Y5sB$dl6$4$S--|dMHisdnuMSUiH{})SFK^FQ zv+9^T`GiakjdE>Taniz%7Bk)-jXBy1oR@gH;r;QHsh)8^pHlnCwRbFg!w1urt&b?7 z`MPm5p{Mrnb$^K>$12B2DQd4sQSke_L#p6jW~f7A8*G3eQnNj$lkw})kz=@i(E^$~0nxL_xNbr%pEOF*~X4>*(x8~8pbGB7zB+eUt zdfnoarcJ{+%&npeg6j@GdZLI?6P;qDH0xd=1w0u=_6tqp$DFp7M}IKoYB68(~63ntmNqxJXVt*Acytz9Gus?dGwYJ*31CRQUEWUJFklZo2XTA&6coSU;&qk$m4Lkw z%u;9~ffOS#c1Yn7!oHF6LO~k1pDFp9f~UN-e|T5G-X7VvZ|{19Z;$Ei@q2fy#jSg9 zJi~9A1U-!(!1lSFqOnM6@u6X=?+!EqmVig82|A_M%kV-d+Sr1C+Fhb$h7$H>64GGQ zzzPoZLkLN1Ktda8MrH@CES0|?#mF^BRQ-xvPU&Nf(S{5EoRQflMvtRH2!YWGVdrR>fsnNB8ly%y+1ylO zGBk-UEwQRV0#5Ygc18-3)~yHpSadi7BQwXPj%9nywAd%ch*cR82(f1wo@3Q`5X`Zp zO>3+wnA$@ccTJ$Zc})T&!8qR|*VCJba7W`C#(EFT;Ng!9Np&H0n3RziI(65bVay(P 
z^MTEL+4W5I3@u(YKY|p7nb@OCJxQ$4QZizJN+mCv4S~dtd@vvZ5lEqsaYW@38JaEz z)?veWmm#7?2*bX8k6Ns`>Kj(>6d@dlLwq#kdci7f85TGd3$dV%4cC1`OGitErzcnQ z!S_oCDR8&E%fu+2Ru#Eeu%l;asp*nq8yo^f69N*++}xoGJ)LOCMS)C~nwsPcR%k2* zA}N|wVWU9`$=L3bkZj@=ox3JiA8^+UD7+>J4&)Pj@?85BLTHRC(a8?4pSUI_N2v}G zA`VM_LNPaoG@(UFMHGsVB5Qw6OGo7|dC?z}%Q-3q3RU7m%{{$`_W~^kU;Bz~$eHkJm`A8Q)M{ z074X;G#Qsopo@_|xjW{%-Qn)E<8NPTGNmY$q>wW@?>NvoyP4s`nc(@hppTL!M0_U^ zqUZ0HOTOqlF^svFk5CLt-V-hP(QL|PTXK*GMmqBQANKS?^7AiBG8GWgvOj|F&NW|L zmE12J*G|*=K*1q}8XBoy)QSr?u#lftfje68U?j=4;<(x&`b41=TMu0XEe$?MK3ioR z*Mg}@Jgtt&MdToRpEk@{7{ek{T=Xq<>d|7%v#lYp<-K{pkukFm(IW80svo*tgyFok z+{-OHR`T`Mao#4BwA{;b3>iKqHqKMA;V0WmRMM1b!oHnQIZt6>rZjczF*5MQx}fqy zHG0hcK<@F1pIkfkGezqIQcK<|6-^MFSCOfhQq!{|_X#m_)+(B4Sf&$xa-H#ycK12R z1*eU{NZ88)pRNT51xzG#!P14q&sH^eiky8baUL>}bM9-BN@Ox4lNn0EPAPaiu6epL ze7Uyl+Qe_Rf!-VXI9!K!vP7l>|M#&%Bu&){g76$3c+S=>6FDLmmUZaSm0^S8>y6^C zyPCUs$y{4Dea81^fpw=jZie2X7wwkp#Lzf{kBY_*1g&Kk$&1J;Dn@9GfX%NqJYNPX zCuyBwv=o%$07%>$tvJYgK6RESMakolrw;~^BA1Op2sIc#v&gfrOWI{mx5!v^hNmk* z9co^-mpmvQu(>Gt)e>gL@DCqMI9dw!bH^X=89u!*lmylxXVpbIuc+6Ok&^5ef~$T@Oe35ZG@ayGU$bhb ztQyBBQ^6M-g4ynbCM6H{6<=2c z>kyGy%cmDDzc-(8@`zybWUKwh#CJ zPrUK%zkge0^7if3{?v_c^|ojAcirBIu~)+OZM4`RTHi?U8(dg=wdZh4EDKPMDI+#< zShAH;4Iagtq^j3PN}!Y-_9q7Ka3nx7C^fMCoUe)WZ|n=NqmSQ=*Xss0V2EQ*ub+w# z8X+yiqyv;3#CE*txEInu62y3OzC+T*O^opy`^f9DxOtx6BvE`@MfjDVz5VxBkA2(s zE#7+k+bA>_TYTzKQd7t|U99MRLtzqDjoB?8Gm-^CYL@*A05UN~NI@IcSdp>uSF~# zsoe@K8g|TMyzB_6MGDK*&M=~+3Ks~Gh!btmLzOo$$dVr=%241VZM zcSf#e#27IYL_Br6#xf#_G&J&|2(rMa37Lo_0;Qfa();wGzR^IAOpS)mXbQO0;M1_8 zqJ?5C##E^%CFuOnG}ilw5*Z^g9hwVM7?x~FC@~q(Wih8^i^hB&_0$L| zV&JsV%uM1=p;$Wx`;Lq}E;KG#Dp$}2*v$iX3%KYk%OHtygb)!U;i?ZM^>WL#hu@oy85>U<1&vqq$@BLY z4G+zT?=KWvFL|~pn3+MeFUp?UXKcLSbdys8M{A9{hKU|D$8pn>(DWfPHl9-DgeZ_+ zvS}56^VBdkg44$Hp#h8L=g&1$CHQDov6Yt59NNpFkaxHA+$jc@{%#bUHW^(|)FJVk z3yl`=@nvAD5{HYP^UCs~wtR0MIc+4Dof*8}8Tcsq>&rl)JfF-X%U<)@OT$4P_;k7C z!N_tq&oD|MQsR7-QRs-m@uU4f7cA2d`46X&CsWCLd5$Ybe0QheqPBc>IuLgb@<2+8 
z->emtb2w3RTq#su;8hsF(l@tTVQ=pWrT;H+`b|&hHyf~i_dW(zuY%;?5n|tb{JUvy zueT6GgU+jpr9-@vujOl#&=bNS;?h z`PFgWVg{#l>v?>$0q>1a{?d{)gL`<)@efu?xks(NdoGM<@v87MLQhgJajO6Z6 zQB4_@Kcnf_SUI9)g%W!3&qhs339Ac)5CayChFneQQ$>g!ovYBwVnvQoIX*f{Ib|$n z1FkMblvFf+i4~&({vSP?a5h9O$0aUxXqmrmGP80rh+WyStY6|tBoDcq4vku=XUvQ{ z*@8P+&dPN>>$ezL;OLNXsO(SG9ZtGWslz2oTKZVCl^1B0Q}@^OsiDYbBmyCMX7Z3y zS&qygr~{57&WNI59nNs6MhSz9153J)vmpZ4%t=II^ca!TiAR5GfoQq|BMfU8eQ`F9r`>p=KyxKhtQ5>rNyC*sfC~;VTC4< zv!6fV!d+6@2TZl$xIJcHJ;Vx2lyR`7bSLbYT~aEL7@%X!jFb2|6R}UOERFBEj%P?& z@v?tO<#LR$OywOiVNlW0rkaL|Qp_nCvk@y0f|4nv$SLJRa;3S6?cuUd{Cc}$A`5nk z7A;1MRN$5^LYHa%(2t9ZuR7t2#(qQpk}s?AtlNZLY`}6BE2Y5%_e#StEJTU9k$+ ztlWfot|=po)Ih*l`M}{Uu$BQAGiEv?(~f_<4?jB>c(Ed3DRs##SvJ0B6BN0O+$%jt zbwo>k^lJO^_8IjK}n0V7*>B2dc-RC@IMV zlq&FS8&PPa9Ih!+CR#7}<;vkA7%^slRPyO{%c`?nb}jeI3@tjQx#O=c9XTaE6{X5S zI@V&v*Ub)BZQwgIjg^|iLSbcOW)g*x^hxml9K%5_@lo())pM^9d@{?pyO;Rz4;1pZ zfxrEG$q#lCyQRm6K%pa7O~%JFOGt*fRn%3^&z6QZNbY2Y$*keWA2wW_WqddeYuRvT@+>2ojsP;jb?{9@rf|7$@4GxM-(*e=7NHU6Hjz^}o=LN4es( zpYS*5hW&Er{drzR1c_NG_@}#y-OTfy-IhMWXO|PECUDgy&Kj6nILs_Bw*|jiMNVAH zolNohO0s^T5F&8ZM}D-UcrX$)!SKtg$jWz2OGDKePFlq;P7N~yE)LA)yW_-*ZGt2a z!trPl_{Gxkr|(Ic7BUs7dc!9VHq_7Izj$90wu!Nh9JhiUtC*XBl(4Oj`QBuQT$EhJ z;rx7zd@`HU2l#9)nA^Z{+i})ad^Fl+kqNfW(?`kw`Ety0x8nRX=i$`Tg^W)x4K5~j zGsjt{IB7LnBqE9XBg3_i{LT7^UvFppgZ+#?zG-;VZ-mji;Rm;WzDs0ySEPO$*6g;J zx@~_LytFBOn{DDxuHLO5%*o7%KGXx@APHP- zky5j#4`@>aCFuH^6g5k6LQI2=VPX!DahNIQa*i^FUT^7ckV#{?2SlQB2rY?epi@-+ zB_XvGa*v6<%XR%FTIck!Vkyp&S9#Qm= zppP}BSWw4H*6sOFxi%lMkoT$6ij_YiGh>V#(NVK(Ul8*^t`9B4|+t> zH+<-j2H{E^#!(u)ABY)ayG!dgn6$;p5y^L))}K?_U5qX{>z*N%VC7yiu?Nl=vseHC zAOJ~3K~xB-NimYk!Rny|JRE7h*mQU=2-Fm5jFp0sm7I4qn>27|`@TL0#6(W!A zBSf6iQPIQ!m8VGf&=ZM_7*mIydv=EibfRX*^u!RUd%?b)Q8`DZJ5IW<*;kL**Ly5Y zMJaMt;ey~3xf)Z58A>RW>N#ppm}I+13`+D!l~B%b>aN*@OCINklq%=vo0m-G0wn|2 zedLk`Astok@xGx7=V&Q#)cmHqqLd@9;{`HE#$unZ(z!rK4s~?y(75 z`n07qV@Nw(hnhkdc5T9>1tyFK+lNfpR3c;ItM)g{>^+J&X5||I8t=Ggb5=f3ij1o! 
zk(Gk4wt?@@5_|dZUUhE5*KK6g_h=pX?Mh>0&c=1z8^Oo3F<)*4BPn^|YyR_Pg^oEB zm6MV{OISILjw5QX`G<=cpRWa9*8wHrlc}PPiR<2Bgt@s=YCbCWxSJbJ+8&=2_jAR) z60ZFS7hu^6K3RBBfd^U7uQy8$GEL?j~9phaJR>ail1IaesQ(WY3o2J?%FYrM?DiatQ=m{ zfti7a6T#RD%v#fi2|gwsjy&O6Cz>)3>reN(WPjrhgpoT*&!cv|D7 z=O8bL7=)HgRE9#5Gi72WR*xx5#dOqj{B_M&t>XP+!fq~kQE&Nd-LT^WzungC+nhoR zMl$kf48OciP z5s;ITmuW;QeTwiHGIFTQtm;Bn0X-hDnj8!9P564_yK-C8x zl#W*ofFoCi#wAQs$w!mOueRrWv36+onI-Ut_Zt4<#h5NY?K_t4g2S=n7l#LobIWHh z5?=Oerl};6BW~rEX&Jfuv-81awUi&aNZgc zk@BeO*m!82AytAzZvZ+$#lHCO9tQr8$BMl~QKW$@C;8-o<;kT+bdrOEfJ!`%wi*z8 zJRg%v!@V-n*ubyOB=7GO&5g?M&FbO10_o+86wN!uVb5#I=KXM>2SejBrz| z{ib7hvzY5;QW})Z&}yJYc;69%MJj=kDIxe>g<0=LH1ZmDCbL}y z*xnec>qj^J(l?Lb`yR))p8w51zeyPHuGJ37wehcLV}&s}TIDDVGD>_LmNd8aoTMazUl7u3F`NcPbpfgox97AX{`?NhWg7(F7>W0VloVar$^ zg9*6MVSS5_mcr~IguwZsDQ-vs94f&GiT9pFmPntAU$C&N;d&uycN5@IIK;WZdb44T_bF9a^KElMCMV3~g!68T7lGUQfP31| z$H2GUSGX9t@C!z2iV`W8VMzPShmtf=~gIyRh#E1I|^ zmwQ-h-b?Rr*0m%gtlT9Vv7yG0iNhg;L?%Spv2_j=Q-m-Gsc6`ucmC(yZ%kTm$YtcS z>WmMv8K>>QCVpBO=2^gCcwP(I*pn#9)TCHH{FbYn;G`78vbNCtV6SGD*|V+ zrk2(XHY*nt(*(Y%&#_^~x_1~QxHn2ERitVa3Rw0IBcPNCW09Z{_~=;lmXR!YQAbiO zSayzc+mUQExjg2xrR2T*fI6%YL2~IO2v6+=AB{4ewVtC?()KeB^BI+Q7zOi0^YLWP zEP;iE%EAX@&7)0D>WAdJO|N<0NM^>cv4VBzILre_nc}=t92GUcyz+QI+#Nn!B)n=J zA5T4}jY12-^IBm<>FSiv%nlRA` zNJ|KsMeE3v!gq>4yS(78K4j0NtoukRVNaJl92xc#!6$ne7p-J%13C*h8~MeNC(9N8 z=)X<)w|}x_vn(hR!Jj|XygySMsTpIPFiYU?UbOVi@yWjD+iGYOJSrTs!{M$^yv-OOrZ7EX+7#>a}dy_*RUkZ#+JlxFI&yBCA#}=VetuL@^$O<`M&^)v=k$Nl*_*3i5=)8^UU%m z|7YMJ-ABiUN87;Pel{Ytj*osM*nS@Q@|ox3BlsxGS#M`NzG`_;La8E!DEZ;k^IN4P z(Jj`c{PI=8vW1`QTVB)|y^l1(;iINd~MmQdhd%Vr@ZqR+1eq*WCzu9td{oKvU@U@6~m(_by z@V#rDkT?DpZqy;)eEv6!vbUSE2D`*=&*B;zcDc!&<~``7H>w`;ND#;CnpIfr+z<=dFFcenIrYVvll$jz~Q z^SLO%%3)b1+JN_tE^etpKX7IHEqOACp(s4#xJRNSHg>qs()lgKK$#wps~LT0UY~-a zrw?0V45VVrI6J0egY%9<{T?W2{pR(U+t{MD8Au7y;b>4&6Y*Hrk;>5^%oO<8A>)`n zHb|k7GH2T_NL5QjQ1wef^i0hyl3hpYv*wZ}He}M!#fq(eg+O3bfsY<58dMlO;!-9k z;?Mvn3{sh4G(EJe5rRP=i}AqXjYBgPL|VVWsG<4J`W6WIXzAP)F=iCvfX;7lq2+G& 
z6B3cL@Xu-D6+r|_d58-=SsYP?3w*HTW{)5ui5}6rdSD_8kM%W$nd4|F^qkIDLl6eT zWO8L+P>4N@$jQwN88x+EB9Sy>v*Ic=ZjlV>Tl!e_Q(0rJm@PvvWJS!VfF(ng|Ar(+fkTON1Nu}k=E-B?? zXv~vp2tW~*Z|kQhQD9^~c(&Q6r^O3H=Ms{rSi725IC{Tg<61^y%I)Ncr`-i(ku#H1 zYVWAynmgu0y6A~9Vs{h#R4T?MaMrbyjM(;u;~Wms1SJHEUZSLD8(JRZW6s-_v$o+< zDo)Y_9~D=v;C|^zRG_w!kr6!J3VN@3RvStQD_^m+9wlJm7u*&59OatMMPAlD>1gQK ze^4fTyA4<&n5BY?zNhjKQT+C7K#o1HVC#E67;Dp8I)88wY&fW#2PTVbP?VprmP0!@@0nlZUZ zsa)hy^NJso_n8^Ry3-V-%u>yH-;v27R7gpI^^y6s~&9Q6jh^;r^%_lGwE5 z+l|J4){#lWW0`Z9T5ivKj`t$rG^h1&lp6kOwdK(!5WT^emT{^;rYw6+A~QCP<2WB0 ze-~$hqdahuMrI})ah6u6>?IkiF7O{OBqw>sqrZ}@*O8+%VY&6(DHM|go~;A7jN;r{&>P;L zNzOL~e{m@()!-LLIo?0ovUsitQS%pP1{dIN8MsqM{_4{3^Fz;Bt2rnavo%pn|r;j~Gq^PK=?10k}fZtgl zuB{U>dLsU{sEs=-gA|I0AL4j<+a@vW=OzOgzH{s`;C(w(I$m>phw5F3c1LvBef~8~ zf}Ka$@%@Oho8x!Wo^tc`>(|~L+ndLD^SXYYANXx+N(dg~a}qtmg`PxaxM)cjQQ32h zPIr~`NE=q1@fAv>jFLmFuc_>U#EePhgoK<6`w|-}#_1tKXtYd8bs&0A)h`&Ohgj7h zgrtfKv>d9n+kOE;FiMVwaDv?pBm~P?AJdBsD|d=kDOx6os94!&AOi_gQc)rjM+|}7 z?Bi%i#Tf4`Yqn&fVC7#8n5(40dCNo}VnsiAzJboI+4wVj=t=byr6iFW=Z1u{v7FP1 zfdmm^!26ymEC`81>6EEB#>s(5Vq{7kE{92w5TvpgqMV)J(meqJv-A!_V2UJ0$DX`P z6tE7b=$Mi1M9Cz*g+Q{lrwBPT!_CZHT3^wJS11yy{*r{8EE(fsxAU#Fq{(RD21kpI z2@=EBU681Z)@>-{9tuOCXOtY#hYdFLSl2Pd+AsL&!+9NRiSDPpPI)A@$u^q3A$lpg@l#+pK9=%5M0BM1X$6;CfZGLe%ek*D2bq_70lGFAI%(Xn+y zbF2mE_y1 z<-%X^QF$9JG<}4lEO0!l`Ru&pybIV6Fj6v4Q?B}iG8saN7Iuw|8Ywa!wHH)=ghp`D zIIK53b%R%X9cv&SzVqw*=wniV6H?5^PEW5yJ+jFN(u-?#nF{D}r&lgq2k0y#+x#kCx$jjLCoo3Z}YVSA{np8y=U582_Q3iph34vS*etS|dNjf5ymyKp+hXlwzB-}0n zzcuY37F_n8+FQ=sl$C8c%7$+GkEWI;2*!z`3xmg4<4L9Fhm(S*&4R5JWNyl3-*HlC zrb*vEpiAwJv~ z%Y#z$&DN6x%N|OV^P-Bh)T~^~J+()vhkO09xBTkLahNHxR#EqoA0JpwJIUF$rFo?( zwBl~*xR1nzh&Eg9rzNx0@uG1Et;rPp{W|4&_ce{X#Z_DI!Njn34i^;PZXKACqfyKK zy`EpbD*0mV`PF8Dj+%Kg=XNn>V=E5FOR{22wNAJ@@ibO)ds6Xlo=blJhnBNfnrgOas#g znwJ|(d)gs|VL#P8t9n*8Ffx)`&axGT{bZoC$PhTr0%uK*4KPhKZ5Z+S!ZSCLOhFSZ zO)q%aTGo#f)_u#KO8MDAWMv&^wd7GXG&LG+S@a5(Mn1V~nOebL|E1>hs}*yzPmG4l zNH(@5qF6MRk4KVc+loYaZs${4Z}`<^MyZFepHyHqL 
z3n+C%D82oeYvCq#?F>Sw9sl&YY3$~<-W6Uq_a*_o+s06#9V&pHn?h}1SK&4Ov)IT^vXl|U{n|R}!kFUkK5CbcC5YKw3 z49a9D3xW?DdmldZUp?Bro6ZVq6q(;bxO$7R|q4zaGei(<{7ZavXq^y3VBNk#Acyb#w>D-o)K47HgD50t25*s@V2_s!{6)wnBhGR(IE7bvp zl+JAt5`@$gdXL~7BR!+?S4f6TQmxD`W_iH18JSY5eH_EJCJHKlK_Uwhk>f%~7qZet{s8$`Q4{!pIC2 z6+v{&^(~ZCw4tW<3w-Rz)C4Ist=ke}MC!q2U_*-#3b!LU_~__EgO&+eCZuvqAL?EB z#t11C|NOuICn#z7AOHAI>1|68P{^F#4-}Qw_DEsK)C40_&cgzbq%mU?w*zaJZkTk& zjHmrK*dVwH7mRg5?K|3FNn%DF*BqG{r5s^QMu?VM>4ekn1&yzmsY8^=IMBz;^*)ue zbo6ww!Qn6>5V4H(h|1TbB-~1F0fvQt2~x4I=WKmVwF?>v!gH7um}r>l3CCGV6D%s` zq*7ziSbF-nr3)3M8j;H>j-HXu*@O*sxFQoddwQ}Nt3KFI0MMoPpbfF@pV60|j+OT%jK(_FfWq3*NJHW7I zLrY5{4O%+3vEp`mfEJ1u_KdBs&?17+q~s`RDOJfWb4*Xmc~>(MQxYB6OAG({>gdOGM<4@k_zxtz3_^W^QDa)rT-ph0D78#X|)Go5H9bYUx ziAd2X##)ogVfpvCh?J%y(~cLd<#sj*BvWLNYTMeRLG6pfG{^ zGuU>+y+YD3(gjbN4K{W(VZ+7_)xL+Ll#^WXyxGwD$Z>9X**Nx-6oFx^1^3525rdD4 zDmE-^HT*706iP%MPB&b3o=4k`y3cVzGSiCB&U>Cz7bGI#I8V5L4@x}uG687z63toH zaOFErvXm=dlPN`vJz9BgmzK}31TWf_)(b>T*u*W~C2Xyt^OA47R}|wlO`Gw&J7tmz z3YqYv(mb5Eti9xHE7`hX(sHj%_@Gp5ykMjYbX06C{Ayj{T|#04iCM9qSu&M!Pz1JC zQ^g8FO05R25ycgcvEcVU!G>)F*YoM_7jyho)dZgD1_B zM2y)pnp8;+K7NnYgGCWLgcL;w1B+Vm^gLk{J%=W6I~!QSS~(8Ko=F;zNhI?Hg;5-3DMl*(X0;qV(ECe%I`7%G zuy!^7@W1&fAN+K1{VGuZ@@?zGyOqrEcecKH|96Gb+XCuZm|Zumshv1-uoIYXi`_RO z_@+z~LLz03k_iIEkn3%C0TqMC_Zm-jy`R)fD3aOkH3m^cH#AJSdgtG@H@w3UqTN+| z-RtA^#9S?#L4% z?i%BUh~!~7Zj8CuHPVTAW8@-l%iA};>TCY++f8%dH_`a6=lWXw{_8*e-}v&cUxENj zO=~x}U`ch3Rw=2Okm`~s2I9rk95dDEDZL=lFLYiW>Z6b2;>l{-h0kgEw!^av%9F+nOtqK61o8!CKo2q`GU zm?|#l{FaeC#ORDVHpHNbA|lmbg>kVZ$gn#P==>Hf6FOhBb(biqDAhiJfeaF&#d}Yl zP5{`rGcr9TRpX&bDq+*Tqz^St)C1uo`auZCEiU%taz<@eI65Gb%RLs|6IwsS3Jai* z71p&N15!fcRsNNj4a5Q4UM7^{@MTVPe26z_>cb=R2&+nrn*4JLHtXp zC}cq!8ydC<3{$a(2$EI2KnTf+IpR)sMCTP%ykf3yGgf127YM>}Y9ABC(1=-u1u-g2 z%!#7sarcOpo+fT+;tEYVVBSJU6oPd)rwcW8T(S0-G`?jb4v9hIiF6^biHp}f+jQqk zl~NE8WK8H;acpj5V}Phh7*WccOTQo$8AQpRJZ7Q}7@3rFe?iWeBo>43RT{i(X~T+% zJfajOO|W!f!%Q9Ey~YO5NjgDc20w93X~Gp!fIw5Rz{ZFnV^18R;_$tVBH;08GBQ;# 
zPsXGoq4j}tcaDqw5DhH^y=bZ81qmf}zd@2;{slyg6H6{CKkuh{)Xo#9p<%A&UqGQbkAAfS6fBZ+k;9vdE ze@Q%Bkt&7t(1u8=9F2{XT9Zga6oEu}Dl53?JL=HTxP+%`L#a|?9PA0tyNan!_{q#L zO>*|Kl=tTyn^sf#z}Pq*l!iJi+0P7}O}R6(DAUk&1=AGvQ^gnOia#C_V(~rA$-Qq_EMI;VcAK}DnoRN9>ICnP?(ha zMZswk31J|hoHctjh?W{a2w6>Mv*wBm?T3;(4NleLttwaxdM&h2Qy2NIDY@Zn%_EbthO^` zkQ}80jtwFCPiGPAfZ*$_fv7kf)jZ#51d@sc`>o`&OU*3xOlCc|$MEGcu;hY89k4DX zct!05d#UFKd+_VcX24deg#FAiRyp^Il*W}@wvoG~;`urSpz;zUJ-HSNJNlUOYHO(7(8OAYNau1!T5!4oB@)gX!)e#@i~st^eE7*j0QB1d z2fxX}@J0Z?joN~nKGSRV?)Pu&#uB>_gJpqh(e`fD_@>Ww*ze$CaD!_&`auBij%yH1 z*WBIR>q6sv&p>U^oj@$`!Q+ER3eYNf?d?@cqm{vj1Sf{7R|u9j75VFg zxj48au90hE@V^ZE=d}QiugB=7zZb-J#`ybf4)0#a7(MHFhVXGnu+kYJf{#Oxg%&B+ zH?)04mJ}@dXVkht=%Lyeqa=h#A6Cd9I7l9_im#~c66abvTM?rtGh<9F@N`VGTa;qL zS^v#zG@VunDtc^ahH4aoF7$MMOOXtug-lG*B>3n+4k=15v`C>zRDn@Bk;rNHYZ6ls z2~=T$_m+t~WUh`;x@F@6mprBN7sG-@^e8phJF5Nyr3|S!VC`Pwk`5CLO;`=$H#&m# zbZ$K~;zfr<;^JVrDYH4&569%ALy}OYb5ceu>@zOLYS7CXFR!vQNPb&Co=a zsVPwfj2L0V7AyOq@h=(Cu_Xc*{nuoA%2?fEUq3)9LDesCvB5`2u1nHb(uE48Mj!-@ zTM~&VHL$09x8augkXP}`-6W;}1kShguBFf^LP-P~CkHRFM55%7>?b0ewih(AV$K~T z5lMlT4iN?t%T{gKlY1O-htu$wOJ^8`jF;|PQk`Oi-i3-7UUW~$&1^_+izQnTxC%>@ z5IjuoP{oQETQV`Dlm}##yYPydRHh^`=ZY&bMrf6i$1xXS$%uUrLnX?`fKn+!43(zd zSt2b5YM+4Q+^x9`4LKuP1wWfBzE}lDZcZJyM0$)cRB_EzOqeleqQ@jsaqgDX{(>3% ztk}?mhDboklmi~%1Efk*kg>5I8;6PeMx2r893>L2!n57vrbMe8DI!~6F%>iVSTRvE zk~pTL<enCUmhzg@T2Dg%^<^Ec@aXK?d5`awv`& zi4hjhdDr9pfJd9^BXr29{Xnv~rQSoxj?N3#;fhVL%;NDN^0{Jea<2Lf5J}@OG3|H1 z7ekc97UO#i;u{f-Sx{s`!WTFg<&f|mPhqFVPiJTe5X6 zFY1Vmp0oZHQ+bDzJOv^rMWk|y$92bhh33^V@O8B$5d}XiOZwE%2KZ*{kWt}+APR{O ziqqOqrY+xm3AgeLA0i?swjq$qoIdnC*#=ycTmj1#5-Ip%1x~j7U=%Q|AcjB}J0@vH zeyjL1hE&1r<5yIB| z*{1>}3;yYkMtt^-Vre6XCdb4vF2Xik@ZKn8YM^m~{X~$fh>?+RHxNbScV<0L9wpSB ztE;mavAu?_dHKmDQ%y#=>)fn!D_YWE%Q-e(MbA`aJH6A zbfj^D-#t`hQnJ_z{{1gAPR5ag)U&4ykGGBw$DW@b1pe|&^V>6cARUVuj8y#Y-JVxb z^X#!=q#Tp0EBe~7XL257DR(B0ZJ&@S#nmcfxr!tzGS375#XZO4i-P~@f7L_dO^6^Z z^Q>W>j_88m^JRkbk;0TnB0t=(sI25wEkQ@lFBE_MvY^n8z7?EzlE+t({VZ^x6t&m* zR`a0LWLofKneccU*v}#l(}G9y8HZ1M9-U{5 
zT|(b0pd*nf^ThMXQP1C>8(yppKbz;&y`i>(_wxg8rP*sOqu&lVxas};K7a2U@$}~B zjZk{iICb;2?+R+MvoOS6HS6tteE*{S@T}YkY=7hZ*S3l`zE>gMoJS0fQcO5ZCv@Jk z2@|wTkU}Db!J~$VTQy{MY zj&Hp`zV#i5H{LxwPR*E^zKs6HS%Ft3Hq(L$?BPv-SquxzahDu`ZTb%Du zQiB+X1wMF2<`#&->Y!srke;yf?@FO)?3x%o9*6fW<8+^Smg7T4pa*>q6`&4Ftam$N zh$hfsWR6sdkv@b7r5<5LgY`W=I&7%vU4tQKDo^NROVh8AYM|DK;6OyQ$jS7GC_H_v znV4e&jxJPaVaUaZZMYo5C4fYx%;X7I?i3#!ncf?MfrTMWhtLU+LrH_t8QcC6r+Ttv zGWab;P92s=A+fP$B8FQ)ju@!B3&z=PR`CLm7BsPD6V4dP1M09~U+7k-*73@I z%UmCkt2r~1KuCDhKPHP)QmJs!(S!|zA#E@Vf|jAFa&G1bt8t>Gj~jfHtbKz7Mry>N zDS6gB1)(|MK33MKsHpskN?fsZD{@AR)P%!auW z6+O@WSDeWE^s!|k_K`B+9dx3n>pK=cf>7kLq*O!I$7nZHta`z{5rhyaQ&2G*(&>E5 z<95ya<&@*RVCy1*o|8hNM1mmaMcb1pK@%MNX~EnCYA4ycK(10goWNyg+1iYs?Mogt z5-o-}WF&mObu3yqOeN2|3r?~Ll}|{eCX?2HW;CBK~pC1^E(}%o~Asn6!*uD zgKQXFTFZ&reN8qrcSODs4IQt z0$M_8O70Z~N#u(wcvb~|;3eNy3IEUKn!VI8%`y(O=Q!`UbKmjxv&fh0$UF%+FEC=T zT@=RAd5Cck(226VKkoRutAtm+=aU1;{!wP8(Otm7_hCKtV&uKh-G>x=g z@VL&%GRMg{(0EDXV44h*sSjp7dv`o}Az8gj_|@~2-a%m^cPENX3*a~?B+s@(QE!?? z#z_mp(FD!?!t;3TX`SM<(HJRtywOY&MdbzWO&YvRdAf=GYHg{4;s^7Zbv^jz?@byW zZ-%~`F1*o~`ujb;@P@STeFT92T!r{I*&nX|y_Q+m$h5%sAHy5b6|Y6D5IenhjU0=+ zkIC1&7P}j_RtZ|9I4rI2vC$KDK3XXTL%$ClaTi_?35XruEZ*=E3uy?!?kdVVp?e)< z5#!DZAzp`D#26U(zkc`JFoO|*6oX4&N;M#?w8Qz12&B5C_YEO9de`EEMTo&a>_Z$p zzQPPxv|YvfbtOH*n{W+w+^10^}E;T-RE|dX_1hL@#}r1Y7A+_s1b&O zfW^liBQsJl9-7SpbYYFs8bqXVYqZEvQd7ALl+4J?jK1HZv?0VuDh;(;49Bkoi+F|) zff(9d)b9||P^u&PxTSGd5Ceso;$n-Gh9nmB)C1=>T2jU+61L$I5r*bT7h1;2F#?JA zJ-ON=5h+es>R55rRs^Bh`UO!blt{3#N5o+wW`kqX4t55mG@T!4A|pA+%ebr3l^B`h zLrcq=+O6pQ;JFQAh`*LHMTkhp2JbqQOi(_>#h!p8RRvlIHvSw#My5tc3?7H`mM9$S z@C+9^5>+CE#CeOB2J1Uu@Se4Ph3`8=8tG$$3&YJZ5jjD47R?hfT~g>nuDTa^dW=di zqQv?d6EZ{;D3jBM4QU)PG8!dQ1mHza44!pZ>>43+3YBpcD<+gQvEz0&rgNGZ8C%zp zs1d0cv9wFHGNek;c*seq{ECr2LX&Xj&$+8k7^@kTvq+zissbehPr4!TPYTU8^td>s z6eR~q&c+RBwNi|b5-Q(fqsK&zj*=pd@xoKNmOJJy8}Dgj$2L^7VT+K4u}q0#pIl`) z2`~FIh$V%XGD!?gXvsy+q8(Ph4$WP(2waAjw0%Q}1HH70YZku2h`a%i3MIdTIYx6UAaK`Ddqo1*qil8l4j|7&$rp?TkH|JMP=63)m!!S 
zbPvVR41pODLji(r1lj^xCQ3panibKdj3`j9HDF(Su^1T8eX@*SSr&oN5$h$tK{ z+KLPN5+e$Bh$s?7 zG~7z=u?aOV{7aTxaiDG?g2s731S+v0V?>CSWxQZ4kB~?b(+~Ppeu@Yatc++1R&fcy z#2B6hNgE>1YuM2P9+dFe%8?nz%x-Aoiiz3bB#%6<0xvgkTK1LV#~aJSE_i#i$D=A@ zgy2bSv0ia)8&0y6J*~;Lp!R`_wxyHpgI7P?CDSr4gr}7pd9f^p%lMk6ni4B7NJkUD&!8ED))~@8!mF8>X$XV@qd%xi~ z3&E$$hCNgAqHZ}z;5awP9HuU3;|!&aT+c0=R@3^x-fqQj=8pGmRFuH!FGa?WbY|9#c6v;%yEfi7shVcI&Bv>-fVqgh$W zP9iuRwY;n}pPu(NuGWE(f``u}b5~I&fzxaNF>u}pf^v*4bUvpIp%^x0B><>HMZwze^e0(iGu-0$Ciiy!>pzv18hoBzPiKYEB0c8ljqX+s4u z_VLATjtvbv`YtU?{Fe9Hg`U_j5eFEZ^cnG@&#uela67;T8#~*cKhvj+Nz;=Qgu&C% z*abpL5|#DIc&?8Q4s^7>>R+=2>l>o%SG$QEqRFWIRUe%!69#Gz8+$Zb5SEeH=P)^? zWFIYie`Ki)mGx+uaP3}_h>R!$rP{-Ygr&Q9ZK24`4k0>XaHO&X0bQtYvF#bPTPg;s zpP~=ZlFGrBN$gp-7li0=p}}nf9X7E~uM-Gt+^`DQlxm+UTp)Eq9hSIg879YQmD1W3 zK6K=Ahe%+kk9)!Z5u*}PJt9&0mM`6V=TkW((Z!ZOZ4oM>RL`4MqQ?d1dY4oeC~5Ga zBQp~`c5CbCJ@Q?ssN(|fElL=Kl>FB}_|K41^56fnzho7!d(L)3Z z{SX=GjB9sJrW8lXgs1{G3XYN?duhhgdWwyab(~^^#7R#^$#JqnqBI-dQj3~g4AB^F zWd}5&<|rHT!Js79hOtTLf7UM}AV%ac|J5)hX zQ?c<5=PZ};DRr#zqUO--qGC?%8`doG1SV>TjSe3pQ8*UvicU0SDrKY-T!sC|SzWN?TL#icz zexjJWj&JPrp8DS`H~eB4}p49kS4&m)(s zgtte5(?QF$$(Xf*N1HXPYD_NSuxJ_Nk@v@rLc>L;dH5`4RwaDzotnS=q~r6I;Hqn| zE&&vt$cG1>^L1d7S_-3hv@E%9B1P(WxOSZ812h?Hr$|!CB=vmw_KHUrMW2+aJw!$8 z5+aI=)^jJ@;ck&p7{|nDmTSYK3rJ+-?s3blZzxu?6k*|B=}}Q|SU9xwm@NoElkxc4 zGwT|vt|SD(NhY~!HIJ5&&le3vNEm8K{bJ3s)ub}x)^Nc`*Dtx;Oh_59n`k16&sUNz zMs5|J=Nrv9>sv@an@L`F6$fd`*rdIx$XEQsufu8{*ee4c9<{_8UVh#XeacV^E<4S~ z=LvR`a8i1ft!FmR`R<-2)$rB>&GlR})`7o!SrJ{qAcgT>#na1@+ry5s?6_!F?4${& z$8cw_WjZe~QnFimzJE7jSCXT$<)fKrs1s5ldG}Ag&Yf@F1zwBao5DzZ#XtC>aC&vS z=@4u= zuQwMIU!A=7|GfZ$`}a@z!4Lj1fAz2bnjih-BRaq8#j9{AVbC%qkp+nUn7;~r===sB zEkgW_bFb4+uz+Fi^)^;>UK8CIwn*ny{h^ z8?0NSl~RsM5%-*0$o@V2n^)`F<4sGXqA#gK^tqLhy>vfQ4w); zam~o=_Wt15hkFnsZK$xm-g+woji2{X&2iIvp<^VGC5|40w(h0~A=tP%F1D<@OH|Yt zm9cV{ARt#0GC4#9K_UzOtN->Nk*b8h`X7J9%B{A7Xv8YkxX=%&YQTtuOeK9NO}M6^ zMv06%HoZLwMItiVSkZ-s<8+^h+{Ph0=6;C?5shNwmkiX1+sO%?*R&zf21_b>At+>^ 
z6^{4IW6ryZfS{#g9U4-G*dWoew>>!F=wi!`IG}ccD}UK{pfLS0q-Lmw?5GKeND%@q z?3B6f7{~#Qlayk}PEufWM)AOGb;o<3Z%c9BU6(>5}119ysq z-NXo_h2 z-`kHpTCMVkZRGn%=IrJ0xLxwKae@>>&Z`6$;UI-J2&!lyDl#!*tW)mhBSwkfVRu17 z&NxYUTtzx>D7D5Y`0?@ynH=-hKruFoPp>7{Yl#SktIl#-8cLO8MC7-tfRBS&t@l|XMdbal;=!mRpqVzBMJqXPU}ZfA znc<>RT(^c0C5nKH+o3p}FgAiNXgaSr&LS%-aZz$%Yw(7{Ea$9>Jgz)X)`CdF{efcM zWL(vXl7#oabBE*myZpO<@pFE8?U*&1JEi3DDr36RJSYSAhL+PwMHM6u*Bh>yn&UL% zs+L@L36(V%8BkF%uQj&O{N_q9uVGRqoD4G_UAHKqd2d{>mpb0v-LO-7h8SE-n71h- zBlvU~_EL~z+ECS@Seg7bC4UKTk_BSRfH8MUl8u$sZ?Xv0sZ34eX*dA2rWqGZ;|Z8yVX zokC;ydq)l5zO~_EmGX40I4BiwpJse}Tyavi{Q4^Au#gNjOk2sLb;-9U1>f22c(^uH zUXp6XD6P5J_aVpL{NEQ- z*lynM=HtGqU4BC_$9|x!|7~Bb{9W?E@2e{J7K~nu(of6Nhe5>G187@2h3G>$Lg;<4 zjt(0;R20kA+vh3P+7V(+&Ew-w)C zzI~Ys{8bB2qcW68NW=hiVCm1%!cfRP z1cEMZkkZh)6)`$;UEt`b+zcTETIRUep=3hs`ap#cJW+Zk`VI{%64}v)6}4M$I|l+6 ze9xK{9gSa-nF%)3)N#f{?{no_*8U0~8(O;|Q3Htmwxu#8q*Q*2^-mckC**2~K=Yzm zGSrGk_60)79z#YC+HYeG5gNB<6R$~BNoIy5vTyTl+?-4fNc9jGD~u?RA)%upRb#xY z(PRh&UZ$+OYiwwkWVa~fgb*F6n9#}!>lS!nNmPmy2EhSQK%qu-p+!YQ8ZvUxfr?x= zFF>p*k}*;mHhzr{md39bhy+9*Vd z9%Zu6CQk8Kw8|*WE-EVOFe4C|aDt4gw-}+Ah$F833r4y`$CNr&v|&XmC%7mPp(Qnf z+9t#Z1JhA^!PK8~uQ=s--64hGFxz42nw|nc%|4?g~ znR||6$U1ab3r$?}(V`=lLkcxQ#{sLj=5hNuxAk2LS+Ziy)Gl#;!-aaxKjciXEajSwPkjQu~fY zoYKaM6M4#EqFHwnZY3wU7&vb%t+!NR!&Nv#L`9~dG-DE%;-V*4hNa(7Fs4um({M$M zf|ub_hH(N?AzeZ$6h@{b@feV_am7NMqoX629upH*;sT3e#~hH6F$;5=xWUDWy>x=s zuTnNWt(dZ>_ql2!QbaD=h7+H$lLVZ!+!;h()&*lDNJONzo}mdGmyRD!9i`Oddd1^v z$bJr`&N(R}r^AMirYT-Dd|pNFv-YM z`1^Nm@%T)#@gSAvHw$=o6uFn>JgYi(jAqf*{LKr+wC%W8Dx}g}ZY(>g>Z`mu$2-H_ zuL;gKmY-aTKH)31d~YYg2gyg*k+Y3sFH2Z^$89s=I5P}%$MYch)7zeD*YTUB#PrCuN6k59 zkswbRY?HALj&bVoF6B|x5u@a)i-;Jx00lw%zLRGxJHe`hOV?2OnxUFdn2t;u4hz9~ z<@jXY^8Utg-nRU@nz3Vc$W4dT4KbFydl)%iX~sHI3qzYIZWkL8q4?AHYX0uC6zd&6 z2po=Cm+){YxHn#NGLYCHsWvITk^Ip?M!BPrA#frs`#Xwf=Z2+~6e{r6NyU%Pb4~}6 zht-CW8E{%Erd@|`VbU1R+sNa3!*}nM=$YUb7a$G=55^9i2d-Cw_r`&v++mIsUaa|G 
zzvG9G_V5J0c{?TKhdh6J#jj^Wa^-2Gqw_gWt9~})8~Yt=7nlo0suDV2?T1}{#Pi~S6o8~2&vzQ&>QXg-V+@9sunHRuEIsz`+AW?98#Dqd)IHb*V0$( z^mf>-PjQnnC4^>6nYgjPV*9#)7~Jc9k0Kyt+E-?`6=Ahy2Zzveew}Z60jwk&yCB3! zk?i1O-`ekdgNg1eQQ2qAEgyp3he+FxK}-Txh+8JR=cu*L;P7w2SY&%^E92Iw!MD|?Rf9YeiK z7h7g_hEfTMC`iqaDqLdynndiOMc=y1nGj=S;n(aZ87sf0aZ3udiwPNv?gd5_2qDR0 ziI9dS&at5*QCV-vV9<-o&=9OcOM{Stb$5=?f?SSCKh zDbs|F?-S5=;xRrrTv%geMR1mVdCIYz^i%@jKm@!99HsBzL&t{eK2^`HNkqxMKE+Fi z^$phbE5qOX3(l*ipeaFIlnG|{n_8V<7|zgcQpzYVKFGGs&tG|bRq7ouV!j+t7EjFAu` z3%8;U(>`1U%dtsWxd)6@j;0@6+_-BpnX$2oOlk@eLi7j{oLnQV&EbFkK5EGoR zXpvz9tXZ(D#z+zb8!E9t3e89inTsWry`s~re&8!C*fj%|;TkO`^rJg9M|vL{1uwhj z*l6hdio@)XQf18IoI0$?(CjCtBqG^%0wL9KCm-=}J>zzf5o3ZS^3n2h4$?7#lAm3r zd@v3?t30h&Tz3)cVJB1EDk6WlTk_kv;-`xlF77ZiHJ_{$T}-$;ax87h!}T-fT}P$| zTz1zS=98XJ8?HG`Gd7*3>IBbhPg6fga(q0Y3lZl%d!=MA4P3Me7b6F`!>N|wax$ND zQabL($XO-%q&h={oLi=#)bM1{@$n+zXFm+=C-BZF(#1fd=lp24vmNeIL;|1GPx~RN zY=?Q9a$ZYLM>SnY`D`T^XobVGv>m5;LI@et#?yMsK&K2%WbGuoNy6%P<~aA5#PI8C$yg@jGP1N0Jlrc4SGFV533~}>;b?syOi`LKo|Khq zs9nO#jbf5p{%p^4wN7|z5QsrD5F_3lNA?B|tu%KBf}?TACl`kCFt9fi=m#|){d>!4 zk?`rfq0j?}uLU2Ud!EmGWLss2 z7!f%x9P2JHNfp!9bN^0Dkwt!Sne%)T`Lp{=K3xsCZ0B4~J$D8>hS=`6^U4(cDq-zah4f3WE57p6iW@?BOJmsbhm};sD3DSD z8B+B79fR+Q1hEh6X#E=L1X?Dq4?%0!H-^}@1A5W*%wa%^gb+Gj$2`YZ$L-aEJA+YbN7;QdO{PHxY$yvl2Y#?gY3gAWYY6hWiR9_ zKSfE+t~zDmUos2lc%Ta#qX3D~q^4p=pD@-F)}h113@bXO&a<)0p7V+! zDoE7;L|-lML_;DWVStXFh$99|2oWXw*7#5yAf)2jKckIn2I7#)P0?GlnF$&h)i$v! 
zphX{X?Ukn$4K{2@^?+;l9H|UhOxa?sd%kVdbYjKEUK5qY2S*|g*{Bv35;9c~wWD$~ zmLY5@8=gcDsoMntMJC35u!WytLyZ88$VkngKaaj+$J}Ktru`FZfu&)rPO$VtVIhQm zs8dLsZ>YoO^?O5N7|0zYg1NsU+FhJ0I5a~pLq#nX1O(pqE#;|OGnWljbPQ$6K$d*s zK1HI)^e##i?CCLW=pZ<Zs?Im#vYP|@i*Fl4MpbTRbdC8Fe7z9f%hMzUw} zR$)$###znOU9t&t_SGF^OgIp?S+c<5xatB43D$dLOfXtwv}IufAx5TMAk}ap-li2T zQh|*YiKL|AYQJ%X%Q#szD)W(FD))<{@2dI5E4-c}`;&wBe9leM0T~ z`G8yIePr~U*(nZB&V)^zF>^H&S&)ed0Kcuy`O*9W9fnA0xs?|DNBcRyTn~7-UhqLV zL21n-ajdOkHx1O@An|TU{B~(rxxm^q5#0SMus;RvogpA{H%GFv@XvM124CH_=2qFq8;gf}+4S|!rH9<{aP1lrGCHq0 zb5kBK9U|-zg5>zqixQkp?h8-hm9DMXRWseD~W1aTfXbs=x)y#zAUYj#Qutg4P@CN-}RPXI+YyhFnK>l;#)D zGNM@Ww`VoiKcC<#k5>t2jl>7|&R+l7JxLSJ>prRPcCN?=1NM`^k?5FP!+oETNy$|m z=v*Hja$cv1m~b*`NLC#$mkHM`Jl+VVH3*Rqd?Yi1I|p!a9eBDn@>d6@}64rGkYOJZvuL81V-O3KtVf4cAuk+odA}!*SWL za&14D*9NYh2_DyDW?f)VbaXyp9RgP~$DWp4R)$PT_EgQh&iMMCW2d(4jn_OpOUM#O zrW0zbxvZe>-e^|^`k(%;NB5?1`h7m$FAJobLiu+E@hh%uKls*HUi-THSBbOV70&{; zKHR?T|3;L?FED<2Ev7?n)esVdM@or}4c67T&=Eq1^)(^XTiyC5=;DlL0mwti+=yM!X!lh`VhEaU$;x8P!Rog zn?1qd&3@lJY&Vbp|Bn^rcW54O1oX}GaR2^W{NM-wnE&gq{+1vA_~%HG(8e{LuTe_k zgQYb4lwubzJ0uzxH)xTwBM(6Ykb=@2p=7`Ma$9$;VeO|qB}7CjdxcRcZP*Z_>j%hU!m2yRg^pAXsr{VJSJ+VFrKgJ( zonMp4l94_^={`1i(Y~beQ=$wElOv?ooQ_Eo$Q2ZZ3+#A`Ngj@%|6=4!&g>{EvsmJLRwbiN{rfD7F=R73Pb zjWHoA$dWOgtF|E;7AN{4!(0vU!qd3x-kZE#CjQ``{1+&x_~-xZUm=9V(EFi>>=2`5 zU?fIpaxq}VHM4L%~gNibNLd>0_?_1uH+N6hrosqCZz&P|6Wn7#tl#RbpeHlsTKY!jM3K zRAkhmp$#poxW>hRiUsS?KrloRF+wAw!Q&A!lFF1FJ>o&J%Z^F6mV=r^3@BtqDRXS_ zAU#7dCKn?v{28eziFD*jvknzbII=jP5Cx^oaX8j-iKjzT(!{ENuWSeZyolr~nU30942F|7pRz`MPCJMah#YPrv8F;xW*G`d_T9Kkd!*3iZzMI*hEXF zCP=NZ)Ceg^)d;Clv@{fYhzN>Yl@ubUp<*m{STbcOb|}RdEfn{^d7D4^N8jY5fAuNP ze!FJJlw>le^&M{wB#9U>kt5D)N#pz2;psk}s}hROvFvm75i`(_L=BgBM@wq+;D(X#iqPMhtuQf)YEH>jAg4lQe| zIL!;hzH ztP?0ww6N5%6_ricc+bOS%5Kr}V87w% z0x~UWQp3k*fvY;9keaWJ96JW))5utN#1MG4NjNDyA0BsfF5$tRWvt-N&W3jnELELy zzDbbu4COXR3hh`{Ipa34YT)!V<@4u`CmYY$7)~cO&L>>834e5GkrCc~M{;{N^70~K z)=F*{g74j0bF|}`ZUQHRhHo9WygR8eYfrPzI2;HzT}mbr?hoKamC%O3q!j%5cRJon 
zR&2VI!g!4G?5BdWmBLy@=OT+v@}p;(^(x{0p``N}AKvNM(Spm`prt~_gm;E1S!%i7 zMDCS=0dvK5wntN)8t+W1=hzTTeb?GjoV2I`nZ6?CD(`WEk- zo?|NY7q;^nr?wT^uPg=qp4_xo+!Wk>WmarIIE37Keq%q-C&U}pE<}_}DAG~S_Vu>+ zBMZ|H*@TYH*YqA=N$5+|gy?xS80fe5jL3HQF9u!>4hn8oCn!Q2S_^v+*;!SfOQ#$I-;iYjQ*DSN#QBf{+QF zujyQcFv}l>5Mc&&n5Iz4^ z4zZy@Zih$^0wr|MPmZ1t0-djV@&kcX4@t#2;_2v41!^LjFcL?6_lwSQRrQKuzftTh`891nu38k?7h4pf`|;&Ax2~@y60@V zYm6Lj!%wnaUt)A3BW6 z5K<871-4NGhWdyuRHSl%j}56AVSS5@YifJFo&FE_Xi3cgBeEVf6#9yjlA6wM`0xJ7 ze@UV;{@4HE?=YgT{BKyY5fv+zL=h-t!Q3v?I^B{dsD zOsM=Ett~rhpD23{?@$g&NJ*4p;n(D1K&~=^fHDqx&w6)>r6rdm+St*?C0a>N@|1Px z$V5Rx2D;@$?*alX6jf)~_=+g!=vd%IWGSu*QP2jD7MfIK6r!NnE`lVQJLZH`Bse^o z%1IeAQhSW$A&uyVW_RQvrOK&dMeUa;p?Z$2N{L9cFh~qad6y z^MVWeh%PpC)F2`*cydvqg<@y~hsl^acwDsD*n*T8422w##vv*yS~15G>FAjGGsd!H z6_+gBOO!}hiEA8@uYY)-KmChu^MCv=e}g}}=3bd1h?FMsbOXC-pz(cGvM}(?<29GH zgsRgtKB01g_jWfN4ISre#gnCB+Gvbaoa{vM zQRL_6lFLr=ojV(5tCZSm3gbB)Zty-KGm(umJiX4js3U9VxSAR64g}x1zoyiJUr!Ti zuee({6i}MTlPa?4;9}i}r+j_CWf>#?@}cFy#89TN+(^10*-t%{)4Vsa6h)CNR+4h>9liV8xzWXN+ zIsV2y;8n@Rwa|H+-teU`eKXhlIwSV=?QJ1@Ex^7Me&Urly>?jzZx$=O?f}2dqWyA- zK8fJO&`s{Ie=TZ|?H@*=b%D_p9%ipC001BWNklvf_158ImTDQ|5|uo7t5u*@0<0;UlxMAeq6um0_UqPY~KDESi0vt?|(sI zMtdQeV_n0QeaePY#`=gcM_h!@Q6j^{R7o98q>vMoRJ5$oqE7J@<;bgh^nQhif>O@d zae+}KN*07@x#&-jLXpWjxxt~wW69MFA6tC%fGSkAcer;OBspd3bL7%`@^O9YafoLs6DJ;5WSfoQ4K;+5FUbd^wKL2zj9)0+l` zAxMi2yM0V05PgK$Bz7;FBXciR&}xB{uo|8wq?j7xqQl0H7#%(iRN0IWI;2coV;=`R z9w`hXc|bwUCaiIOKx$3!ma$x5)sEILlUOwpDI|j*h#^v|5raqrioM&?`z=a#xX@Fo zDJX&Pik?;CVdE+F3_+kY6MC^@JDlTOPbMoeRp6tg&@-BNo`xa!??r(i2ul-o9IGSn zNAz@*(s16tWRQul5q+fdD{?(yVhVf!8`B`;I=1OzSwv=P!8%-`LP5aO`YR614Q9n} z(6D9e2KulLqT8PueYQ6libRgr{U;iiny_ zrS_vCl+5JE zwtUD{xMD6xtcNX^@hM004mCMn43F8l$W3#DEAJqd1lfQX=z=5ehl`^mA^sp`)bT9L z0xcxbWdtluxCCIXYtD7SueL2qw`S?r1Djkt>VBBB zc!FrrCL%(~EEBwJErT~~tl`}eocDsGO!B9PcWCX1_op4>Ma$2hRM-%?l{tQ~yJRZn zY{NifJ4DpnoNGq9UAVx8c5<1o49Ylj1$U1W?ii6aeRJ=yL_ zA~ENdDUi~V({ZDm@U+=+T)=5p5|JnwIV>C(t>TmCIh`MPI-HTmDThVIv!=r$@v-G( 
z2Zv?k!J*^dJae?YW|TuA700>cPSx?O)nr=YePk1t9LfU@3k@=1@(`ZiJrrzR;CVZ6 zyM|A;8?t!7-<}r;nTjt?T5!>^=>t=f(Z`Ni<@x??_}R;hl|R7Y5y2y3#-r{fOWQL9 z&t{eJTa))_-3HH^7;o@iIpR(cx#%U2cP%gM2_H^#esk9G)1_nQEuU;#j55qs z&Sh`;)~(3hafTLf(hWS@blfi&{9?1BevoncnLxS@qjM(Hz`y_95kL9ij=Jjj$%$e6 zG~>eqPah)D33e`tujK&8WlLw1>)bfWlXZbpk?-EBc(#G{Hs`coGBQx<3@r?um$WW2 zuPwK3SpMCUg5E`D8T=og7u>9p4za=U@hazAvq%$O&^B`p@`9Tq!$sS0)_6=WIU_^# z0>742qsWWSa5{AKZQ#R+WMU*2LGh=5rg`)u$DKM*=$vf}&vuIA4DO9PqAs~vF1Wq0 z{NhwG%>v%Ee7w{QzQo88PcI#7Yf->ym$LC6%q_joxLtJ=!m%1Om$s+U6?)|P&Q18) zBf;}N4c5=HoT-*Pm!$O#SE>kB@R%r5A1By;=_E;%R3=Z!HgwQA<@kC-Al9G(q#TUG_U)TGh z6K3o>ej(&5LPU(|KQ|?_w7*E?UL+UUIDPo}4iJ>4LS{2!bo9PKkuYgWD6AXy4qZel z4WY#Oc0XgM_Rp7BBKUR3_f1Fr^<&9b-2Qgd<@Ni&>EainVv-$zl!Oq-)f55P{t8P= zFE%v(5*ai~YrII+(B2JnVS|z>>l)<15OxS57(!aoZ`_hhl_;h0!eeRJ4rfTAsPr*y zSfZrDhXF^6mIVuahv&^NaXythZ2L2^qC#TOvLq0&VF!p*yrF7V?Grk&!k7|-r?E?N zGof=!cJ>mbavHfJkH=`0r#x%)3~om!Qgon@>3Og1O9YWj*GMJtB4;z4r95|tXfldy zgo_>Cw?wHp_n(uA2~sM8uuRPzHvT0$e~DHFO}He4Ky40CD8})CM(uF)L?k0|K+YHe zT)4-q-315P`#9Mnfl`dw#tbDjg_+X1Em|71%8}a8vw{dN4tOkec1ZLQsT7r%rkrY2 z*wBL%^ll66kU}w5H>t!Fkpm%J%&p@Ym0YkMP7;4R1S)laB(Zy&xJ8Ma!NVqAGL{Qw zV#*}1u%csY1wv^Q6`2@w<+s#gOddgs6!IRyZ@jm9DWri0q@oZ(mp`3B2Hhj^Y z(DyABW9E9nG#g=*#STScFK0Qo&78A#!6vTA&mOi#La?Q#YI6MbxLP0@@p217zreJ4VwsC_(b7t3Q3Xar$ zjEdw!p{3?daey0kG~o&#c1+Ziu{>aDS7|UeW)vc$mludoa~i&2rWV{UA8_ebG%OJa z=HezY2oB5yqM!{e&)tR)B{FI*hAoSt;)Xfk(yiErb1oc|a!e2_Zsa3uP&^M?ny_V} zr}VL-vmKh8Anji8!?U*GapQP4ykM+y-Yf3%eArT&jyr{<@foXL@#mjvb~bRYIO5Vi zVr4b=ike+;eE-n${T(bIIW0_Q)nm$>3;wpi(7=s$^y&C%edt zzC}jCaoIA}Bkol&Rq!Xb6@T+QXX7HHB67EmOpRh`9d`tY8tcPcAOCdC; zO~HJ0#XDlcgOTUFJLam7tej_K8*b#|y+>A3$ezc$j*n(J%faxEFX4-ADuejV=>!4% zczwYr9`N<5Ld(c@@T`WwJR9-v-8cO3^T-e&klwR}tohyP*Euc%gutC#@x1l)!Qyb- z83%;VD0OOFb3r1d=VoR&CzTg1g{VToR2)dN_El7^jvh3%>V&Kq2RAh zBeN`W-YTx#z;RGGDHsUYm{x}CEQ#txZaBn!t!2YhYn`QhwnGWwr7eb>(HS1pf!)oouTroQTXuiqZ{r#P-(_jq5CDSizr z7D+<&+Lcu@N6G{vS1NnO{LN#Xmd*Df)wzzi=L9EFD-j~!XKLSaOZN=nFWu<$d|nk! 
z?1>Lj7!Zlcn@*v`N=zJKAbO+_RM`X^N`Sn692mlY+Arg0rbK$l;F5quN>udt z(7ZL^_j+#ktMb!tAGcpO$oT7y-Iv06tdA-6f~`AEv<(D>nh{N8@X4XmDo;fRs8l`? zHrTjh=PnuLDH`D!5(`vlGBF_t$K~*dD`6?sf{DCA9~)e-#Mo2lfswfZA>!l<2oR!U z6Rt2>i6CM`#yG!0ZyRz`qzFayi5naTw9Lrmlxa?4Cd-J#Fv)JRacA_dMXPM@&?|Jz z8H;0rOhv)D9?`JIv&F?O)npq@XV+})83kjEOy_|jLu!RW?;Gzb9DNd;VW5v&ifn;I z5@aBYH6w9=l>=Q^65@c162~=HwWjwwtZSH?+gLHM_AhAT6`9G2SbXfs)gd+vNU2aU zM?{g@a zd}>bPuJ#2jNt~B*PNAoCV!da*N_0#`8{4=}loOdz8O6kmc+^`4T5Q~ajQf?5z%pJE z8IYntg^Z~g69rt_o?PYW0yPAj>F9&x)Nk++T9&kS!&DuSQL&9XN|h6s@kMvZO*u`) zDpYvVlAw+jv;SNuq@f8rhTvHHo_1|H@jIsSn4F3M%Mcn$bQ%Ox8C__3)<-Himhl-k78pZ}12c`17=%CaA?_o7~ZdH;lI0jqu ztF^}Z9<4Q3gCG;2l|)E5s5Gt5x#~R|>o{)(A58-W#V^hk$AuynfkG-qMswC%lu(pe z#z`9~6uj&#$A#l^TQD*O(ko7vl97(&(a?LrcIc^%WZfhxKp!GLIv(vhOspBa#|6bL zGp7%pTGn(oOhZ`8Kj8JbHSwxXz9tMqH}`YMP7ze4ouGb z`hcHod;FCo1ceKZ!_082T(Ih6a#}+Gp}6cV|Lz=q@6b`ojMf`OmvfMNPTP_X>KP|H zi;@y42aZa|NSACJyj+!ZzTu*Ge0%0uWSURcktgk%ovV2&_A3HD^0Z&_y~T`DMk?c( zX;8($JM)&a&T&2r1ahA2pz)5*NiLR=J8DfPb6Tf4$Q?)Xme0>>vUSH;dfu%yXI+L8 z12dCxv#9y}DzLW6nKi=l4;K?gMxkWjlmB}|zl$6ehCjYN@Y83K=g&Q7y`xfs!Ank> z6pKmg+kr9B5s~wl{d6p^W8|E6ulWZwUeZ_SuZVLm?$U z`}-B|lo@vm&&SULaX*|sSjlIXIbWYzPTI&%*5`;g;$c1I-8#(>e7q{LF)%f0p!Bp8 zJoAE5YTmmU(1T>6;6TcLg7eay~fPaoKzR@~P)RU2vx=aCy3@ zx>bR-IrHhjA5Ap3isF^J(Qk{>FWt@81@PNq^m_2?^^EPcaJm+bZws>PpLuQ{n z$A8ncl>1wgwI73jm7%?EG`r^c#(3=*gO&y*b#mZDU?-P=NYREE2*DwQLdq-=AH+VK zAu?d#O|iWn{u1G}r{oEt+jDl`bi#!Ygs{*2#`OHu{*Bzb1}zIpGsdWrZFf%NSB&yG zbv~zeO>#Cv`rwpOlbedc_r$Qru!({|k02+#EN<^MUpv0xwXa|2`d)7-nvq`Xw~%yS zaXen%y11orOL93ys7q-1R` z=zT+`OQLry`=^N?EHq^{#i%)MUmlRl305?OVA&2A46b3KZUG@t6T}W5dTi(@&6G&M z2aAmz6*F=%LMXunPf#&qJ-j4Diz$w=q9MkJr6ov1%^^lq3~X69C#mgF%u~i$4oGr# z{Uu83#FiEVURX>l$wZZoyh;~XMwAS3iw!$Ew?k$Ur7|onUiKJ~2p~cz26hagArmDE z13)20sGwPf6Dn<}M9!1$l)SIl#2yiHDwzXYECVGu11mORMH3lcBq&mWuz7bJF7W!&(NZvkg1B>#GF>{66h%ewr)u0 zfAn1V6QbAqA->4czo3h$v}GcvR9dHoueczHNE;k;o#Tb0G?FaV=%^@SNh!t*GSv$E z=&+$<&6-*lSlLpOR(o2Oj7-5nmeaN^eb9{Lm|fVB2}MuG#&4)Znb2ZFQBu>0mR_{9 
zA!q9@D8vZqExD^PQlh+MWO4?t*@YG<3=@+`8ZLM$9SFfdko0j$87p>ijTm~?{*q($ zK9!hp=DwhpEv2X#tC|b9#K|cYBXX&^?ECb&A~Kw42%)3SW~^+75}LVKunQfMjxMyw zkT|=w8dK;xeRc~$5FK~R8PA6$e{yHd)x{y}L2?yUsyUB=do>*7ik%m9L2@&n@ym6j zah~@lk={clGe!z-m6pG|Q1~eLbbCP;TE2TYVj;)mQqlMxBLy}p-W?a*Dioh@1zLjh zp3^pxOQ@Bk%(r}hmZ4(KPoD~Ek$B97f=qh8S(WrLa^~0j<==vrZ4#xmR!HUfa52WV zBQgh9r;a{KPTeIFQS-=NkXy~f9Pni8aZz$m7$o4LrwQucN??6|ZSeE;Ye6%9{z zlFyng>)vo&RE&(|(bh7}19u9;dVr~cTV>1Bt>E?~W0AqdmE(xxeHA?%>5S)1z=wfK z8HV7fvxd{IB9j6o;KND8M#;wYyl88Br`UPVncvd-1%k-UTo8OJfT*=*>m-efT=b6J zr6xE-88x4-CFgy|!_f#~JAScQa<9C}gF54+>-mGDnggx)cxib#tT-?S{MkE_iHw+1 z@zF$c)=P37c{i)s*~m8+Ev<`0avCSu`JR9CtDc3bI2d`XY5Cqg_~dFrqcYS<##jd` zHE^=axwMX44V0=P2Ft}@`Qb0JGzu;Qe}Cq9*+gosm>T$E1IIq_k;6hzZ0FpsBM%R@Ott4u<=I-v!)cGs z9ZJJbp5=V8RxC=%o!ZfPf%6I#3+|12ycqHK=NV_MCDR%gB+uHwW!Ll3LNL+;tUUW5oaIfV^RQ&m~jJvhwgQ{Y2Aec|qe0pwpZzSn_&-+K7r&k3# zEBSaCIheq4W!UzLOi3~k_;jr~8&-@>$%AsrL<=5w78MnXOtN*7FPfJ3Yr~|FeC^mV zguu_AMdo?SX;-7A0Z(h?;_y4VdCF-T@n zFdKU^`K1Q^HMED<1<+Uh`K4GAZ-`uYQ$&4v|Lb4Z@dY8i%0YhpXI}qXiWw+XziM#P zsz|t}7+wv)T_e7f%E@%OpKQ;m^gIb4EItnV*0F@)5(q*}4BZ&rKBjOzR44YoB?-@# zoJz!(&-uE!?%Ih>f2Q2=b?vg;-~W2R%I#}mwah5ZEDb;HvvHZO_9D)|5umL-oDCp7u>&ppMUW${xkmdzy3e@;g5cVQU;?c zyzd#<5eXPl4zssgGF^g;gUj6$aGDqCLkn(PvnG5 zReR=fjt>J;XtwSGA3CCd&aLU}HdXXWjmOfuC23&H;Jv3C)+mwVVuzt1r{LJ!#e@ni zGNOuTWiS-n(hmq&hS-r)(fJKS+@|bva1_}nF_&G7C%N=mn)E(VW>4Ob5W}mp#?j;V z5daAL_g)%05kk_~RKGluNAx5CsZ~M5;e+Kr|1bX|GHLjW|KW!WK0?$4;pyqQiZAfd z5QN8z4j*^S)DbOB%6^M6p1_e_V1#Dn&v~yr!Uvd}GBuA;GT<4jnu#gtV;T&}MMV>? 
z2qJ^kP7vQRE5Bw@0V- zp-*jVV!%YrCiHB>6|LKX6fEN<5<@06S_LXu5PU?E^TIx6rYkCOz{)L=L31a+&DOV^ z*r#+1Y-7s9ySO8lIhhi)zGLNIQiu^_Jtd03OpTamLy!_l&XrrEh>Uf~LeHoev8AD) zz>5t#zb1qNEfiP7j&pxTA1_m59||EoN?8&Rt0tqsN{^8xhw5lA);nBmSq{&6r4$3f17{#Km9BE=Ucux z%aIt`rr^hyo{L_vwvw^-+@EZ4^sM?3J_;_}BR*?Vqf-+uQ8IFSl+!!OSr>TLc$Cn* zJMxTFh4&@vxIv>hE;N^2#>zQz6*uSxfk3hD%OMJh3lwf#&IEec*pW+ zrBNaBJBI@s2SRR`m;xmf?~gLZs^lPtTzSr080GN&dyYe$u^AMP+bvy8ja;n_OpKur zB_lZkB5Mn)u)~Q<4zrR?FBzgE(=B&L1KZHD^MXgsKnyio>+n8D3q!3Hg^Vl*$I5!v zL!uFFtR#>#))~_b&O3pUfw3+yTJr8>$%DFNnp>h0{IjnIuGRy~z5q16w=70Iy&CxW z=7dZXEV2sg1wICDk36eh(RoGZBVI(l=r3qo;91kMiw#p_ojUO8uHiSv8M$&i+Gutz(M3MI;A4E8oyN z#n?z%4-Uo6L(A1Jk}KHRNU0O0r80^;6}&%n2&wtTcNCAmkX%`T3ZB2a>NvLzFV~8# z-En(fGu6;q!K1C>ubvowvFx~C7u>7@yB@HsIUG8qYa!@AYnEG74DIezf%E$@tXG|Q3~yPU(hW8t(t$Z*X%z@U|Df*V~s( zY2uX_dRrX*|G9+XE53evi+g|hb!YhX$9gk&EM9$YUvwc9N`9HgjWHlaj*zLeLdo=> zb0p$JkCHi+T98rE_$!+JYTv%*(~{o4k~l<5+!GpN+_Q+^Ui#k;(g~@MLcfYVB(bdb z*tD=O+mr>qj$HWA<9u>LjLMNx;e(^I8zM0czL7+8ghYRc@l`$YwNUq=0o;E7 zUdP70e!O0#l;M@D|N3hk`Re2PrsI1pe*gD>^%wlZ-~EzIjqtH&2wT?ui$rXY1yZV% zWeuLeZwOdAw`1#;6lQ`}Ifa=JaTrm)q7B&4Bp$L*X_!t(#`>5cq$amalps@RT}4A~ z#t4yWkK1rXp(p9g5sp4=F}lK#A(bYVBgQO{Dq}r7#riFW*$rCXAdw7lhZpJMXmDG` z%m^aThYg43CcWR#gf+RT5mMnq%1GJRV#5yWJA!o7YQfapMk&KWzek9U5YsAx+@E7I z8U&gs1GSl>b%7QYh=`9J0zodPlyZVr8J%BZLq{LBL;+GNI=`b6Ta-+MkeXC1;Dp1( z34`At_Ze1!B+y}W&EU8A7%21@slFtcgpkhd!EMO(gp4W`h|uFgPdi+(8_vn~7$r58 zoD=0917}|*hgIlQ{sAfHOu>KpU;Kx79RJOK``=-O!^i>=JuZ53F=B`fH4B`u)KYRg zzsY)NIO%^*aqsUw001BWNklmz z!&FXC7>rZ|5xJ>vvI$$xhf{hnfYJn71_q{TP9V_721#Thrx=kcx(lg^F{31vLM+`& zGC9JEj;I{1Ta)O|f*?EtJ(FBBHx=7HGFDTf40z$0>M^;nJho41M2Df|-SQ5EoHq8v z;E_~}Oo0*wP3#z{nzbLWG)&b2mCTrBC7*YnaW8k}_=r%7oxfyiQkleoIpQ!o;=oLp$^}tcRyMi&Q$3<# zi^XBR#k!8UIpU6f2LXIIIb;==v^H?Jnox@wl_+r{kuZknQPJQ<8nD{2;fB76i6y(x zar>hqe(-0%!^i*YuUI~AiA26=dzQUqUUn2(kx9*YAE`~jiw;g(!#ET4z`5OVWai9s z!9kT#<~vSz8XqG9%_2`jLZ_YP!9;VQYqZGt+1g>FXU7@O`V}e~N@b8S!^NK8JyPtv zk=bF<^&M%i1 z>r+9+B-3O4`E)4T5kV#J=ORj9k2cuN1@=jHg%Yt9-dUTvJF&S;}tcIMItza8^Zs%jZ 
z`JH!p_xJAc*ESPqgWThG{l^*NK9lIR0JUvHUe4KZh=V6$^9 z@&ck{kw+f49jJkaM>Wrvims8|e@AmZD4tzOHdgR#xT5WA4zdC*JZnF&b8x!|{MG|Y z?s7I)hROsQm(w}LuU7Es1*|&5vr9u30yiqj#n962&AYYDkY&e6NNVNz*~+u=0ufV< zxs3@Y*@q<;gQu1U7@46>Omv$f`(Z)W`PQOom9;QpxK-P&-g60}ZnXVft@o|y?O z!iJ}-f*+qM4l;ochAb95m`E0r$RB<~5MAW?(s0%Vb}sV0qtsmZH_MJ&wM1IOvR5>* z=ePZ&`lyWZP-FDuZz@wDw5t7ORsNl(;Z$H%x@1^eJQNIWXQfPe%~It0`~V6 zLct!HB80-ols8L_Y5vt?8(m;jmYS)e$LN9(9qu~wC?&mLV_lPSyaJTW_aaR1GrB{{ z`R?~N;?paVdW!e^sF2E}{o1$2DODxXL<|XCB@_gYk15L*e99c>xOyq5(%BaKw2zV9P#WY-Z+uXsUXhjkYxtgZ9MFc0hedRP{sz#zPR17Q;Qj>`q zPsb2iYIOs<7nDkAA^{NvHg@E)CZi&V@QM$O5Gk@F1GySg>BGbj4qLRS7(`1cr^Fao zg;Shx46$Q5JY^fsK?Z`f1mSUh!1{)M?*f-*md^QD;OO_OBx%J%NzKlmW9g{$jEG06 zv_ess3A^EfE^LT6q*Q2G&JrDDQJ&M1^- zh>lw2)M8G>jCH(3iUJ!0SK*4m8A6080zNpbmk)P@iWDlwsR39Xka#lx}2$Bd`D3=wJ$ z^Mad~Bj)vfrV}C?(tM+fFW6c|2$C)aPKS=c zXI!>PIOa;Bg`jhimG9Vuh9gmP-oSE@tldEGBefPNp?ER0+%Io&kZBqh8NB3aH*ns2 z?$n0cxk4*PsT4*DDrGnxNq+lQ!*XYE9v+Mg*46kBxc!Ygj2AQh?f>{AzW;E;tc+Z4 z$}~V11@G35i(b)s%Q%DYz1MQ%xZ|hKGJ-A0GReIU;o?j)(t_0>SQL@pKJpys8sh>d zOUZYS8vg$Iz)#Ot+$u_xfG6vmsmUpIss+F3;91*ay`pU;Uz=D~jpp-B&RLs|ajhkb z-1EIV9i0`N_m*$n95~r3Kr=OhO%ErVG=H#lk|LA5Gq-Fy#l3Myt|Y5LaFCZwb;ZUe z+*6}o@`L-1i>sO&?-<&1$w(M>PI6Q@P8vlYB+qsie7f#&VZzBaqlt#?AlTXD*5_Js zTzcMJM0V$bpTDfoGN7bqQ7HbCA2>u4>4uz5Q?Twmjg#E0bAI!E#q-meQfoe#20mU5 zyjyGT*NS@wJsLRMW<)31dBvxzz{Oy=X<#NDKYpxOKJ6HGnyW$b)2khipBZAGvs^>h zD+-}7GV-)_JYGw*FF5NQTOTP!$>~PXI879qO@tT~rAmmz8^8B;9)9aC@J8nMT2{S2 z`Tx2j_)q2VR|}KGYgpiKn_prxq ziJ$?OXcuqb>R#`}+jEFx636kC82+l`%v-m;eSxw+g2B>+3u<{t>sAOMsmz?lCRVAG z13@@c$j~xp7Y9b_kW$UqhEsAT?7}6HKo&|Uv3Zwg!wJ1?@S&&o4b}}DO2n1ZsT?F9$*#h!{G%NqJdSQkexl1~NHG=WOg!90Erd)^vVFWoj~6kWm3C zyNd{n^@5Q)pkb3TduBoBm&6F&u*HU!Ox7%iXDPu0Xi=p7#uG(IMH(WbR0|@GA#~|{ z7vRG{p_ABGvS6OQixOFi`b5&jflBy5B@FZYfIMWl7%4+0T722~-wpp_d6CV0`)#Fh{>b9o0T;Ub)ZNUrg@-C$&euo5jw zYEzS;S-TA-V?d(3VJhZiYDS2bK{SlS7(s!HmO_kaSwj@`&Juyuux6r;iNZlN%+;7q z^sJr3;}9YeL&lXoLFtlh^w_ZBK;K4q$vmrRy`zW)d8}CaXUwt#b_@(&;_Az#>l2T0v;w`Iv{ 
zi{dDXtYS@BmD8E`7}J`3v36#SG~f*+uuo*3h{%XNeCzw(_jwZ+-f^U6%&lV3k=6%3 z>U*B`mOZQ3w~-Gv(8k2Q$}%^gRp5Nw@Om@nwDVk#j-`Wb$b8?k(Lrh$-GtMa4N^uL z3R%!4hmf91M+A{R>YT_W19z$kT^#uN^~jyt^5#s_1;@{>A)shOqD!8^!$B=L-C8y- zaWZlIX0u^WT8vZ(DRQiKNbD7c_28+b;l%EvgXcz7FxA;K6;tHecNi20R?~TSb87hH z+A=YcyG`PwRYFV9Qd7u86oLxH=1FdrEXI+w>zSyEo5hSVB!cviYkV*~ZQ*uZbJoLR-11g4VN3}tBj+6~90JC zLGquxzM}oK=Fz5LzewyGPZuSHf?r$~I4=?2vUP&D_8l8nQK*5dG4Nr#VJ-_^sV59! zWV+{HUSMuq#=2r{HtBqyT3f`E)M{CWqtz^IS?AL*J7d=Xl_oL;Y z6f9k$_cIzRIU6kBxx3{-6ZrMtYMM&&)`}5}b1nFd zO5Cd8v`fsY9vds(nh!khEu+`0o#1rv+;0p=rDW?Og)Dfy?OBZj|Ixj`>9*!?9((T8 znwzEJc<%6xrWS`x%ndrf#JPb|PIsMIi_FcD$ytb2@)cP(4$7lQFa;A8~5PXr$ybsxEp$90(C?s%eV-e0*sbeYewH zzlfXGxX|I$79k{KToK}k(>*bcv|Qnn1EDB&Nfd!qIHOc^gwT{a7fhRQjuJX2?+Hbz zC!`b^;x&Q5IBtPN7dFi8EtIru!#P4|2EU#Vq9zbgUC%L&Z#6LR3uE0h@Hi zo;qRa2l~`A!LW{5dZMt@at=asU>nw9%~kh^yY>cy>ya@cb>zr23?6QXn^Z=#b&|r| z;xe2wkuyrExgOVC`BM(m9qyO6*aSFIw?HPY{f1tw>D)Q@ns0FCRy3+$h%1b0*v3nI z95FO>@tQq#6Q2^LPIPnxzoL_hC;}5vaA0-T7>h`k21p%N86K23IUhWmT?bv8fD4K~ zwAg4Ugk@$7o3P-q%d(G=4kbYe#lXOd4Mp3BZVL^Sv7n)g1@J$wC#!H zG5AFvxNsX@n;531q#fYNc*R^!xjT_;T+c->IJ6ZLrFehcXL117yjk4DC&?$9f@keO ztpv-^oM}QP5eOQ@23CzG_dwiiUK7Qt5H%!!MFAtNq9cl8lHCnfnrdReXFPp3~l0mBY5|0 z#CfYEtF9$w&Qu9ymcrIP!jd^6%mYi)2n-Lzj9wmBycw{-$H#n&RzkE{j z%gve(*Aig{Zt6K_ZNkcg_6cPK?=39?#Xq?1SoQ_)F9jRt*efiM&~o5k3T*{dU$8xYCTddEL{(6Mect4}L7-Y_Y)y#DQqPySjmF_D0P z6giNd4=yz~iV5dE*|!nCV0csi>dPI#FBYL+E`+`;Z}@Hh=IgiTIAFzByUD-pcje2i zQWmfS=ZoEF=?iBo#11!>H%O^a%4S`y5cn{ng$4P-h^BLoQ|4oYn2WK(Hbin4IVQK$ zR+=n!kQo;xRe{kJS{0eQD_&?QGu1-NYNz+LJFc+G7%gc?349!QAv8z?Qp~c3kRmUl zpk3%mF}_T9%aV`uGT!+!#&F*L`Rn<@h{x`Q@KxE#7ik(_ybqsW1m(wr2RY&G&;R@{ z`RUJoosX9iE{qgripJ25OUBp%ijW5SxS~-9SW(fZHEVaqC^n3teVH=W`xRPRj*B;Q z9ITK?r7^O~WQQ~oqstgAy~o_#X6sH-(lUk?Ar(p#c`+kA`}zh-RfIHT#F;9xDKBm* z%!EoG(ukvMehdyPD>`>gA|lar{yKO3bY{N_G^Lp0;y|e;%=JyS;gTV&@gbY+N;$!$ zjwm8id4LxKonPT&UTC@0VP%C+16EF{)d3MtA*xJRpd@(5+Gi8qMD0@rtyI+x8)+ z@@$4njLxKoK$erZ*kv8OfKty<$uNX1N)3pidE%1WSL;n0h+jlB8rC*IfB6NeqM?-J~*-QWOl)BZOxWPjSN2 
zd&i~w0HZB4IY)}X!k^CXi6n9|}1N(BHC&MH5$vR|(U#Sa*pzs6+mp{Xl3RFr{iep?#1QH~{%qZ4jOCQ(l=^IE{GLy$ta>B8l zv57q{b-Z5R=OQc!!c(AVRL?Vi$xNQ0jbW26@I(ZLkTScwl$KKWv~kNaEOBHSj%TD4 zb5v|Vblj~Zoi8Y)Az@I$Fw=&8t9drAxeA}|0ta$&woO+VnTO}5yvt0@d95kwW1^9B zDpfG}fE5+r`~Elh{XhODkN@m%Sv)!CudmjWSbqPu=kKosE=jZ=*tB!rTXeKRb1{ze z5h@`VQeLnV`{Ndg1ly`EWdNp<6S|evZ?BF z$YBviuenirCd%Oalsgz{pLA<#ZI~4!Z%!;%y+)>dPD>ZLTh=`92965F)u4E^breEj z1l<45eeS$^ga7GI|B83^4R@;SuZbygSi-?%gAWDITE(;Bnvc5$|K&F%Csp9-wjw0K zdFPqhNNICScJpw?ho^=*1#UDO{_1(j4_<`_4=TE=hzGuTr{}vf!|mA~b7@$*j@6*~ z-CKcSx`&mT%k34lby!*ObY1e%cHpcn=)7U$1;&h=^&8&5oN~M$c(y3GUrR2Aj^0_` zp3c}fL+c~Uu%LC4liKmxRB<^(esVsLjN!?uMkYn0G;hv5zyEs6kCvzO-HeOgFm#qm z8eB9~qGB-wetKm%>jm#$7hDaINeO|-s;jsf3}62o%AGlZ)OpW5ZNAUh91;GoHjx554 zI}^q2y^-EQr`BAxio-JU-GBTVul~*h;0v?|@uh<2%LT6d($BxG_!VC$O24A_6>5hE z%OZ=fk4^q+*Eh!o+nvT1;!aB&qa%cFXDkv~KPp7-4DO@@MitcdFn8SWS%)eWBwlv! zw6Ym1uL*OGmRoTT*w+o7Y1VR7+qj> zNsKPbE7FV3ZBCL4VZ_ItlybV8RwW^3iNgy`Xd*!JAUu8n-pt(M4#3qA@OwdVS|w+2l_6Cnl3G9;}wnCL&=hDyvRD@ z7%1%=5VT>5l$uiQVN%xa*7lG}@1eD!P!kjdh)AItyclvfR^&L!Y?$hZ$s>fMQOB&~ z8DrSc`z1j*3Y|69L(Eapg__{Rh!BD?t&!61CI@xCA0jZM4J1KKj)}Q}5|&0znTR<- z=E=*<-pU&5G$H^at891+YqZc*YDTH&XsI)1OjeY7wu?R<7*j^sh14Tb9_b1p7(>s0 z@hAToR#p5j|Lsq)OxT7Ej-F+>LJLi!COIKbYJ8G}r0~fhMZyyZA`?;yO7`@Gh0jG< z!4xG7b2VX14xu!Wyf_UhQHmK!IPTed9F--@u)*P|MNN`pPJs)8h@oRkz~gtXxgbD` z3J6T(jG8?XiMcwUizAz`BqhalSRzDCtqz!(35$5aI;>EXT=5($rX1Qir{jkAhes^K zf>+90w83-kuNfK8qGGBV4opoSB7s0h3(-(A!Qt4(C4Jg3#4S^C#F4&%NmG)rB%o)- zCTsY0|O&n2&5#qa8FnaP>UlVbEl8>F%!|S@GE*+MzLaF9dYh^bZVH&V;Zr~ zR7_cgYX%}~E-|UWrUO=SLnG%DvP4VE7!#-d35f!PV&%@TsX~i}V|9aD)giT<(?*Au z2^Rz+SssnQVZ_sLz&5VA_3a6N^v{2fKl{@kF}=QIVrm4E07*naRGSw$x+EpZNoh&3q%ncP8^)*@gW=h5MeilA))h0Y_+S$`8yr>| zlvF(3LQInT6Um)A(Feniua|t#ZWv<1sK6`rjF<$^+r(;&{QeDCxP*^_E-3!)(({wm z8FM`W-SPdqBkx@pKJC^FK|nIxt`&_AoOJ>pN?a<~I>V0_D}J>(XVo5XudbNt$j?@1 zyw@%eSl*r*p7oK|X)Z^{?Q+Jg(y(VHl-ls&D)4lZc(s{wWFv)2oc0BWMZ!wWapAe{ z3%VeA*0)^y$f2EbvrxRZ8t8HS#}8Y6biQHm4O3lkTnpY^2;P_}6a|`^yUm1ss~Nmz 
zqDn4?NaqEwG;mmG)=qO+3I6Cio_C*G`UJ;?qSl($DUPkCv4+bYUTGZn8%^tK3Mu*K zRN<52{msBhB{;G^hb|<`ajCgcIX-Pi3Rz&3!9~s6@7&|g!y_L3`OoQs;b*IBrgr3w z$%LkWhx>tdFDgRP>{-M2_X`f1z~7xscz+pLjgdkcG>MojL1@BuAhv;T?e|>uB@)F4 z7X_-_vKaG3=~x8@Bt*ru9(c6aW4|yEEx&vmS#CUkd`I%V!@#qx!3DvaQ^TYb)G9~& z9$LwJn>By8K1aD3V-$S6)_4>@{06)|8QAoeCtJZ$p_vtiNuYH&DK)T5JJQAp~FhUL|fY6IPEfa_EEn;pwL2dW1cu;&Do;i zPziK2biQCSDjv=peiixP?|Y7ud&~oUoLz;UmSg%IqmD@4Y`o| zqK5ZH=l1gnWEmayRr0`ZfBqtnf}}_e;Vq_|rUh!=mr`8+aD$?cNYQXaDt2{G>KPqC9n zWZkU~BU%+$)8NCv7#%4klu`&3Nn|~9CQw8~^hhBHDUj0mk}&bYfR{dpIQx1^NBZ2= z{5mt^XIJ?P-QzDL+~vp8J!KW2FjXg%>@&)&aUH{!7y~nPi*+~!1wOVcM2FZJWZ212 zteWISND3I0k!XWk19C?Vs1UjLjuw{DZ&rQbUSj49wvZH=TOCVvQMF~TYp586dra}lwrT2HF5U|2Bj7thL z-6aAV5P4!!$vIZmRLod}Cv3QL2Gnl@daVgVWgj%8dRg(NUi*%@72 zU}a5Xjxov*L?p%?-b{c|H7Z%6D(L-!(RUbA6UnB{DHCGyTn&$qR#T`6FEry)C?cLK zo-j){I8-NS6}TR=L}|g2iI}1(fq^k?n2AI7RKsOhvSCA=8pgQh(kHAaS;Z?#45R2F z!Y%U}Q6vhfP(qX9Ay?^)VO;V|)D+QBGbJU-SvaMQ*EH&Y6LW~fU}Vo`7_!+8NvTTK z;W1iFu&G7~#h#o&lc%@}yGpjRLgPx^C679EeMrNVi` zlsN*+nk&jwb7(Am+;WwkVVMw9V#|^gVf5GBvahfWb0XR3XLZ3$3l?F^#$VvsFx3Z~ zk54(2cd>H9bzEe*hfvJ)3CE_PVUJF1IQ1)L;)H!$;iBLwo&$!na84VR?5PuKVOfWs zAP2k{xTo*1@)Jms>o7o442&3|nMgxIGKQXrBhX_oRB}cyvbKCZ1}bGpqC?0?O8N8I zss%Yp}urT3A=81Z65BMRC8t&>zrb1|-nNwW4i z)$_thW=b+QhLjX{IpO2s2@_M`Qi+z4=RHKR&dGY|iY7E%^c5>9*!aY?%OmoMsxe45 z9x5f6V_1xVe}CE0j+)j<_N_rHNuvt3KJxa=P^d_4B$K2#DJ{zpmM%AuZr6c#4tt)o z`XX%TJzZW`)Ja3Ks-_ zzbKF>tc10*+^S#`6hnj%0>86Y^0ckEQ^IO6yi(8D_`qL&;yF+Sr!L@w<#c0%y0P*-k#82Lc6v6Z@+1fB5{8rFj8nL?I;$r+9Z!@GuCBkQ^3@E(q@IZ~62} z;EAl=$ez(WtdBXWEZ5Sp9Y@~Y*RT-$yTybyDry^eYc4n}My`FN5QgX5iXR*X3JaGV ztev1#BloJr>9$6PhW%>H#;}d0R)R(+jEp>-1xh1HDNjG74a|$k4<8!BVvg~NjZ3`J zDBcg6AAhW9L(3QkZkKb;N1Z2~+OTeWrn=&&QrxKni!pLBM7}lGd{fSNe>?JQJ0h+k zF&Vn3cyO55dtf;JAkjK__lZFY!Q3V!iF>u<&BHBTEB>EPYX0^t%SUaJ+?xn))&oLl z-hRE~lb;Aa-8z&^R0>L|d8M*6%A;iB)7EgilALv77kmCW&F^PVzbt!r@%-~+@${wV zUteT>e%oh7?~70hwd>yc&qW-6od)&uhO6E03z^4)G8>yx%n__Hs+SUg7;{ACi|%qR zPzPd>$Jzoxw&J|lt=wrE9Ttt&(;d1@?z*xuQy^kbN*=S*edez6i&*7|V3&kMLdxQ0 
zi7AD~m@*r)gxQ5SY#4oqRVCKcq!jV7Cq|D}8YL`{b(bl6LK=3R<=k<6DR+2z-9Afu zgUoXLLg!upJ5t1p1;}S+!!O_0uL~pj?7mptNbw< zrKAR#m~u=c5Mw|@!FIgFsG34eadAT_ra+=h6FQcx`sXy}kcqm17d>O@Fd`@6DOpkP z^u0c`gp?;BW87xM+YU7t(ukD>MwBQbg;307gHn+`49u+H>F}IBdXDWLF)3uyRHCAk zmS^KrYB{Hp87SXJ1zwPpN1(ZiPcf;a^$RL}hq;*%QsR8Lq-B8>B~$Z|A#HI!PXLS_ zh;hr9dPzwF@+|oOVO9RixON=lW(PQYDi4)$Q+~%}%Y@A1tC{n?_x*{OiL`|KhT=*4z8i)y= zx-;51M@zvNwiKymWMHD_%vH_cM{1b^3|8Yc-Z-YRVjVk%w8V;C(V9ZJDVep%Tcm$9Y0xl-kHt093vC07^CF*u;q=W=F)l2hYd}-b+&_x==M9JfC>dkK$G}7vJZ%MA2Ps)*MsV#$LQ(`V@~9ms zs5q%gtW3;`$ZNC2S?@V10?+$_%b}vX$m8DEr-DznIRxXRgu`Ow*DJVHC)TdygNvQvEM1V34Q%p3gyH>-lv zR#T}L@z|d03&mBN13z9n>Gg8icB(WYi*B}3HIv&28pEq{9! zXk@|f?w9<{mEcjo;-B5Fup+VOH6ba!b=T9QSe$8QS}?bPkCu}47=Vc2?!di|Ji&}d`MrJ4->cMU?D;3RJe#59gO%rQ0gr|S`$jX<)yt~p zOPt>?KfX@B@a4bzI{okGi?3Wr<$`y|x|Q~2(fHZEf7?FeZhsOf3#7DIQ&ZbHNq`T- z4t4e-IAd4b?Lr`4uuw}q%~&RP$>=vbih{}=;23bB+aa{njwq374snMuBW10v5ay*` z7!n~lNy8+2)dK%ixTQ8kS?$e|t5(x@Xy0Zob28j&Phf5nhC zOw|d!UxQNgtnsNu2!+*k)(9s{Oj$xuh>9+*fXG0P4+BP)C}9}Vnjvh6(NXCcwLTyc zC`3&qrfBnm(vbPAF*#~oav7czlBbsY1Q{6OhAu2YK&=i@2->h>@N1MT(6VCXpOXZX zYW9*wQltrE>L}ENxt!Cc%nmNpd>6Km@3|CNs~%I}K;8q9&qvFO)$o+UOz6XcJ#h=G zrbH1CDeJyh?v$;+0GSw5$4HCY$t;5KxX@9XLkQsF7KOpZElEj+u%!=cgb;Z8EW;op zyLR)7ovyp3(Kmod^d6%tKv1bUiG;$Sgyuj0lRso`YJU7b{5>LBq>LmKK{y~R= z7M~JbT2YFIEBB1k;X?{NXOgBUG(7`bv1S?0u%g0|4N5grj?FEWzRU6v921obVJ`;S zaLvBir%@G!u$0W`eD`uO-qF%UMC#9fQE5-lz#?3-2p1?(V3lT@vN`gWxko@_rRBCg z!Us*IN?vQGs95E~ISy!HxmiwGgfr&)CR$7>l%$OVZFFqK3Yh}mn$20KE!Tcc@cC0L zMVa3}^k`wVOlk)9A8Myf2K5hX%L&d0~B{W*=AGEpk z^Jc*HHFHxiRT^VD4y(wc?ZA5%g2#)*t*YeXu0x_20~{34hQzZr;Znp1!E4jN%}RnA zDYZwaErc2?VT=N+9N#=hw7%k^Gqlcf-US|S0!9kzX^^XhTW?c)hg zFB@v3xzmihDVOGIP@Huu4opF%6MuVl!M$nV@2({4(Xd~RJn06SvO^_FA1uFfvgPoO z;O;%)HTSE? 
zrjz{m)Nr>Ee6loH6=_4nP$8(fRcofc=lt68@84Bitu%+m^6YZLX{!k-Fc*ePZ@Hc) zPR|^_cL;@5ymizvw}EHZ1s^?1{QFNFJy~9NWF+67*Th)RI>}KJ5&Mb7Dk1_5LGox* zFgT450xcz${VL9Qfvoy-`pf33CKYoil-+9UT z)&J`MyCC{B~hceAPhpqHC*Q$LN)!$lKG4asB7__0__S-Ki>BAXTyBS7vg; z;MT-A@cAM;r@d*E&{)%8RPHMJ)Du%iiN)YRh@ITPQs|nvbJ0s7ckJ3ojxly9WpjE} z++oai=PX5zKLv83tITd&4ywpO8skfISMS#-DJV@I1%)u?zzdbbInpl2Js%^-$V#EJ z{y2SxHA}m55XhJR=a*XFujqVVFn_eK&U{)r6To zVWdS$iB<(#6&TUbg_d<#B4onj@WNB78If!tL`b@Ljbk7RN9!+g;iV;GY;mdIq3tBC zzhdjJ5GpSMTpaV_Aq9@iJ*;RL*Z>ia&p{VqNfIG5XO)LY4ls~AzAz&pxomJF8-?VK z9qhx%$cC-Iq>CL!HH0wYb}Zpi&zR_aqKtUo?GQ}@s~QSbfsl9{g_)p5i9}*VNg+$3 zPB~Rh&hR4ZJ_o--%Ys5QjIqrHxt^e;Wzj!j>sLsn(XznD4j*%;du|UYWWypnXE{7a zD$T?kQ>nc?;SgCDzZp)m&RSab&HYTmNGn3}7*(NFMIok;0@jrH&=FI>r-9yW8T=Zf zOSCeX=SyDX6ryGwF7T;ih$|*)pCNT@{5eW${^g(ir%0js@BZsQWAqZDW)W7XR6vqU zrWIGrtfr3vEo!E^WG;>vlh5Wt0t?O|1`h2E5ldWt zC+8p<6n$*j2A`)hBoL97H5D^lO03y%AP-OiHqlWtp`xI78J~AxYovl9Mgl!I&231A zH7f?zRPum{ZaA_T(C;IhjVpxoxTr|MFa*IeU9+zaNofnIq@yJekO(HaL6MDu&Pxo2 zWxD3saK#ub7ydDAT+y*&ND-eTTO6%ebMCL`DLs(F#3|Mht6q26iD6EyP6X_MPQg2!~MD@L`fgu!Y?^dH>p&~S?AGGGtrht8wNZ- z*q?J$loU!KV~N7>=KWlZs-Rg6lB?F>qTt~)@yRwKuqZPAYmAYbl_Vt1;1n*v5CT$U zG~85{eB5R^*6Y&~q>$XNHOHkP;(0u*cze(CxEpysw%Kr~V84i54u-Jr0_VWi-=6kPUjQYvbjczCPh<5kV0Gs%!VF)E(* z8x9N2-I>QE#hdelk2Mb`g1G^KLdXFn3R1M(Dry>KdE5?NgwiShi! zZyG*YTA*a#!m=y)^-}TfWx;w>929|G7JT;)EKeT8=_+x$72K^Q2+#Qtxm_nd*$S+Y z)Jiiqkq`?)ELgb6QFFYI+iy}SpWGh(o1p)9O2b(S7;_=pERpM8R=j`c*i&1k>d-kp2KsS84 zNhoQU7}zg7hX;KcefumgyeV; zJ)FPDB}SJoJHa82yVN*^lyX;S=0Z8`XbbEZz#$I1{d+(ih% zgr^T}68=)t*!6xO(PKo#RNYF#HKtOC%p|7`1i2hBF*os|M~DoG#O%bp~gs| zY3u@{b4KErNTl-{z|*=7tzQ#Wq;{MAkYz_5Rv1+><|bl4G}weyYNwM0rJC-E9?-`v zM(>9M)sPT7jLz{y8rHbjlCtIkp%f8MA5tlY+>=c*J*2TKd}y(uL8}5S4Mx7&7{-uJ zBC70w-tQQiqg2f;QU$XhVq;52MH`oNafA0gH}bcLGLY zl5N;wlprGUF|dx;%*~8GM0R3@i(7gc5R!o!kc$y2OB*RchKvHDpbJ|{rnngKvZG`` zz@da>6D+&fu;YTcJR%nZR^f`JJxg5L2u5b~VvUSBg&a`F1v+N*vFFr&$=n>G$Pq%Y z@M{+CIR|D6A)}?G5SrSzw4x@9C50H^q{j$@#Lzm=Ranx|(Z(%p+>wilL6%{IV4{W? 
zo#TQ+$C9xc;zY}ZyGSQW!H{b~Wi%qB*h@=?5DF~{4)Qs3HN}dWyZH$l+Ys>FP@* zSo7Ww9`IlM;csyH7ymb({A!Jk>8Nkk&Di=>C@0$uE2(X z3xZje99*Pi-NAWlIqz#cjt{3Ll@b`4bGMxH_Bah5zB{!vK`_%2Mb5}b3^|poILIZR zZUVJeYdCCWk+C@HzJEuXA=R(&dF zI>;p#jY1;0?oy;oe&$RFiWJzP;dDn7XmrOI5=7MLf+y6e(Ec?jE+Y79(Sh<>jeVnr0nROWYa4)UB<16M+HqCBL$kv+OX^eofni^@X5NvIlb-mtZb(LnQ^yB1sLO!I>G?_~Vd zX+xGHwrYZWIz51V^Ps3D6RxkSuB#Nw{5kjDSZw#Sd@SEVm_^ z4C@6qs-Abo0Uri@wHEy1dBMsrcyF9zP)K0aM^e=_eAzU-Ig03z^To#SdA;Gy5tJ%5 zA2wF;&+cz{zN?77VwAyeoUECt$j`1auB_wjQRJPmW7TW&3a-|gv$Z5R$-}y5?H$LJ zW6>J4fbSmGZ2N-7X>L@Ji5YNr9QdoVfY5NZlT3`{{SO7DH4Jf_HiEZ)_dVXm?yO%C$hyf+@6iM(c z+6LQLWz`n|wP&!tsPZUzjp8I_n3qGRb!Q#yUt<$Pa%j8~6X zFrtzNpi=(W=#*uy`vt4+6qhxuy~l<&5h3K57%XkrQin^#eyDA(Zqw+7&NXSsO_x+^ z%-WuVgkCmGCc8nFszlwt}hq6JK{ zI|R|QaE}@36A;k(no^C?ow(gXPxTDm%c@BSXeO%-Mts5RQ<>v-TU@z&czJ zF>Jgf3ne?21R2?S%SK$Lc!!=dGXpNVEv1aK!gFFqh>$T9L)Nb0B3u%qO!c^;Mk86p z1yebtl9EU6Fvh(NZc&2+;c#1&OZE=xd@E*pUAJ^sK35L5wiWdRnV#T+gOoFp@L+ z;JHx__{4sNjgpnO48;M0HA{bqX3f^+G~Jpz<_$(>jI&z~%$Tifalw&i88^fUeJ{yl ziNSDGC{~@K3^$mXAy>Xd2SXb>7V(TimN-&P`b3WytBj}38LPNv6BcAXCldvw7$Om@ z!-9F7aG+;giw&i=ly1hUyFy5Xj^KUEK+hS98DoZcA}$8b{c}VNDUuTf21y?*fj}25 z8+Xn$yTyrq6GK5z-M;ZH2jKts!mw!tQT9~E^1Z{z`EJBvWeHx>hZ(i^Ty~x({RJ^7 zZsvm4MW#kG$%lNty<(gXId?Tm2u5SA2RJ zc($|LEF!0^=e?=sx)er?qC+L#;PF zUkzCFkq2{!hy@~Q9@UQjc=?#4?3hvoe(P4oNuTkk-m(a5-W$(3FeMjl;IJqNF_l1l zwn=3-E5G8b9&oobL=pL5c*0rZxshk=!h+h55Eg1{xac~bH=2{maDOUsvEhsJ9@T4h zHZsp0zo0@TvV+g-C3j4Y z5E+QX|DR+zPwY8g)fIw6PAbo>gB@3^0bg8|Jl=YQ6trG2$>8T#C1n2WkhK+;mMsZT+_@Masx?s_T zRM6E%e)^<9c*)@`@N^?shaIPF#T(NcBLpi8U7ZtzMv1^A>-oG^KrefK|AyggtNC&_=W#7iL^i!(nh81& zHVWPvlzcd`7-d-p$HA28DL=FpH4$W5ee1y8pPO6IS*o&usoh=`UYnXbrG#a=2WVuDmip)e{V)5$q@p~vm(jRkw+!(L1pl_OOW zAuk5nUa)zSo0!~ey~lx+*_c6o2!fXbeCd15&@w~oY%g34rJPcTY2t;7RM#7#CDREZ zf$JYOq^ZVof z@GCz0*;iPS+nbzj1wLTN2qG|0b41WYnbw~mh$7Ir4ZUy3b+tdj4{Y2Sh_q?i+Dm+} z6ncb|8KoGp^;Za?snm?RU!X{vmO3n%t9!I=lQ_G=5%;WP7kg}|2|keNDlwO%Bv(Un zMre`aMURph2kH%)xWLC$mA!RWc;OKea#51Yaf%{@K&d8#y|^o4$;zHH&ri6Smt6K2 
z)NV&5$K<+T?Jm-_gG%TvB3fl=rIAR6@`$ayL}{}>j_;5%U7NWcrR=Ow+$d(8Vd;}wXb_Cm6eBWZG{~rs5+agC zTw=($VdfMRbY8Lzm*ldfqay^#E^g>!i;XI6oWceXJqOv0F1EB`o2t2^V#PT+8Y(%X z4K)jY#!Q`HFr3>bSYer|LssGX<(7XaD;lw;i(3YAL=!!|6I7x^sgyy_MVYQ(A+SDT z?T(f$l{&yk!$6J_<_(XD6<7X(LRREbbL}sf=nUusApl{yozK~M z$6MdO!|(m!hy3{8{SEGXfeVf&{emmI;Lc#oZ@ zjL>YIAQw5gC^$3&-Wp_V9Bkr}^KQ$|3MN@P*}L{DDp4S0LvKqKoj@zk+at~6t)LB_ za$hOC?mdN;*Z`&SOtQ%LXYlSsqom;1n}mH5z`BoYEL`;+w=2QW7;+`JQ8;Sv_+`DK z@qxoEr*?t+lO82BpKm>%chAv8Tu25>lola!K(X*!N+pRtCxpnO_KGG3T3Uvxpmvdk z-xB4PRWqdN(*`(Gf=X*P)^SovZVdvTt)NhWrpc(C;@O$cA?gAxCp>w&J7Bjz4(IA&SV|k>~yU9o|b$*9t8qofjOJmh0ZJtSheiv>~{W zOO#Zse8;8TKqz?J*L<}4j2p#(d6wa$LrY7g)6jL-ryA_LH!Y!8T&+s(Rt0a}5-c`? zt5$GpF8GaqlEGrfm+Oq*{?Q>fZ&)U|<)IdQFjr}IC>48ORBuQnDNN|x3V%#YytN8l;{(f`YOYticax6x-7r);|ka%5;zZ#yCd(LkfIFLBs z?Rmbbc2x*Lkqs%#5G6G(bcC2d`7dGtKBRJkNFb3O(oIQ)^9{~hw9Ih9;(U+amq|#O z^1SgSZ6W>inXbsq0Eh@F6AeLS>9{y1VngqC^uDI{OFA|vohQ(2X|58mWo{7-+( zpZ?jO@t1$~pCN{nXBCF2zC)%7YSy6}yO5wNjMWDapa zuE!t)olCEg_bs`ukQgA*J$$rO`2o2elA8&R21iS-Mig?)W%~t6=47hG#~v*;V|jx= z{*(U(A>rTtFMrNVkFau!iw!n94%7iFzd#8?CNqKn8wvt}shQw{VBK3T!WCM=+Hcsg zrWcM4YeY0`;w2+9VxS7F7s*8waJQ^jyADv?%nqoOp?88TRs;g5p0S)UQ3pt6XhOq* zI%23RrfNnX9ouk?!jOxS4J)#JIgZ6MGsgsBnP!nTHuT=oh6Q6YM1;Vd;smWEMri72 zU!IPsm=R*+v_D0}l5MzV>C*M6gW)7QqNc_HV~#M=aI31An;|wtj8aq#SlNaP_l!&r zY4NC_7^s3a1h#R-&7$DEvxq3UX%3N8)NxIzhTO>RFfnstP=uIFgg*9kGPNS=Xi!pc zpibz#ql+D8-_qBHo}p1J>?NBJ$ip}dLkh5Xmf;#=q|ifz%F$#DM1@9EF`|$K2WEtp zidDEKmzp3HOJ`|A&j%mg=J)^Thb;foPq^8fGS3yCt|0np$_nmkO4IUiQzlNcY#7S} zw%)U{sZ5~ta8v|Z8lKylsR_I}7_jUV7p*0i83hAs4~Kb4E^;P%NTqUeA^B=;=mV_W zid$t#9U@Xd?_rk1JCjJ`6;XJ8y}hCjn%-5cdP6C4Zsmecw>{77l9Rk3M2S>2l`3eQ zqSOKxB$+ncEpw(>&i#Sss1Q8bXx4qAR#a3JN>Zqd!=m80@PwG6PghM~RTr##D0RZ3 zWsF|doLir=<9#{BUOYVy#&Bf}=9<=7+a9 zdSlM=FMh_TbR68!JbX4JxQtoh_~=q`yRvjna=IvpF-N0Oln8QaC&&TO3m&gM))_8$ zfuCQs{9vrOT}W(DTz8V+JZia9I}ve?=QJt=lt!ZW1bo2x#ZrVV_s+sl3e6G-{m~qD8@!H%>-I1 zzBkDjWs>vOFfftt-s<@5GsVYG6kDt4Tx6(ntjov{oUIg1tGGM#ymM^%_2m$P;JlUG 
z8yE`dx%Z=Yx%=VE%&+>Fe_#B(e*3l|w{J!jUKdcW4@-RmQ$}(vl`4tR;zQ4?%n(LhF<%_0c6dga9E^`GJuIK3eRa z@{pTEFtDLbNG(_6{GR3dT0sc#T5j3DT1O#M``ruY_iHhT7c2Y!=k&h5*-2vg!Gkwm zI=`R&{2HwbUQ`52sc8M0Hf+)+L?(``r6$+Iy({i1RE5$RN-1(ZOvo)gz=odMFYv*V z=@GTN!ibzwC6_!C12)|$g_$JmoQUbbpu9Y^LE5d~v4B1lOSYk~-f7_c#7FxcqX`5nEexovLKxgHmKtZ30PFcNc~ zxl>$dF_diLnx}Sw3xQk?sYF4g6}!;0h?msil1h&7K_o#=Po)OLVDKUdJ-f#Oj^qq4 z0wxw%S|-dXREAPLZ3r~pGn8{OQ8CdIZWj|`5ae=z^MX}KsJ}pD#dCIHLraaJNOTdE z6O_lulChfMBUGw{s1nMJYe4e6tEt77i7L2s8=BZrsWA>m77M0oh>HOsQ@+v%LkxzD zj7gr}!!nW6he!~D8`%siYNCK`tZAvy5r%Stmpub9qYgd2=*TIM(U8T8i5~F8J_C_4 z;uxYp%A7)0XceeK!#(p36E!6xXAw^6*pbPCmW~aVNFmsSH7+>Da?YYpuSZYKgC8F9 zU;g3m@Z%i|G zOCD51hH5|nR=r?sH1{e&=M!_>NX>QUII~N_-nlnA36D316-PyC!F$?m8R-lu1dTV0 zv*hRsa+FZWC^@YKwS%)pAuuS_bEnME!tmXR=JRbw?IPEG&(3#@GX*idmtQu5gIut& zX(0Tt5WF=Mlq%&g7hRwao_9tPK|~16I1}u8%gTBxrMd2)_k!9wx)@p678eS}Mq{Jr zX61RbYiT`*sJJzN*5#C15u#w#dv*@SCQD+fi|m}kMNjJ$xzZ>kxfJyDT!$yj^_*N( zeBPWhmIusCjzDl!NR}Nu-Z`X%NtSb`GTbNwi)+DWYl#u5q@wnclTtC(g33tlQ~@Pm zZ56rH2-)+zZIR^cY;vF*CpauLv&{3>4{kHNcgQdQ&!6+ti<0LH#aGJ|du(0CWh?N} zaM?!2MZ~!@Co?YskJ~MWx#azkr|%2ymXdpeK+{)T)tVSHYL`ra+g0ky^xp7`>w?ES zPwP#3AIV@|1=fwCc7{?Zwhl6>SayQbM)K~&6Fh9AX1P+_s9HW=t;waJ3-F_Rfsrnm z8N-LBi4%y?(s zknbF;*05+izg#vPWSWyv!JKg-bjXt9BGDlRnP=-fM+H3ED!$yLf&Oh*aHrHvGB|G} zfyh6*v*oK*#*-_!(JmRkQLxwretorNss~&&8D-J)JNG*N{jVcuTghRjm}L?r1hx~b zTEhpkp5sDrb7t{2BUh1e?zm_)Z;d@)ZgS4MWX1g5V}ofTcdLMlf=UUNHt=8^ka)gY zWmHPDXfh76z(55CIv^-nw1V?a@b2%u%UeH8&aZyK0eE%$$N0T39L9erBJoYf@S9Gq zAcnYS)$T>{i-^MC8^YqX$JkdI3;j}@NTHEBHJb<_>0N{K4(D1rp@{qP0E8n1kJ9=j z_RI&15CWqMLQDxo2n_QBw9IJx%|4_r_6}~uhmH_?j2fY25?4xSh+tiVka{ns_f^Sw zl+YB}gj|G(Dm{u|eW5O~?+VoTj$<3mTMwglmj>`50G zY^aF@oNo|OU_=so85LW9^{RM;0vQ#a7AXb2-_rCOq&6s#)B5GUeplmtL@3ZmZ0HHm zGtZ6*qNnzWh!F5-QPA2mdcME~$E3W8AYwy8Iyv7W7!hSdjDn1c=v^Ajkp&{A_Bk&* zRFnh(O;{pgLYw*6bKX7VmU)jzL?Y3$V%a@T4Q{c;$B>Y2CXj@74&Vnd)0uIix4ucPAJ0Zi@{qMy6s8D+HVV zoLpC!SfGL-3PqXCFtXYY-lglW%BSgi6%rp?y0GIw9&_QoN>^Ew1n~?-LGK1^SJnW254-b2OqyMa4i!hN8e?G10IJ&yd10FmqP6 
z$Hl-v4%oO0R(8WZbBGl!OMiuq1I8j}>sLr6F_hfUw+J!dV?=3B^ckhnNSU15OpF z)Lmp0ND=7lhLwPf0Sk9dA9ieg%}^XMRAWk!qC--79G**`}_#Y|1<#g-5vBRN6>b3JBcW?Xfa5Z7oTQaCCxLJG++6V%NWN)+VTEjr~0 zp<}@c?K2!b*YN`F3zpqE*ZvU)#eJ^*DPz*`?H3`C134&4suPWJW4d@9YjJFI!SEm+w=o=L2W zoOe9}hn8@oln@P%>YA&z|pxW9G`77KG{iH+jCIBM`sHE#@ietsc1ee5{YCC_U?UTQ`< zB7$Vs1)lCQPO6OW&I3=ENvAxk16!+j-i*0B2ppF!wUa#DN^Vxrc*UQeLN97Q7^M@B zPI%sXOY!ilhz$*AP0p2V>4W5;(5zj@${KV5^046gVPMf|c23alo$90fmB!Iuk2`!b z((ujOw^0GUEnE7!;C&rsCSJOLN!Yp99^;$G^I!jbYIw86Xi|n&XKCT*u@Dj;97<}ucWF>f6)35RzQ>0^zjujqT_q56CXN6AAOJ~3K~$XC3z|c; zXqBhu_vu(&2r#OAnIrbmlc_-x^1)+6PiZEpsZ7XJQR^*QmsDbg(m9Ifu&<^S7)m+A zE6dhh(7T2Z)0RZawDWX6tyc)VbidQ`>^BuJ*OITrG7#$JaNgIQ`qzgSzZr{o{olTh zwd>*r8){rg$NEu3tZ&G5a!EJc1zP6Z$lqdNpCnf?NIJj8sB)iWmT8>`L=h?VgadBk z@$BM)q8y=P!7f}Ogd*SL?E)#uA<-C>QK}h1WN0ep`VcK-VzN^3R`DCu?C5<@Pea|W z7^pcCi4!eTIb)QSJhc~eatBf1qhn_`O!N_%8u83O#JPq+en<>8xttKA$B2?Ptms3B zlo}%kSl=$vxL}0j~Zm^7o>-dC#po{}H{)|Enk*pnv~-9wB_*Ld$*X4xSEjmlGY-NlwVT<$kvq^W=3aFE?6 zib(A))1-aqu)V^d>B2d8^}9^-l9g?FVjm_%o-D~JIWfn$5IMKcs6>H{8Ov~vjs+G& z$q*?uD<>F>5x4Up3XO{cwBFFi6;&9JDZzJ!-{-RHaUrp?yV&6fXj!o7PjT4^xQt8; zX~LSQa^A`B@T6S;u=SVpvEycbi(7fdd2__95cDCU?Ud zkK0Rv&=jIdIowJRB7EK~=)7auYqT0LD+Goimu(A+L_7_&b~ z>uAVC#;5(S8T5usP8q0*D|^YZe@G?g9Ow60b%wdRi;_7;NJKQu%#^eKjMiT43lZ^BtvD{J-Zl#EgM4Ty`~!?h@h09HkeuU zzg!EZIXvo~(*?_b87F1V%|a3)OpNA!mGSn(@@Si)BQ6+LwnG82P(0rC^ggxbm9pZr zmJ}+ItH`1?L?5WF;y}&0>|#Q1m@QM#avieC@*~(rh;T-mQ#VlfcJU8J{jC zfAoFJr=KXK88Wz`m~|;4Fg1#%734Z_R9bTBc-~lST%zg$$EM(U8z{Bp_-@br+loK^ z>m9eMDIUwE58NKL{Mpkhs;1!nV1&p5zy0=_vx|xcCy`%1Eja6X4xQnzuPj}C#{clW zd;HB$0zpPPuQ<#jpFB1Ultqc0rL_!nYA%~;!C|QhF|`9Od&yxAKYKPJWS+Y3DGhve zZn!g0tX$;GOLFD8u5&bj9~?FO=hxj#t z^lbw9+Xkp!{QYf_i2eQJD+0rdeCpSQ+3R2P&5wCG=+@dTF$VPhmos}3M43&{ud)46sZU9fm8@`Y>cdqU`N@Nz&YQ&k#n z6Dcbi@N}$G=GfaraR6GsqmmOQ*=;7~4w1kvT(Iq*QRpeTsvw`!C{5k1_M`_3@kQi8 zy>xhA6nmtM@C)(lUR&cNcwfXHzE0qHeS`8k>EU(P_?yn~-q8+@U4NBa-Oy8-G1j#R zE%DJ}RE`gUCe~@e&agm6(GFSkYr*#j<%q zsmDmEu)aphjNrEvW`ZJT<hx7nFX8 
zBe6L%HK2_vVwCLI(8V6>8k9(3IVCi?%#c*+cwIba8?Vu%{P4isCj>zy4Yk1fbCiFC1^`|J2 zGnHe6jHqZRM3nl(SMY^u^mUJ2IJ(se)ntfSq#`B4@}HB_wU=IO`M{BbH%-kCyj_w`pz8^CqzL z*DSY&I#xmUtnXT6cQhDtQTZ5Re$%v@u+F(LQfOYTCZ4hVWGE%fhfqe zM9Cq~YL6kK5F_3n-DPD1!C5?>fiB5nP9W09NX39__k>fdp=()`jF8$hYf;HT)}DAF*kvWevK57w+B;t zNA8uw|BtRW?XfI7v-6%ko$*dFXXKnUu!>}pY)Ld_w`5s{0r`Uf{Q?;c|1|pt_=9Xf zvfHvGcUywcB*mdP6ssyL$HsdUkN?vXw zpDa9{noJ9>tjDG|2sr?SPl-FFq_7h%Z6KGD)1qMGBtB^V=1C-16Fy&AGS%_^sOEGO z`F!1?(-cc&ZXLB!{A4;t0ORb0Kdi3!<67Zxe0*p)UrW|La8ep(CUIi}HVST+hFg_m z5gMNSrsJxET02F-Ck;LJWMPI=n)1d+&f;;I+)jo@}U-0Axe>5}95bIFfR1<$>r zAmEeWqiJAjW+0|qHt*IvEP7 z2fK(k9R81p1{(v%h39o+I4NLlHI0MAoB_F&C!Wm>kl2Kl7$j{R=ol$Q*3NP50xz~O z%>a=YXM!XIGcCF4hH2``Ne;`BwHL@FIL!lJ^adA3{F|qqzc?=V^;N@7C-~RDDmhRJ zcO%G^M`QTxCgR4H_hv0+?)dzw<`+*jck`Uj+Xb1(NK&$l4K*V^zbIH=3RGuU^oCp; z7JbW$wPH^l(Km`eHHyPJ5UPsD>w?R5;@9(rS`|#j0smStUmf!KmpR`XCpHcyCenDx zb-!gL6z`7*vd2*-@KNz_lrSO?(qP@Gm16BQf}TG-kys2{FDOi6?GgtW{PMf+P_QqewXFpU7_^N0*oE! z7copA#aGC(zgr}K^Yg#?IEt7C8v{U05f|*vmf!$M>sHj+l+uickd3$dFTTSOYXb5a>X=m^o$yM*-}DFYvS5(D2cQx!ID zvA*54%Z-r2;6hIyx2)4SIu#URN+D+iCp1!Iy9s!|TcR*b zg5Rw9+jWIEB#U92@uuM7-Szyo?cpm6%6Dxj$A2qGzV=;o0Eimc}i1{_mOs$)3K=%l1=TXwlLTQW}6EX;}A(JS56J zmT|yu9p(>6NU~I7<%TFME^QD}acmx8MbA23;Ki0ojB!M^!S5&-mOibKNugD>TeI=5 zexaimTZAaM4$m=^Y+2*OQv_x~ z2kHb;gNuU~yr(CKHB#h=)FBcyK@mh8`q`6YVk%10bJIqiY+f-oGuC27Yd5GMFtNrY zv)hcW@zIbmrWQ3?8a8n`aI=MDPo3~0J|+ZDE%zzqWN1tao>~_KKXk~=-D{K(Ow%#j z;3%a5>A6XfQVt}HRcNVY#tZ)i6L|-n4331Buw}z0F4&5eB2{FmLJEUrMJEK3jCp9V zF|bWn9HkEtNfLnrb3&;Gp}q(k*50xV7bsycsv;9(YWC3xF5?x-!AOocFcaDkxpX&N zwu!OMxLw@harcBi1p3sFlas{~M*?A($|>vE5t3pePZ`OQN=r5|(1aB&*E_cGaEp*a z?M_7`Cxzl=*RpUe+hFm)v!4DJj#TxF^8A}UF!iVEA3u{QgRUi1l1kN`GOUu1U&%Dq1{lar#Y95w{ z7^yfZ95-EH6C$P3oaTop3AH}q#6+StEJQ+yL>D3#ZQ#63j10VJ6}8fsE#%U1);3Jk z7%daeo0g(zP%>w0Be@Z5-EjOr$tBMk&z{Nocsc~t{&<;KwKGx-Jg5Ve5M(BB-R7J% zga5LN0{xZop)4c3jUUe->M0`~26@q&zXKrh(?|C#5d^k@0aiK^WMkdh+&p}~05|T-- 
znYV&jZaK&zznN!T_kxc{hX^FgUhu<7#X;7QsYDqxR%oswjD+C*a>PG1n;-Y$fTok!LqKrINJ4Q|FpLJIR?9J^%XC#8Fmq;ac{B;ZEk6REdp%Po9lf zIr!neqfm|~YfTdjb!E721)t6(cPE-{3|zE|+ozFRN0FD;lFkPn)RKSnp5oWf1*M8y zcbaJ?sFmc@+~ZY9yjsJZEaNyADB-zYfXp-6G&lj?J8UqjWbP}@S}>H{*|*#~s`%tg z^LQyasUmmo^&CzUe|TB)*)zqxoubi@z9x0PJ=uL%(EeS|d-v_z6XFDjw|>+&;x`Rv z*a%4BozL~=weNoXL4YR$f*hp`MrH^GmMIFXAHrWTdTi*B$>V%WW-4-34Zc}CQt4e7 zC?6(bxM6y$YqTubaD@%S#7{|s(nI4?h!&q3GBZNS2{HPiemEs!^2jt)o%U%>ZboEk zNWYKN5o58Bkp(_^eCk=Ib6S6clMN$z%uL@$t4l7opAY`wU4g_9%+tHl22V(D+TC`{ zVlf!fg;a<*?2g}TEZ*Ymc4)S5ukW`v9qC(-@%G=|z0P~;Ho2NH4`(C-rP{;B7V8^o zb3{s!Yx@{ONvZZ|-2%Z7bo;9DuEXdIDfQt0j&V5Nj$5SE^l^iCmWeqbr=}MhN~X-a z=P0Rn8w0x&)&;%W5TYOWx3V8XQK2JOBT`EAv165P=-e>%Bnl?^9X9^_tyg>LFOV^j zq^FOT6ZL>DGzcV~o|FV6Sj96=^}_)Kp8_s*crhlEHCO&~QUs$*q%?zY2?3)D=KdKe z1%hzYY7c?IC6B}t!q7~Y%Zwv#vz0XkHIq!xdcit0v^yW>sriUn7MzDOe2iGpVMK}D zRU~^_j4tW?8beO0_c*FP*oEXQAy|wq22ZhHk*P7rohtCt$g29wDOQJU!<~9dRA50L``SS;cFP}3qBq_WbFgNXC&2e>Cf1P8z%CQN98?ACHZpIb7XFDq)*uRj!V1Zs((iA ztASZd7)T+?@X9^m!sp!4cfe=E1R2T3IrdD>WxpU27^{jdbu?+id)W~eeMC|)VuThY zU2N#X0;w~O)os@4hO_>hty*(izt4q#%}gH*HQp%i$hWpt^xaiTQ;l%8* zZz`UrEz&BQxJJp0wRdb?AXjkd=bZbFkUAogKD9LQ1{DpJI;2T+j3~Gc*B}%!!79DP zr$bgANFL8X!C8d`Gqq113S11#;{_ve$nVy#kRmWrvt1se!>EEp;{E(RNQx_ejh2Fx zEPa||nKsX*rxV{QPCcapgEHdwzE#nVE`A^$4-S$O%5evR7<+&C)G# z$)a^h6MD8{!4{b4`^1!pQ8CLR=lz!Zw==qJLA6cn=aJv7=M-W}Oo@;(ra7#v`W3?mZ{>V#6B>u$^6+`QmneTxuB*wiy-%+?A%x%Aw) zHESCOT1gSG$uic0<2>hg%Z@+a6T>lgCppkH2ZiS0VaH-^`2A|aRaE?wW6kl%aTsi)`Bf-;aMP)2sHj(d71+7zP)saL=t`y@ev2ls(-ea-2WSPnd zGm|(j5>Gdlqrx!LieFy$OpV~6P&7f1Q*wWlvvnP7mr$V?u#qzIc{Aro9&%b4Y6DA` z@F8+qX<|@Z^+Vt)lOx`*GhR22%4ED>S7=#r)q9>a8&0wV4vnGElIQI!KAP?`w;B5; z<6c=Z8F`Lpp5Hxn!wqtBiY65~#*X;eQOo~#uK2@dMM`jxPqE4K*H30BA=oPytXt2Y%?#IPp5MIA zh7e+E_-N+&ay8-MsKwQuFQ1l7W#liux8j#)ioba_!A2-dpK%ujz&4dYkj7mEm@6nG}}gM$-d(w>vq#kW zkn;|VkSuM&Xvu?0P-#yS6OSf}{o9V$ml+2p=V9%cJ58MlluBd-zPw5Fthsc7$1B5r zR&aj=FPe;vgZ(^FX-#E%4okt1wSm*h@x?k2Q_iB-{MjwZr<-eh&@>xxN>FE-gAyc>Up}r# 
z2yRbX_9yo3eP%~{_@*%8JKq1U$KIX#e)Guujc9uJkzMPXRJ#CI_}25jEwsOSrBuZZ z5tgClaCGuU086RRG9x!NX~%30(Ua+t(u@gd2>!f5h~;{W*@@!89~{VaO_@zVB#h49 zmPTZ{02nHVcVB9zONx9%oy{n-DU~@S*CVvbha+iykEysrEoQ`MY2u0?Jrgx!!Vy9k z+PI*NH?(0faDnwOsd&r##WHLbh6-jOG`y)@esj&ln;@Zsea+Y$vzOmvrcV$EHvWom_=o7Ol#R5h1s#!MWcg(i^L zq$@H}b13hyr}x>WmKZ$}jZ~U0t#gv0s`CS@$#839jWrjUk0ljtorbo6eRfOm02 zNS;zn5mDe=%f#HF4LAJD|M_1~sFHv6zy3!mCd|~BYro)?drm1POw|cWDn_yz&7?g%2|{JP@fP)Lz{Z6(-;GSj7kea9DWB*vpYKqhu~D*=2m{p%C3khQmZK=Rr0EP z&Ve~5lNl~`m{hXxEtL*Ct3VB+;k;k6N^_2~BgQ&stOYh|wqgyzQka4V`4P9W6C@=> zO=%pp&bV-w9A^_oYDDM3M$1SU_RS29CL{+b=T*N(E6WG<5w)1I^jjuc<7Gsql1xJ( zORTVHN^Vybduqx_cEHkGTCrdou9(OPM$~8_b{jK@60%qg$ThKtO)X71rW(F7*QaSAp%xxIa0UC;~rWV9{uEqfBMh<9>4iF{}(U5-0*Un zV|~t4OU|}}KW%S#X_uVlBc$x{p=RS9xe|Q3x?t`SLiNn#l#llWnN;lMkvrKQeKd@+ z21mznUUFI}lFLY5G4B!=*7KujM(1;sRAgDpS=&)%mX*~+5~JMm<+j5SxIZ$iyOQTC z#k0*YNpr$+k{dpFxE)9U-9A3#JnQD%%12yuf!j5#&AuuZuA|f>yqEBpEAh|AB+l?@tWVR z&N(fP`RTFb!>MAX1|IB-re)zGM|sXvXLuZ~_uLvOPRhV1i=JE+luEJnk;A;8kDj}= z=QryHEd`d2JuMmQ5sj7XWf36~l~P>UfS}~j$ny7pc8WZ?#V`KVC)850^_r_++RG!#2RHdBIsDsV0FJjpJ&YGc$0$^(<}1 zhsTae!Ec|BS#|}7xnXQHvrG|^HNI!ZI4)Fb*`Lf(ZAA0D`7DJt_L3#VtvM)Fcr=I8sVGaUOQ-8{S$LhDm$kO(pVI*Ws&m{rdI! 
zuD^?K{R@DLEn9a3JDNp^9a3f(Ss^n)5Q)yu0WsjTk|Cu)?>AIxHu#{$;1#X12|{XY z9KwlhS`xiQ$N}jSq8*45I0`ia425BS7+3%RAOJ~3K~x^m#055Nd2atmu4@WCq0SDm zewfmh=77du4L?ySVN^z?rjSdNEa=mQgz8zXi4Xk{2qi-y zS)^A)A__xo4u?sjRMgC9+0e70HismJ+UHUisFb7P9P1ml@ro+jC!}u2%vMBQVpKsJ zR%E+Pq@%&8f}9#Dv*G6@IW&e!4o9J-oRH$cj0_2K#>~{1dAuT|NQi<<_moOauyKph zGaBFXuz1KkG~_O$kCB8#hk~g*AjY92WTGnir0GP%D|^Y2IOQatvL`ZL^jA0nV-64^ zGLj=afkldxspQ;0XJ1dS37*>5zgc(u7>* zTs0j|SSsn5$}u$tr5%-+vhtSec!raaQk1(}L*iH;F?XxMYma2AN|tfOo;apqMT&6c zo>NiNq#H7slQTuih!JXs(30eGpI-E=#fp77<22g?;J;i7UTxvt2xdjkf4+!JRmPDT zlZhE~s~BaHA5T5co1FWVCzr=8`os@M6P6Z2@)R=T(CqVK8}TW!_L9@GWNadR3fy#x zdFR>CkTFmPw1i9~J~(Xn(<*Sa7L-yl4u+9YoD`Ca&U4dye9YN|mO#&)dV->0Vg#Rj zG2;0)aJSZMyyUj1Y5ia^*!ZDl*2s((UBlJ|9^^A}VK}Y=clTS`jp4F0+{q94XuqL# zia)Lt8>iXC;r^Xc!nqP}GydPFFwG^G?uOTWig$fU$F@V zC)o_66q$_VDzbKwv4Q)wp*E4$c`7Z*rQ&zXoGZKKes#>&2cB#^UCJp`&8@w$?WORxY zP|X~*GkovJ^W&opzk8gMiHa(7e14O2t4=hloF9w=FV=#9;@L9eL7AbY{R|!P#|9Wh|A{1Sy#2u(TbG zD;OC-W>^t13a-0|i%{!~AB~TAy41Wswk%&*L~wk1X}IhKKiL;7tmM(yv#}cMGWN5? 
zqdHKkV;2J*-@oSxYt7n9ZWW#{mWE&dUhqHO7Bo)rVyn>EiiyrB zRfZt2u%J|+uO%mqrZk=>OL$Za*tefmo>!fvR28j<$|PE=L3Vs`W5_A#^PaKF=sUyB zXIp+Wnc-t%R7k$uKz*8V?}2)&W%SpD(RZM&fN%SFzsonuJCDV0Jm;&AH#DUU6HAf4 z={0>XHd)nPJ%*X;C`vCyY zK#(ZS6d1xo*Zv6tMWy!`XSW87RA|}si?=IfREZ}M*pgBr(j($m8xHZ#^-4P~so<+h z=Qkc}c9pqrLU3RAcfWfr-(CARZ{I|x4eZ)njq$NTkx|G!q8Qroq|_J%%kITn`a{#- z46o4@Nk&xC=$Mhr5Xf_}qtFxbG@*|x90RwwG!rsW5rtzcj!AER9%91iVn@V?_~CFKnP95vJ6W^l*rw$k4%~%ENg#`3mq8+9!Ve8 zTw_5(<5v`NN}skAa*7vExCzL!m|t5uGYr=utA`$UGoPNfU4A zd_$ooWO9V1efzx#mE*$=9^T^#~CK)BH9~xW@X1&JT8mm^x~;Pf8Jq8Uk2S z(#JKqnjn;9lg#0U0>AD@rY}F`~#RbxtL7Qp^ak z!%)+t9xW@jQJ_)G)FBeVRd1O{i;82U47(s*hYb-)%_^?gR}+$m%5I1^U>M%Mm^@)N&-n$Y_qlU9S9^B2OI2G1pEKg(JnpSWakt zi%p)Mh8P`fSd#04N*UhV-qvwNz;o3ZF8atvBg1~CdC^-6laNU>)gwM#c>vNtEx*=o zxJS*MXO<t6C6ftzSR!K#(~;-cD3z6Nh?L~z~@)6v2hw8$8n#0Qhe z?-yf!JgGTr;j+mPM3#Nx;aKown@DIr9v@LixK%j*{kg}5idmlUNIu_q-X9fQbuiYN zm=x=jFhcT^nd7F*SlWh-bsXh2t6(t_Tui)ZB3fu}6(yC*sdS_d1AgrBrsk?sEN$R} zspc@x`P=Ii@6|JA+R(+o(hbEVSt=0-9@LT-TZtm#ljBvJ@G)^zXkN62*23ELob?-A zFkG~T6eD}tFdVz-B}E>vq2QtkoOL}LCs{j98wK4)Gt!WuSzifi6_{qj@%z$CnssD9 zgFQ2F{41R}t{X-|A~775mM%#Cuz;Jb;yTw@A9%Ty+#d_pE^%W!ln@*mO`!z)dElT( zJU_3v+!`K~6}6FAFHjOfifpVvCC$cbYLmE_D-L8r7Y?l=|D|2?qlf!6OITV%ttBxD zS}!^8GNO<)&T~*&_HwveN5+|>9$UVD*YfFE%^w$vn@*z&*!BfVDtIs>IPgX0|uHYv##TZFHA2iaRwmk>BW$w$8vnY5 zLA*2d{kB~C=4-yow+nA4xw|@Eh&w^{O^d{Lyas`g1|<&`VY^2 zV|(~_fAzQg=GUJgg+R&S2J6z$_!VO$2up}Po!fwjm|Zn=Oo3cYKtiFXD4Ef^HCZev z)s!aOAOtja2(RU)9**tB{;s59C=a=^j|YOquBbw*j9ia#X^jnAd}@)x5Mxg+D`wdd zE?EjWp^h^w4Sifu$vu?HaIqtiFtQ-YL=piZ665@YeQ}#0BbLp8yc0uxbIyoDW>m}w zNQ}&Pj_DzoI2EUO95$>e^$a5mqHuI!H6WJ+RM{RynxaKUrS_3R5bzjP1A){0E_-^P zS}ShcHEmepVn@bgpi_{u;cD0>rokTLlO;q)X{OZKj3zEvyGtaA)-Lc^eB5HF5pp1e zXbMU>!z4o&H}rl>t@mltH6b}{*bb#3(W7O7kb*)ELCu0O*WFWm?1$=bhK4#DwkVbJ zFaPqNlgXU_{eSsew!xATRHEX&Y7djLfqozi>)23;9F;PPSW$?YQr1HWgI{1|MyYbP zzF{6-?ZSE)M|z);83Irbs+_Z~8#Yw3<3LY$1rC;N++st^p*$fMC9BvFBXkUemnP2H z7q{qTM@U1mIwm-L)2t>HQb-g-8{FJqahe@+x2(Ca4L${8G}z?GrDmSa@CX)Z@SmzQ 
zkV3}V;DcfwUk^b0tfq^OoD!b`j+Vo`!X`&8CJ+aXc}#-CY(|$nWg1gyL#Z^^;TkVG zj@1FBED%ZZ-e{kZs<0uVghUIC670whn$`yvToQz*kU0nWh_RYbP(cz@Dq|V9bji`B zjxIDbEU;|xv8T{AOU`$`&>5wuSf({i+Mr}a#u66;wXDg=Q7OkKk4p|GdLYq?pl+ej-2UM)|LC9nh`;*ZenoiQa$G0`LvZV&RlM#T$5n)=u|cp3LqP3SyP;*v z>)w%xoYP7$w;qua55_%G1gx*whQOI!QHe2{F)m2<@(htAN=T+zVqr6`JH=HeF%sIC zC^dXIHY{!A@z!#bOCC4p{9tO3GN)8InMyosH@s>G5j{4N+Q4buan&gnR`9yt5Ta&k zG)j10*>;#>>6W#J$J-5;n-Ld{;r_Je%aunAv}Mk^j*EVULUU9qq)d1cF~Xf{fcamx zhTE0K2+c?fzBjdewsH8FF*2Ulz2m@?+%6PS44Z@|4EO)LwWQKHbL+WqJrDTz!OJ}R^hth}QT6@7pv4prU<`4F1@`OSh?jpm>j0+!P}an`N*eC?T3pf(@s}Y;j@#rVF_}TaG@$%pP znrSYXTgiDR+4zV{kyrMLv(|H1K&1?K%8Yq0Y5O72zF#Ipt+-xB{&<~mN%7OumW>S@ z=7Kg1BIT}@kf^T_Z%e(V#Zacxpaa*|FGrd%bE<>hL)3};6)R-@t%Kh==kGYbJHb4 zfFI5xg-TRWQky{6N&fbH#Z|xI&u0}2D|p!o3YBr35B1o0M~eMiQYgXmE5)xa1th~H zgT@Q~+lMViXbL1B%v#R284Ifi4?*(W8AbLo!S63&q)Kj;nmwa9yw&q^nbSE%N(v2Xx*8Mr7`~aCaV?Pu&1j{HpMym`h^TFGOrcg6%*s$(i z6Y%J~V44moQ_m(`qLo5KN$`$VEE&sNXtiY|4=KdpUG}K~6j?0jX$Ma$9sv}hV!gAn zEW2kYm9wAS$0&*Q4V9TP7Do^QMh%{EBFw{cLi9|{35BQ_s|ikkPo9nqn|Osxg51oI zXb!~@V_6{t^z>9}kBPiJG|W+O86FS5KT4#EXfarA3dVHVmOzgWfzEGGLZeiH^&K%; zGBu+0ONfzNPX-%{zt{zmpfqDd0*zs9ud$&6qZpYZq!^gfE_Spm$;60Rb&n7SWKHka zWT~P{4bfXj;8M?-eZ{^$AX8gpDtX>Gd`j4~CKFJI2~F7W+I`8Pe!y5xc-5aVHG3SZ zQ`TRW6a+&(!g+VO^Xm=Qe&yTYyec)^ z_{A>lrtrxVq69Q4d9qZpXO0oFd5c;zYWVNyPPRv@F8TfT2BR!9G2`L5AhwA=uGeT5 z9O=6}`l#izPYW_Pf>g0dH@FmdvKA!0z>#y_jyM@7T4%ZLx7;gge2hGtbv%rowe1-R z!-W-$MwTb9B^#Fz($hOlDN2s=iZ*zjH9eK1$RotlXk(4t#P8m zCBayi+^ZcQOeBAO(V-}5eL@Sxo(3%gx3dW!RtNYbfWXp08w9yb7%7=%l8K$575wN_ zQwqoVO~6LU5B4pGS>TIR$!~w9`FJY0UmCU{@%dbH-57c=sg1)1!9gL(b<1^^v$PI` zC$HDsm4{4B;;&zf=z&usS&woYfd?kAupYk=T=s#d+r*DPC}5r_Mv`ux^XZk~tQGV) z{_#(H($5Q8A342mc>Y|Vz2Nin0{8zD_GUklooAZg@7vBETkciGs$y{yDXB?G-80i} z4?N?+xa~Xp)<6&1@L%Hp!+-$;2E5!D@XY{Y4?N>;yBBr0D5)ulqDWSewd5LmobB6u zaiSI$+4d=9W@JQWMB#jqIPdqo&-;5Uv|e#1&slCQu6KxJ!yQ%fWb3%;0{`hdEuTLd 
zv$KY!i`+R0oEgjglb)Y_9r*Mr=XM^by``#Jj=bT^bxrSv4wMU#KR9$eS}M+WjKf8AX2;Vs2sUh(-taoNv#KT8O$Mk_slZE1D690g|VFFid>~+>g9Zs{Dcz(QEAiczBeK$4Y^;qPAbvhJjacIj&&$o|P)8B^IlR_dGB5ZrNrSlsSJzTWg zZi^BUkt*_J-FOtf^ryJ_MY6vg{U|1iC!4XtX>ldVQg3fY!wu3rrcPMEnk}0{F z(AqVft%+<9fb|`z86g-{3M~yvRy1~rb={zzxg8=7__D;5Op-G)HKsBr`1q8z->`8l z#-t=FL(2r~4PA`5u*Lg&2(}OdYj}vF?RaiWP%>qRTy}fKH@^IcuiHt!i`?<*dc9f% zzqCC(-v}-3Q#Sntpod4K7;GV7ht|np10islJ-}jF+beAB2-2aP1R;r0AViB39ZC(z zxl{8Nt!SAghjeVnnKBXwti%;u z)37BE2P7dQkt1BJ>E()C9FXf7I;Nx)=%k=`EnTd!(vvem$rLRDW6D8s_JWQLH}aaT zEe9xCSmJ!ofx1UR#V*dt#Dt!fmAyd6ltLZQ*cH9n4MYM_VB>zUQ-B9K8=T zvZIg@8y$ubGhH!?DT}y71w-c}r8s0KmMr59wOX->mRstG;8L9EQG1+aFIs}MRHneX zF^SMT7kySrj%<+alzNguAhTq-(28|SH<<|NM$uHa$cag^pm2T@DBmt?6Vi2@fv2uI0;O)ojGa~?Kpe%oGi zB=57cg5%r}ooC}M>rkUqgAx@(I8;@22nSkvBUu=7f(l`%0 zUsLFojjb@s@%;mb_X#KI@SgbPs^|No31!l8(HV58Slhs^_uMWZ6M@C{jFGVt!Lgv4ikwDiaM^in;g>l$Sif(2)|nfR@T$|9fi!ujiY1B zo%EQ41WGl8B#d**hd*q&JU4uLuJHj{r#Q?K{_wpURIK>hM;Y^-=Yy#u;5gqI`T*aX zwmjY{zF0?23(NVY!7v$8Aq`x$199hWF=6KfXSwFyQNwQ*C2a>q;z+ILYAd+d1b%cV zc`I%C^%F_1Eq`^h;9gPj{(;8H32%>ULXmN?lYG4FNeH}m;P}74NZ8uIERigqIPM$^ zio)`XPczQD9V!|=IMAG(IUYYr`2GZ*Z30>iY~a-{V^a&JsV6T3@680M@LXOd{P~wT zi>BqKiyUN{kM6eEon`h+u=K!)Omg-> zvHW{WrV>883Vb-~n57AKvLhUxr#p*@8H?WvWH0DKVEW#a@9iAFd>nauG@-~f_r@!J z`Z&WFi3ozvR)O9{cAJ9I2%c^eIuEUvTy`x_x|Waj6^{P4+WKlS^lBmXU5es!-SE47 zzpsx=i5I^X@jPeJzGVJ>`+2_c3_{?1kJp|M{61kV10jcCfW2M7`Hr2t8Zcyg%$M3* z1A1HI;t&%E5$C(T19oVsZ|xNut{Ek#Xq6L`!|$itr8F3sk()6BvG<$yd&`3&#K5Y4 zMhG6M62ji^E2SAQYWqQ<-Zw0}Cn$MEs!OW$7^4cBy|Ua1wO6JULTW@bfZyZJJW_=H zSr2;)Ng%|)bN});H!0z@Rd`j!eSJM&KfXCA_j09&uZhY~LDR8glHNv(oSnZKVxdJu zi2@lTiQ3Zp2JbD#jL797k+3f+8xR?()O31KmxA>6OykUle(hiv3_K-Yzwa{qRCIMZwN_d>n`&Lr*5EE^dYj{uf2>KiADs0iV7EMmSKsD79=DD4)hVNZ<+TSf(#%em)(wcvI)49 zi*QcoHXtAq72DpE$iN~jkU?-KImLz%K6G5TCv>r9qz~|W)9-Pncw!XC`Ix_Z<`HU5 zVaEKhoFcTRi-HfPk&ovIKC_%9hEFyZOianWO7e84*!7yW3ok$_5_R3q(|OzG7+;mVL+j6Upz^9w`Jj?S@=d zTy>7g{sNo|*n~)saM=xP;6fL?d5}>%c-FLh_C%s%!I(fU45^IVE;2f=_}PuXMa8=_ 
zc)AT7rGm$Gj`5NZJm0I1h)8-7xSJk8fZwexA02q6g`{^A`XJa@&);7S9frYB-2(}4 z#wtF#-Lcm2WTS9aaM4F@RTd-Q@jAx{Pa#sCUO5hxq?eLg zh2>|fp7Ta>l4{;Oiu~UF1=sDEXDiL)RZ81yDkJFwbiLx|UzVt-*m=prO~d0)1^?`p zW15V3xU+n@P#hV_j}9ZN+VF1W*jmXCe5Ht^R!tr?k=dEaqI zr${MSHwCAK!>&%M^=N0Amb-TfL6}+#igRd93uPcZDe}&ca z=LoMo_74?hasQ|o27knif)O1pQfqQCLq$Cxlfs79t_Xg(@V9n5Jg*qiU_y*^t|3H+ zQ5mH=pc02DZP?f=HvKteIwO%4Nmj7wE^)r2>o<(^Q^wgXd>nxPN|=Ex5$e5(U>MO; z$_zFPGosK3q?Bknr=UVhO%oTKs2SJo z0wYQyk#U-liWy7W(8QWE{niWo+yPTs9~j9aa$!IkuH$OhjKwuw*bReQu|QLh3PHV( z3YS7~oIXI%b8VlIdBgkVTj(yO#i5jAz{w?q2s(mhh`(*U$9h34*8|U5r&t$w)Ll}F z63>`6T8zv|$&kXMWk<<`M{P?mp5xrGXdyb!IyNBoeJ&A@2$Go6%N7*{UF@jHS+D~a z7^?}n%(!wZHhzihk2pwjgcWSVhJd4_pte^`k_j!IPVC4i_uUywDg|ADqa@{VyP_AK zC;~CUGF&hgXSDvBLRB1yL#&VVeghJ!IHnQZfO3*0l7!wT*yxc_GEy_@u;f4;;)CZb zFS%&dfSHSt#`ky}6LCfqVCmS#VFNr#1%;7}jil0ohr5(KSVbN_cKshn5np;7+ADuX{e4YSu0MYIn|^^q8~UFi8|`3@rK`%7c)Wkv!ly zv(zr|%jG%mj_#6#l#@i!d5KYwsmR-7$FJro7j2+-(D_J~3koIq-Z&cuxs)K+fl>|K z#CaQeXDnD-$+H{DrngKIgN=dDR)O2)(824&FdX&vD01FNkdBeov`%r+4n&aCOw)SJ zvX5j+GtrtDB=c_AjJ`dJJgj?G{f-|Ur6j&&Wh35O9yL2OV}kJ9&Q6#Z!;N3jM2(Im zwOcbyBqw=EmDC&t!$t40K_JAyu}(<%dY3RuC0z*gL7`B5Zwy)t_%Dm#i#)B$1O0PAkVRA5Jhba+oc+*j4=ED&zeF%Nu3Ddcjw9q%ti>S;0{z zI2(hBJGSQ|syy&$q5084z~Q-SB{4#xJs+M$KD#umdc{o}`MWDg!1LZnk}8-Q%XKe# zGoNv{>XAvujg?$1bKV`-gmq+=3!Kp0n|RiZW?}?yoj5R-FRuzREtwX9lf2@(F@Qrj zkHd1=cihQF+$lUp2v)u1qHp*>3+68HtdSI25*fJEhZ#(^#Sm804!zs$R*{OHH%o<& zkmyKhEH)?(62JUTu|sS8@7X_q|>~f0G-Fc&_>izFla)*1Cti@)=$P5+tM;DUrUU6+2Q{pfMyQ z)N0EjO<8wO*|t|mnV@7cSR^EPKj6UtKnRwfI^=6$YZ#mZliFhWL!5e4Jqluf*1 z(SJQqC1jvbGjb;An9+%vR<20opfI}FV^o5Y84@v&A7YOp#fXeU^MG0}+1N{3x5amJ z5?xTH2M`0TtLf~97#%Ts1ad$}NkxqPi*q#;zv7$7vlw`FEOAJUk1xJ{bG-sDhYI71 z&);o@*}rCpZ0r?Ul{9`1fLD&jFUj<6`LB6A9nQ^zGUJwn=?>)6ow&A^M6JOO z*vmpfrc;(-MH{ztZbhmKj>H|Vq-PUXH2#KEjSw*sL!dM#*w~*0s)hFIZZkCF-_Q#!vP*Aqtal$}^W7-$_+^9H?lI0A_( zfnoTtNTx(TL|_l3R}Dy3wxy*<`-GkzM?Y{+`z`bMjFg;IX;LN8@eY^$&A@m@Bjkuc zWanE}{)R4YQQm-72x(aO4IP#y+@KvC={Go3hqT^NkkiPVC|a7(u#F3Zi)5mrbv233 znCXIL=rClIs-X3r8#kv7J5*FCsmXMKjyc|Xle 
zI#SuQaF#+PoSNBiex2M$ItL1+xe6;FGSN9_>4dfK>0{4S3raa=Z37WYkg#?wO;`*z z1td+($<)x1>4PE@qaksQ4W%+%gs*5rLnROCgTsk{qUXlVsl*9m4p6$Ij420+XJ<1W zcS|CJLjJJ-6dw#n<~E&b$YV(<4RU|IF*r1l$2-e=Yu& z^OmVe$&!Kia?`iezN6A9iIfB)tKL$k!)CM%icANZ*f6(&C-tCE{dx&w19z*)q1Ie= zo{=`(Iq317;o(~E-|IcMOUvi$9e+1($YsJi21#35kEZj zC>#0R)^a;Prci?G*0b|1A04J#))6lRA74u@+MdL893%-pek-tE2_%N=R&uwDe6oGa z#At3Mr$FS5QqTt2IfrG-?Q+6FTGIpf%7p8_=P)r;8oZ2*jpM4-T-chcCLvRbHd-d8 zV3PIRPXqtYQ;pJ*jkBb27@RzB1Y;xFh83y0wC@0DYGfYM|fCKXF> znHk4{%4zz*r^}REWx~7<{GaE7Te-!H4f9UXUr2f~ z7duiBNf>dIc@FZ9kFRp3hmx*~ym6~zv38_h^7cef3dL_%l65;+7P_`#X9eFsY&c&j zKE8~+TQ!tZfiZ%x?RmUaTy%lozrUt=JR&8f_mQ<7c*+0q-42xs{_0WAmEAH*I)WQ< zR2IDVgOtyIGbdLSr?Z;BeO!?%SoN9*Cq1aZuWsNV_2eT(q9m#6S*%k^qxp}1FYs(3 zdAQ2CJ=MI^HXKeQ*S_V?E&`t}kNEL}!0EOjT0@8#I}g8i((=_RXWXTHwX<}=bDAjb zWQI?l1isiw4reLe_dH$?HRo|^$h02@9hxZI@Ekv^mLu$Ske(mkr4If>{?XTee<-Y< zW6Os0vF~;WKmYEVh1yWyNvQ^9G&WqvM-T+Lm||l?6IX-hkd#8s&?3S4e(*K!QA*xB zeCP-iL%9+Rqy+>8g_u(!1R7AuEjE+rLOWrSeyu^tBB z_JeFvCd3yJ&v9>kcy*)le6_ym$9{f0eygZ`wdj3*zQ1d&hx>N!hMtyyCshL?t`c6Hkr&Q*YjX1~I7RwH!b3|0QSQ8P{VTq3pBXe>!#?vuL z?$Cw>jaxENw<(zsG>qhwoDsF$ArgrZDUO~%Pi7``p&??)Ov$dFvyK;R_2GaC3xTig zS7f@Pi(4MH8>admkw7NKG3sx(U6|98lgWz89C00PmjbEe08Esq-Lxc!S$l3U7Y-lLV5v^O&h9TP73q$P}IM)q@Y7Er<&5Pg$Ee#Qk z)&+?y5y*iwq9jtJ)MAZ`&CteXu$I(zL#j%0F~N|qZm(XPuRS@*JrHZtN8{dQ>p1*fLZ1 zkzvYNHbY`Cy2p7zp#@Vh;&E4_MTQ9}9dn|T6zYIfWVG}oj2S6SAu|rsKpPHNxs*B# z(aulpHQ!6$M8$kSr0Y(v;9p^M-sJRF-Naug?7!lq9csg#v%xU?%|99YgyZE50~ zcZ&l|oFc4X?OJ*)o(=blDSy9NvGg7xQg+cI#K88gV?RVu+kjCyh05^+HsO*)rqpqV zC6LL4d05hiEmL)e6MaA(Yi``l-~qQQ8siy@18g6uMaz||IpPjV4jYX+4CqE7BT-t4 zkkg12jhnM`ZvpV}qNeeI<1`RuhZ8xKk@y(6Xe@753L7NPx-BxMY@7ve7$us|H;NnE zFfl2?OD?;bC@p_9iJ`%6n96<;g{ zQDdSCzG{~=E@5g$OjF6m_q;!;c-FN%ej1_C7!O+;xm`qZ?Rnf-el+PQwd6@{XuP6O zg7+o`7q#T!rXbfXM+JOVZ~404FqR2_ba2W|r#Q?BWx$Luk6Uu^@m4;&Y65S)C1i4!Ub&$`Ig zMJ$ouztbQ!{P|Z6t@UKgn0IUb_|^fdc8rLg74N6xn|BC?j7MzeE* zw@knY`0Y)}zkjqr34`_}|M<4$?4ae*(r}i0yca{eUJl)dzMJsK398QVgR=$CTuvVp 
zMmRosmhr3}*u1HPQQ|qs9KTx*HiwN3to#l4lNo6=WLmIk6?E0&Ck)8OSbynxJMn0$2iWkM&%Okuf z*u);`rDV4EG7eUQfW-@o3+)gW>|1QC2`UTkQdtwUOB(7 zuIbC$;>8-jdc3-x!^eXMZ}F#p`X~JRfBQ53?(cqqQ7J+S`cUIS3sR7X0-+Q;cRfT+ zM~5Ji>WUydtzXmH1>^KK2tgaR2njw|VjKn#5koRrz+ppAE=NEhH50T<2?RPeIBAh$ zNHnX%97l^n5@MhYE0j(L#GTOOYC;=V)S@2}zStnq^z8Oi<`t!=Fd}6gE{O~ZGC&`< z^l?M$Rv=&*p3;RiAqd(SxN=KW%s5V_6pX05-N0v#L$Q#`ln@7=suU7LWEC#y!UiQ0 zYPqEGOKQKMb}J;3OpZuZh7!q;_!m9Ba5U^V6le5wEbLb#qM)%Ww($}zQ-l!g>=lRs zq@Rf?r8z;!;eCRc>;D2NHUGnZ_peE*NM*`VlCklY+P7q3MnXjvD}0RX zd_4?##T`~OSkV!qW*)C-!Q+vT!PMGMF z&P%qT$Fk!{A9E|8a@Ei2MNI@Yenlg;tXbk>6D>>@5(QgeV0%48CWnAeSY_dO{aGn@|(H;f*ro)J#c~WZpYsgq?$?kE9}D zs!Jxapb0IdoKP7}A1qg0AP7GUAO=B3L7`G=HgwTZ%AC2YspNs3n1Hs2%CH(x0lx>?pG}x#~)WFE?onWd9gpk~H zp3AO7=r!xM;?tXkrdLc8O>Pul?F4tmie;}^bve^Slb9VRxkC!g)7r7M!+=)pG%jk0 zIZdzeLE=Nsr|XW#4b&E5@GLim&zCu0?E*_@S-F}_9CA0C(1%FtHMLh55(0|Lwq_G7 zCRVJSV3MS4?fx1h93RdE^WJb?N2-0He!f#2Bo!x_;bv<%UuU#7Ba;;y7Y7oEgr6LH zB$^OXel%0KAepy{abhr1vFHPfPE#d{cSeHuKD@{A{X@=w{tMDCnD-gEYluhSTFn)=7NONE5g-^|(gTZ(%!cdAf$&Cp_FiA{9?+ z!Ez-~HsZYIcbA^~mFBCh;;IWASDtsqEi)Y0l1bUK?gWcYL(mwJ^7Cic+;=6yACjpN zZSeflTQxeSMC(~JIb)p+Hm9}bvI*Q91q!K{yDi_J!P9NV)uv$GYi3=``MRfd1&K^q zJ5T2|J{Zog6~BIx@_3sJs{G}S`+3Sy>bd`iKjhwr_kmZ6U-6v<(6`&(FYo_{6ysOF zf1NJyQpFYDdcM~^$1mIV!)=LDIYws~nGR`JYAB3Ak*O&oeZoMO>Iu=(*!2shuM~=~ zw-F%3fGd&;5uq~2RQePvYBuhgPB77hVGu;zK33T6 zC3PVLS|xic!;EQmm#MkKMBO6S6OA&KC|2KceU;oX|2K1a5+7-R8dEWX) zf(spGa>z*D!l+^(RY-yLL(<(?-ooe-A%_$usEn&D3MT#37U+SEjxRKRGM5(Q6i&fP#9dS z8Ou|Q%E>}S8#ep*jwhE>luU56+)f^lhA}<{8a7;q3xbarm9p_Sw4$N+p0E4Q2DM(m zSj_ODM+!|MbEFuu-(Bd@(vX>w(i~CAV{Rw67|AJ#7;&gh@xo%GW$mu$#1>DFkpmE3 zDn)IVD5*(Qj`NmW4@_k#G#M#MX6*bGJG&qvD8vDM+~H!or$Gq3w{&qsqAOarLJEzR zDa65NABn8mCpa-gll$nfzN1iMj?FC`fn3i(M3hkc$$#~qqowA5`S1S?6%&;7j1tYl zHe9<$L;)p3(o|t2t#_mp1QpQ2pkgvyaDzk0$R=)Bb_-_ulw2iLI$_ffMNdP^I?So# zjyA54QZv>SjgP39P>6~q_O#wp$}w$hn5qdoyJXQn<<#6G7XxEgi@_^5F*%i1xF}e- z9vLgP-g9lQsDmR>nv{$;(ldw&QVc^b3XPA$#jA@wP1w=VFq2a@p(aXCVLV0{jL?(` 
zeBBJ2D=CL$vfG8`_h&hYH0)etWgSukR$<9hPAPOo9a@wWq@+Y)InBnLqy?Q-T!d#> z-;t@3Mc<-H*!pwM%q@0)hX|5@rjMRN9*|HBQiF(WLxVt3hc!|TbPE?E3dNzGa?wAb z6*W#oa&7P-(1(^C%K-@|G=(-a`@bnvL#cB5;E8*@`;G;LOaXxfZksdy(NDg|ANf~ZMU*X7qUfBYiw%X5JlZ+F?l#PH#_b~E zI9IeG5`yHWb1ZDnM32}x&%F2i-gM}mJ#9Lk+BpliX5~8GE)pCGTdVl~Ne`yxw>MkH zrecIzbA-2PZ8X z>-gj*<#sMmGB8d&y;o={dGCIt-j$@P=ku-OPHspw{OD}Mqjk#XTfw|nJX#3e8ChIZ z%=-qTB&$wxlqeQE&CeH};0#JgB$CPmG7V4Lp2`T03rCp?R*k~rxjpT8aL+!P3G{#SvN-w&2o2w9hxo3+ zsqZ30Jl7td7r_!KH7{HB&+mUdE*W0>Q9%erqDN$=!l)F9!TJIB=RNb)N?*;+*2L) zDzB8uP^_a~SRG!{9pZ3B5fWm2S^A3o#^D<=$Ku7mm*@R;_5Srk>_aa8gn##M|CYb` zn~zZi6@D-x05r5g(T7+|k#%cw|Hfs_Uj zJxU~4cJ!eJ5iw#wn626iq)9wC`RB4GB`Aq3Y^ZiCtn9i^(a6 zh|Cz=J>1=9d*hw7covb7NwxzKfSErtbF*vicJA+;^BtMvWJ8FSO!QN-#%-v(HJO-@ zsWFN^b==b)ESVT#Lq{D~Jt9qbq>yx>K`KSSGu1P4S@c3WtWjiip~Yy)+TUPg!9Fa( z``7f2ObpObVuK}Tz?r$vE^P48G7vrEIN%ZV-pED!5-$QHHDs#BM5kD|ui3dfARwY- zppFn);$z?JW>kiuAR+5(u>;hhFH=A4~>iKRlIAoi_AsVqp6KA0&yNG)-p zw+6Xbld4g_!H5D+!$_Z!s}VbYLqkL9*GScKuQN5~zyBZql1!%jZ~yDxpfRldHC5cu zc}*q;q!bKP!N3^yzQ#pQ6<2hkBB9?fOcO;RQwHLQF0}020d+u4N<{370F;1^ zrng7Mj;&qM;mLJE6MkIuw5!!Mum$W{z2wPlGl&%kQ zmA+ydZz-hVgW{ah2ez&u2oN$dkcLfMPzQ@rJqm3bZkd`nXX%)UHoWN8Ty<+q7}L=( zOj^1qsY3&@BO)2;jD>A^SwrguR#Y@`N5+t|JSSqgbT0_O5CxR6VjW6eG&i^ydVjB) z;etSi6fG1XwiLR5-Alj03d=q&Q3!H1U>{3Lv7-|;t=}?FQpTpBvVlE2nz%$DaP-f0 zo(yo#lQF`HhOtVRsbf}Q#oDb=A|v8^s)pZ@ivdHGv#>8o<%pTd7^y+eY{v5FN9X+P zFF)aL{?-3QeO*z9hz|;4-^MvlB~rHJD#gdZuXk6RBoi_d$d#h5Yr)R%ndFK`lMYiz zTH9OqHrBJYfhGhbnxjM$g=E(WVvIDt!7$|h+~aE4G?FK~{&`NNBXhGE|9D_m4vh9dI;ij={Yy{=_U?lkd%#&%w zKqtICZMbVKQFsced2W~NJIz2P94CQM68UHzdHXGk4*~6SW?5jAIf?|zPO$MUv$Vqn zL+b;D@hFip(a?oR8wCNu^Sxl22G+LsTQ;%bevvTG1(A-euaPJUnepYm!-vSDVaEL; zGDs~ieaoWigGqY_n^2OFf`HO`R;?pbJ(lj#C-=zi9rIWJ^4Ayn1YNErpi6s9UBIKHJoE zA@G;))tqFotW!$w_{m!p^P>TO`;{a+Qp{q`ZDlykH9tD)D2(T_?s#{Q^2t%f1~?f8 zh8nJS0OLR$zbQ$G+{+tsrTEcl$3R0WG;I<2;4i+<(R=Rze_(s~V_*LNw!K{xJ0Rjer&aLLr3d=e*L;xE-zAp`@mB6{TIUcl{~ffjUM=N#nMM zwzpm=_O;3!N+S9Yn2-u%a&kH0M4fVEPEoNSjYA@lEq53aj2xhpqVYR+-3=`}nz&_W 
zZ?Lf!Xua?g2q|c~Ep@l1b0uZBpm7@<4Ru)KV?U8e)DS5l67a!decSsag*@0a612*Z zGCk)IqOX<~x}OG9F;u5`Uo((LXlbyt7@1>zg$*S>dJ26+Ckk0QR1WuB3PRyLsWJVgN4CSm3 zVFt}aPeBM0ImA=rL`&s2tim-l8=UYYj6fKo?2Ai?iZ|&4Mk}>Dbzd(r$ z8@s+eP-zZ9eb287*+JhKq9q&g=6cz z>Aj~okjXJh7&0{hAm9nnp^#K|(X+1guy3%79gW}MV#ELNKmE5LBLB<({I5WG5>ar| zEwQ3wpbF5SLc-Q>*@Z2N3?n4x*?`7Iaxq~SR-|Hxjh4oj4CI7{27J!OFWIpsc!?D? z$NHF5=2X5S5Gmq}u^MBk`}RC(n52@{!${4k;~wz5RU8q5q{Sf-lwpGi8Fzle&IXS3 z5JyXC1_%qGG5AY<*1~_x)xdDAw*0 zf+UwS25Nxy9j&Oy#E@As;>sZd&WH^1lWudeyJ-jSlIJe*_(7bHOhesI!py;mG%k%1bJ3eCtUzOG7k zu4R%)oT^bSwU*}vDiWAe&cF+XR zd++2p4Xd5t%iR?^4mn9Rd*32ZJSi)V6UoMxT(vDWR7~^~Ej6Ftc<#!S+A98G{eo?% zd5{e-s-OO+GU2-EI7tS6mVR4WP~FFv*9hjT$Z>nEv6JMV}N9*rW;s*0`m{KDleW*1yNFLXF7G*)_B=<%+0|Jj%0U;fiyMkqFh*8jb!J?7OQ~2Sjr;(apd<}~- zVQwP#i4@{ik+&zFX)cE>I*h*8t}94!>9 z*6{VNr}G?*E&u%el3XVI_Nrr+1PTd@#`5P!3LiC@G&EjfWF!W`e{vp~7LK!{6h9~s zo0^L}@?dDWs#12&@&3edI&fT;8Ab-4ZWDIiv1uh|_X{q!8F`32EIg^t`1)l^sx`Ah zvDg7o@X@s6WfQq71EUlg1p7`Ah2*+6*w!F{dE&XMQ#PHZwhBeUD0O65j(_kY9(?i; zc#S&aJ7NdEAwRr%{hLDeyUf@(kNtKl*qb8i^~CW|QTr_c_xkJ2`|lII^6;T9gy`L5 zLiJO);1B|o)cD{i?cxxm>MM1%&LPGgO-6+Hih9sn6O=T>=qcIIx(#RM9>;pl*46C9 zp3>j2jh2BvB~?>`2)&n82!w>z?h#R-rKak32Q-;HOkF_`QPT7iIaSd4ioLy~4ZA*d zuX4tD-;PyxTRKSKh!+8Q7FG48-oj#8yIX3vWvF%U zE$%7`lM%~NW+Ey?r0OS-Q}d9VF(qrN&?Co;9-xCD*HhZCB@MkrqPE)}S9Ul} zo2eN)SMj8I!dQ(C!1aVoNiLg~oA8vx5AneN!-Z^qLWZ~9~)DVJUCn`$6!-s|@ZfQk@ zv~S@hH0+VFproV>6({;WWn7>cQpXx2GLqt#A~XdB)1=_i?zx|j zDQ%>*Fi~^1z9A74hD^9_Ry3mGp?-(lNUqyHz@+1dMr>L7EfX;!XhUlUY~4#9*K2lh z!-;;Ilf(O=VvTbdNgU9`9ZofDe8)_jLp&5oC^*js?3@N)F*XU_8|vT~lGECrn)28^ zC!-*j12Wl9tdRm6J;ILJ$_^m{OZONl;DI?u`oLS}2NWX53CAw%$P-Nwr-&ff_$33G z;GCkOK?p}G#%%qPC;}sn7^^9>#Bduco^($UQSpBM77Mpw-EC+X(*=RSa3rUo))XRV zqR&~_B`yX`G}NpKLXwbkB+tl2!P2k5JI?emE5Bt&%`nc`y9P_eR2>0uZ{qlG-inN$ zopM#zgeds!+LCMVF=16}axM7Ty_V-I&$CkVb+yACkbspJoYnPT(>Ja56y1{FB>??0!2JSU@$5$Fg=Ss z(JgNNfeC?6e``2O1?Tw;&xFbal+Zj|D)yG)##?PRSn5UYFiOjRWM@KuJYzCwS ze6?5LhrFG8{`D_3xe^2h*=XcUpXlsqXDJ{(!@4?8xU;InIukAi^??1>~2{$#GXYz?10Ecx{_ 
zL)8TC7d6giT+BSfEb#nALLwvYjvF3tGOSB+PBTlt!Na~$Z~V=v@c17QuHU}=hZ9}# zt=s;wsp1>AiLb=gn?)JlJ&xDM$KZ*v4?{`Ovr_>Yw*v&D3N=bA|k&MwQp>qw+x5Vgax;;|%;!{e4 zMBopT-w=Es1k^bYalRrRJknBrV<+*xdt*8N*z5e}4DTRM8tmUqh;U5 zreyEs9mpwl++st=MqXomLmCH26k4UUl=#pw%kCj#LgklqU5S<{V|{_rDIr>B@&Pk( z!ZP0CLyhr}OT*6lUeF_mq7Tlh#Qw?-j3G51h$#3Gaoe~>NG_O%>F7{LjBYHo)L=s%WUE|oe z4T;D}F#Kl!cbp|3qD0c$WeCj77$MHt`aL%8Ig=(FgXn;f^37jbn@qbkWm_ z784T&G9`u~jcdtNzv&(80X9rfF=HAZ5QXE;?xam_lgT5uoexKRcI7pLQL_pq+jhk)Ip?F|PY6L%hZaS`w)2eD zh=EAhxC-wLDh!#3oYvNi^pKTb^1J4m%58Wj{Q*t0XB}@S~74& zz-dvl-A7j4C7n~8>GxQ5n#y^)7`STpoF+N%Q z{BpCy#f~3OCS2E&WoJoLhYby>7yy=SaQx+Y%B|foHU;O&kYy7{WS|Ryb!|zE<09>S z-H%3*=T)Tknz@;=ZY=|y@L-toWbbK0$5~q7e8P1V`MT`rV#QDu+{;fG8NnB&LxhBp zk-RtRXnl(0Esl&Kgx;z*HIhP2$W&z63X~8i?J-JlEDbjFA;ziBcz+n#TgA&d@O(Gs zff-UL1EMC!nsL!|6|7&Un{Kl z{J)Ee>3U8Zpbj0+s)9l(3e`dk{NknI!x21~bzD{%&&$Zef#P&v84M!NZY{rF7Mx^~ zFK&lO*>O<>N+Y?h6xIjk#^7}W(U78fD>qCN)0fs*$D^USq+;u!0N|eyN*jxT- z;rMJHx%_6(*#obsr~g<@yWd8y_1TsZVQre zpxg-7jUbbrZTF2X{g(LsHgQ3|zW&=M#KjxOzj@7f1@fC>>)S%B_uR&Vuk}r@Z~WuO z(@!-KeLzae@u8?`aCEnR4^zQiB=ysfKx z+le{Cs9{elz_YMVDC86oBM8;gEJBC%y-#(>XF|Z2D{(T^(q$*>cJmS{f(nQGP1Vc)Qp4)hZ_Z=!G48)w7Iim3`O{iGd zXV_RV&_^f{3DJ^^F(FgV|mDvgmTBTkW06HV{6ZbV769Wip)NCYe$6*W8O zXl2Vhc|fXCemohnaXCAivFQT0_7W`zXraiYLCXOJZk9*08wKc4}7u0G1 zVaS)&=ct%*oK0w4V5D*e(r{<@%yh!KTd{X{4Aq>yuTU{1B6xo=#wf*g-P?~^4->6< z+1%peF;2A1^#COUCK!a!9H|i}W`K{9m+c-aTkh!r6*V{Q4Fj1G{h=^L4j>9#&y^Jkwm3Pk?^=)^<^v~XDDVA47qK$Xrb7LidI;nu#B0Y&=@J$ijvlM#9%38 z%2|3sKwxnM;YozSQ*q<2=|V+qS7drf87y_IkkL@Z1-ThBU`8Spi&)ZzKq^zTjMSka zkp>Z9VV^TI4~cTuS79^k|Hk;hXB$hV6sNf*Gm?{B(1jbCkmACCy^FXYd5}B4H;#-{ zLFG<(VPEpBdC7l$eu}{Gys7wf|CEt>grLBQJ#P<&v>s{~>2Q2rzr@)BvrE~zz(A)w zD0(3^(J7gh+|-tP*??3=8aChuylfptNIEaj3PxhUgF;cehzo&>0ydo@QE-%ao|c|Z zKFm3(JN|JEYwPfG$w_gF5rV04G%g`kieG*a`6tDPC=zUh>sD}4cs^Y^W|?M^H~O1C z001BWNkl$t1~OSk3y;rPHgj|c8=$NzUb7V1Eraz9ruca zmrafmeVA@w1Yhj$c#xfNnn&&rJ%4pmahe-$nuZVz7g@^Im3&=a@u%}6-X4zlO}QkZ zXx)gN6Exm(ktr&xXuMz>cAV!Ej*2ap`-EKQxB#`6{ARP_%I-MQBWCH4d$~l*gx_pi znpmRrlC}9_sf;! 
zuJz0^PwNGP#Pd&&hlHT`=-5%ljw7M);sld;{(hOEMC7ew%T48&Ma|AiM6`4v<)+fC zLrK2PxZDQ{-SNp}#K?49R)W77inGAqKh8k5Ty}x=Hgb|`j#J5ozy}k@`MBX9ZZp=c;I<8% z7mksUY;EL9j!06?hvN;O-zt)J$Ag(hG#S^|5gA)jsaP~QjaL{6ca1^_#XJkVGcWmc z+j7$=PE*M~2(HV7hvS;F0@kHroOrUV<94t4YHJxv&HV!I&02oHfM2aWDsxM;9HVJf&$onF@%b5l4fTux*htPTlGK{zlxq8V7$9PW!e5J%p^jC2-?+{(nPVf z?^S4gX!}P=1TJ<=#0AP2g0SeA(#1V>=z~#(nGvI>wH2*j(a8#*bok(VCU5Mgt4byy zde3h!2E!p(m*PZUJKL%~xtfxx2~Ko4-(g+T^HtprDHTSI$jyKv8PmC@p9Y7vuLLII zqen^IBiUSw53l%jffzkLwy&)Yz0<+N!B5;T*Eg#xURhAySj}&*$v5j!$2CpdLFgYR zlOt?wQ6j|)M;A->?v_lCv92VF0HU{X5G6767L3Yn0YRdML}~FMqUlj*@~}oBc(h1Z zH!ld$QL7z^D$pwFNdzLJ>XtwtQA0A3lEiYjL6X~pcDcuGm6TEh0dccekrR$I)rQLuoJquHqe$ zz*r54v}8(gmX3%XbkGzsrLr|A>4XqcR({LIMy%6>2sd4cR0XH1AQ%0HeVC-|I}2h& zt|MpZfSWS14%dh%D8wlPG)6ggeut0+yRao72w1cXBr>72j&)cwQyF75{eWC%tXcF(J&{pJiHkkPO(D@SW6@SUqEa*nBxmW{{r99M1Vu|f0yWaJ zaJSq~rhHW`8SDF`QgYPA_&m|fvV#AxaQwd9fbcxv0WTX#y&9&V!as%#VIh|Ly5V>tSmYwCTLh+M1EMErpt>n&bC@C2-;Fr5QemXwl zIP-kD(`;M8d#4Sfea^m9EV{`3`<`^Cc)C~2Q<$ZKRXf4p`St!5A2Q}lt3Aw7x?VD8-g~e?l%{=OR5kNPMLydkG*Mxr zhGMO+o zkyJ`bCl5Hgz(qddvJ8A){*FQ?{KdofkzP^OFiA8!moQHIsc)_XMh2Yd_}RM+w~Lft zZybq_G**$=z`uPe7^xv&ZXKrud^D@sHYs;);FIGHDFjt(`1L+;JPll&SVkY@{D*(r z5xRu2YPbuA$NRuQ3pUpA(WDQg2G{$nQz>{f&=>*hPU6J2$92Y(XY~dl0)_$*d|sA3 zSQ>tGvg7lz;wTn8Sqyl&(cH@nh4%dLsN|P-!#?L^YveMJ!+#@x^1+lQc5H zhd&xIy1V1SX~)-_l*>-?&Zy-_5B6+pO};6Jl_rUnuNRuJf$xteT<$F!yFiwfvUB`* zZ_ik_hM}qXU^GC9ND(xZmsl$}%X|It@!Yd*HOg4DjQqRnzF6qpk;V%_rXmRid+V@; z;^|VdY79qt%W~ZlG^Fs%5<#K_pRFu~D7dU^KC5c}(}Cf~_Z^oTL+jzrZCKgJ4+qD5 zcA4-aBWP^I;Q7VUvkF_@FJ=_hQOSsqfvSNM_cqgja$6(iN2of9!LcJ-2NNCuoS8n&d@)GI8Y4^fw0DI zvARPL@xdXbAyFAdrF5a`LvIoCbV7^)DI`u*tYgJQ+~ZWdMN3V^4o7!jyspWUDVgbS z6I?*ao;4h0z{Wj9XlzBmA3U}Sr8NpgriN&h_qWAiLq%nm2&vFAWspv2q@nKC{j}IS zVid&a$dd_)F8Wi-!4hMn^SwWm82gFaD-W_zK;G9P;}P_TuKx86-*&191+QD--ZZ`Z z;SBiAYrXGqz9JQ4h<%HjU`H2f2IdTWfC&5UhFndk{T_s%78|726mm@KO8PxQ9|+P6 z>Ea$GGpcTZr@{IjlQfVsdRFj&Jel@lKh!jCOH>{k>O=9uiZ<>KVK4}y#fF}J34%Iq 
z5JWOP!TF90mO5BEO19w|7aK(EAG-}V=$JB;Ge+hNF9Ly%TI^{2jt~V64JO6|l1MSp7eq;4 z9uadaEl=HJj^$fS%>}I}(d6iu(1=aH`M{EkIY~Ogg_bg|I7{E6iJp!UBk~@xCmO1_ zMj#lgV;13Sv`kP!_wVOm(K2C_9ushEyDOxUq;gCbDl9cdWt4W=zYmG8!S3>iVzl)mIl98<*-Ex}=X6q%RQVadJp0i6#R zDH)0hjd0X7AUt=@^KLrADT#LhA#yg}B11%pgeKJJIHBt3dCXSfu}t(3qXKswBue8$ z|Hpcq4S7^3p6ngo8)kaSqFZ8=BVokTdIutMq>dSA!P@7HRY5KKvmn1J7j(fR$WUaY zD(7Bu#6YGH6*qRpPV{x_I^>kr)5gG19Ihgdz7)^G?`O|CAa>Tl!jw{g!dDSNEm3((ybYC${@~g)U>fVY{DHw zIp9uQqA7UW++*i!I#CitpIvxom~oz-AcW)N@k74a2R5DK!_l1IY&Qf^;iG1nw!Ekm zFDl2q!Z1w*c_FA`&Cl06q#F=*!atw?h~qTj@y;U9LZzS zBf?@3AUkF{<>TQIv$SD%SMzKS%O<5Tf$J)A`z)h%fr~;Ri2VGw2_H?=Ys$gnT|zD; zv&8Y<^n~B9GWM?JuC<&dDfb5%8ygrXI8GvO7ZEQ;2qfDMzAOb-VZoozPB=eU`E8hYq-*IURUf!lmyyG`_mNq*6Z0H!71Z(=b=NH`;wLOBU7$=H# z2k(p&8>@LyIaZyZ&_`sVql%HK9x_cMRVT3l=E;=GdVaSM{BE1@=Vz9&O1P|H_Q8bt za?erfc`yw4B=F@XBWn$VM;YDM8Dr=8s;(J{oT2jk>Q^_+WW|%qQ&t_kH)^@6QvT*i z!k5*8BW*Zu9n+y>6&&lWVq`pXIpCeqfW|vY7kE}G{>{slm0$9+*?@uSOE7+V+~U08 zok7B;)l}4ICHTe56dx1*i|<=P*>Sa(e6@|-8}!4=M>!>cA$rPog+!B>45M?j z)TFV%2S?-f02tF>4$+}Rf)fHCYJ@Pz*z;RoPjBNN+DhJ>+Q)CL!FRpa^u-?D;zZx- zm6#C{17XkJUEyNGz#O5yWGpWTczo;*8x)T@OlK3F)3^;$^iygdYu4R@#+4Y+qv=vJ z!1=a+Hy+kBM2ma<`szXrsh_4Van7)C`-(u7N7h5{f(=c4vA){ul-|lH1 z-7`{E^zV)s@u8*Zc4*aGYhnnD^D`RPbDX{JF=G9ON^z#%$K%Om#x87Fb3@LQ%2j}8 zY(|6tO|VEL35v!w)XLMwJw_A+;fNwKQaM{&<3-D@_3Yf1|DUcm>5(kUv-^H`db5o_ z$CzVfRu^kzv)N5TASpsc_sq@DJ@@>7M+(iHJt7Jh9V4;FXYF$mF=1cL z5K*H=L8>&3S1jBujceFX_sK=c({904zeV7<+F2BeCe|PWB~ucqSoe?!!Q6~V;+8aK zT)Q>gEkcXLAVp-+_dNBVVPwu_zZ!-&L&AQZQhQGw9eF6&SRiVSlM|$XWt3d{QwlLu zK7<$%A~H1@$7#yOf{sv#F`ZcORz5+vDYu=a_ge~)a4*wbwVqt1to<6IVjX*|l}KT5 zcnldLDtrv&B14mMZqKPyNf(9;ekI3jZ6uMBeesYkXx4s3p%PLZ@V&&4vUZjx)Ciey z>(|WGg!_5HjlJc{p5s-9AtlPdjx|}VFk~PUIWv6p+}JHHN_-r4Isk4v!AYK>@3wqx z^ep{?my2P7{>W6UwBW;|760p}4V}E=#*R74X1J)?HyL|6!3s?j3Wdh{$XT~zKTY}G zI3wLf4C!miV7{f=KQ0>@c}4IUpQ zE=Z=yASiyfjdcjFD zq6>qliXq{=R{U^o`FP!7C|FxT?*)Y#7Cn2WAXSmpE3RAE&%g!CB!Ojb`0aLs$8p^% z_EXIt7E{)CV65((U}PYA!%?C+$U4>zZg-0FCbHp@;0oT(6M8Q|SpN6l36iMTy1_|* 
zZ1|G_15R4@laggWp!)W7&Z^fO8%3ceSPz$N zV4jv-T4;l$^+T2K^Ib4{t!FO_jAg=M zrrA&7vMz`yd>j@#KbZGC-=wU2_}Qc`2~8{c$##m8fmgQ*E#X^f;3q%6AtI=53{$C@CUBeuL~FQM@AIvh<7HR#=)f}F z<^1}(y(Y{_^aC+&VQUU(IZ~% zhI6lk;#Z5neyTXhreulZe5<*al|Y3yf{U(amI^{lxNan&HbmfGJ=#)B$>lcZy;0<4 zJM8SuwLyA8(+S?6Z?GQ9-Gs5!92A~RO6JM$iQlh_Pwt#Uc_)X$Yw7ug2#w#shyAKl z{bVdyE3&9cYwi#bL>f*IPR6`lX;Qqd51pBWz^WT)#{-)u$ zwqMcMCFZVyOh!c;wzPH-K1Di1pg2_TVueMM5q%{1KyGG3-a6LEAW1|;?`k@6iT52K z8KwIeIXos3$;^bhzaE6Cm?A^u%70GhwnQB;q$m=m@-Y#|+FyK8BW=SF$LK;$p(jlB zlq-KtW7im!5JHb8rIb^0WjIQXxpb#gRAizeYQ?2{0%%OkNL7gyLlv{U;~DqtAc7Hw zmAD?VnNiV44GcRRz1xxMl7=lM86_o~=oyO{GNvd2Ud%{D#(rWrYj3%=YjR!SV~R$T zsRR{sZoOa>_o>4MFFJPq7Atp5`El)&_5R%&7QX~woM+{oQ zTvga0=wpfwDVfX>Qs6?z!e8QJ!$cjk^&9q+BevZ&jc<{{P>6~?cAWROc(LPNdIYiJ z%AM21Ex|Vg(SlEg;uR!`OejT4sw7dgtonwfZ)yE483Ual61cRlSoY`4(ql$y%4vLx zmMM?2F`bP>U(tmIDIF`@lWCZ$F*2s4QWHhX#zN~HxBiqS?&#tcEi!^_nWqyno#3P9 z%DrIc8k%@a?+ytuFiBF@eM_Y>*1bVW!9K?bDcJdjE(pdtbkV$;J>Iwlo+BoaPu zp0M$2prw#AI;Q~1js;!ZA;X-hs^~;6e7?D$AP{_OUI4{-t~B4 z5lY}wJDks^>JI%CJp&|G)-9o==VwM)%_&`F&x{q{T@t`P~B|R@{Po)JfL(k8aD<0>EoMc0D zn++Zz0`J{3{O$7{F?zl#O%us1RIQR^({>}#~1C^0{FpETX{LQ6g z&q(%D&8v3B^Nq$x$xLTFDjly{OV%5zM1HZz{lTQ7(ml7`AXFhh6nwr@e6m||Vp8@p z&5d32WUG01lHOo%BmI=M$!kt)2-pRyH^~fCGQk7e(JXzXCt=3bG{4QF9qX7 za6bo|!gVLPXcKIJiI&(P`F2rqBWgw>@~yE)D9>+B7aS)kAxIJxxa=(_VxX635qWO} z3#&<0;HC@w+f(R5j}?wgryP|-mb&qtQ34?f-Yw=__B#xkDwV9gXOb$GeZpR9DMt-2 zZw+IU5g0ZawS-CP_-t(uL37hYUabpmI(V{c`L7?tyZ3s`QO-~P%46s-dEnz!U~CkP zh|H3RQ-WWtTJ9x=dFpv@U!nYlC(99y%lSWlXZV+odxDHCT#pvXP`R9X5;a3h$5GMp z{XNZDosucRx2GvDca|*8m?fSpaXf1TX%t8TE(-2fl1x|hS>Rh$&+$yL_n_h9zg0Y6 zro4Z@p${34%AQPn-kp0sx-NNHM=B*aNCWf``-FeAXPFe1=bM7ReCnAbf|>EG&jSCx zZF#X~kMQHn(msfyfmaJ*-ciDFy#JWdR` z_S8;OC7MJ;KHGLgVfofblZuXc(NjxJtTj7pIDYI%uUnp7ojb-$ftFwDO9k?O_%hS^b*Jy^afCNBy>A|S{rT7L^Ex!~I_t|_ zO>qiB4D8*nWQAYfU-4zLoA}~!c`_NO0I!*`uDb);4})Hf+Ym+1o;o6v84Gtq>(@Bn z)4LkyJA80B-;tY=EE%I^LMisxm-m^NISK<35;4BxUM@&=MXF2morrAhcF5HkC!O_u$4V~zyS>ZxWA!dx^5kku$LU9+>aG}G64uK%i 
z6^SVkcZ(3~Tk7tX-ZdZ;0}^cr`@3L=&!vPoAkV(ol790V#Wx4t2ABVf_{bO6;LR4k zFB{(ezyIsk{N|U>NX>{s&#CP-h1$oc94!-aGo^92L(MR2>V6F&Qm7eSTo2q@Y3_<6 z5@PUZZHO_D=z>DdY26C%dt!(nK#BwvH8!^Nq9qX-0f$iuhJw~_(K5lvlvVeN#&1wc zLku7STBh`_At2esYoc&ya(4a-Pash#_p?I^nXquzEW-jvzzIj~7f6Qo9T6Q$Xl&@o zq~_S1u%D(Fkz*+FSZ?hr3RzN$eR^uN)UAP90Z3AOcBD z$@G{gJW;@({hR*=BMtxa|M(YN*(F!)GrG_*k;lw+MHD?AioM;&7;*M`R!OM=?IWFA`1cFje$kmu8 zSS%JTL5UF-hooj=X1tRhvg`vr!z8s5kv_KEgykS!y&#pEdqsl9VWT5ef+o~V%>k7- zLJPxwHs-t4JqRTs3f?J?d8a(Y(xYQWCJjr!qh*JTDI;AB4y@?uMa#8)iIj$2mtdqp z#GJLaJnOE}C=y|4qsPZcN6&S*VlMaC(Q;|mT-z-N$rvRCl^EVbCMM%yUZRBLs;jy1 zx0sNTNq+p^Khf zXgEo8b~fYbt|o?pMI(7!6gaE7-NMuQ7BI{+ffj*v=%`dmWhD2C!FbtP`1$6X%f4rl zNT$YcV{h@Hpmu_jQt_ZteCI<5>p<@!j*h4G6=OYSVg@E{t|D94)3}U6Nphu-3=Z1H zDSGeubYqen+JWzCX=Lgyy`1ARM=@W@9b)>cDMS001BWNklp*SR0n#)@7tl9EbSzx5+-gi!T`@I8x`hWbI+6hVxMg?Nj zYVFsI8||L!{z2%QadU4pPm-Dv~Qf2fX`!LRVW_ zE9iZ~m92SENA6{kU*7C6A>&W(N#1$?gxd?nr%T0AVF8O2lAG4EpLYDSw`(51rFnjt z620JP>iO=sN+wRRYYo5KZV6~!>>_;#+`B)tLuQR&*Cd>6Gpv_f+M0!RJetB`>3M71 z()0lr2DIJdQZmT|_YNbQjo`8s6xw42{8xXl!?igt>Yf-4lSFa8liaqNG!dM9kn)q~ zj$d6wnqX=Agcvir5Sffy=320BG?_@a=n`^Kv9*HxfBY@p{lOQU-#2f6AF<(^Zr^m@ zzmN9t`yzSB{Cd7Fl)q`K{i{E7D7lbA5kxwOUjjiWlmLNd>sxlQ2N{Sl+)*KhHz)50 zZlIO}=1B@gAkxH|nHVun-(}&~)Um;dh7cV)cSEMe#KKJ@zu(&&M7qB&FEL(-B3a zXG?7t*w7&La85Vj66<=bYsk!mt-Hbr%iKI7s({1dd`lm9WK8H?gOVu{jS$e$;JqVB z1~;+ocQj$iI6Wj+Q>^Ilp=W7Nfq)Y=rP?D#Fgk-M(6S&C6+K%LJ*KC-%MwSFYRUDC zkvgIfCCjd-i47eAVj#)^uV;OW^*z1Zkf{+l6FOGlDx}VsiV4#+vbB~~zW^z?@u$eB z@i-D)VDymp-ns?OcNmqC>nR}V>lh zWJ(kfAMDUFNW_MgR&Ft|U@TAA#B(~o0ip12ixQejAEHEx6p|1HGAdA-TxCp5%3dZ| z)-5hJ7?CoSb5f~Ux`q)cLKqs>cg)@#Pb5lDsb@o5Tv$*j#Yu9=FY1q3hXNH9F81WH z;GA1BH=zz4J61%Y@B&tT$)lv850QnxVJzlIA!*|V;S=U^#AV~zIY*@@Y-3LlZoqfR z86%xDHkz~k6kvUiPGg4Wykf&r!nT9!; zOlU$!BQ}hg6LBCNor{1bl~dYaN#ck~%m@Ucuyi=Ul8{ozo&?E`HKmy0=<%TipbrWy z;5c)Hptx>!%oyWh%Lj)${6$9VBee}&3(Y1h$P>?pqeHg6q_&bSTyd1#=fTKvV^?&3 z!a?RqeIVHy=B6SM3hO1c^<21!3q5NmC|%0We_^?HX^M?4 zGm~MAp>>ALS3FD;`nIRDg4+3k(JCAd)0~F|EPFV}A}i;3w(I!Q9|&%44ZnL8SX#lo 
zvgfvw{JT#Cl`-^q3Q~&H()2msn>Q?+<f)c&7Xd|W3f&6=)52i zk?B3pCr>iEUT}YJi(ePaRKiW`d8bNQ^$J&4T>RZYjzV~%Fa)t9*P1R$z|z$TKY6CP z*a?nv!DFR38VR7wGQxAd!s`diCq z7m+S_zExQ|pAm^n(w>(qPpcBPR)d6D5}Br8{jf{$`u0ZbA>M6+zq!r*rkndZ?(lj5 z?(5NquZvi|I2IXjTyK6$2#9xXTI8$3H~!vp+!gW)iIfI&hr9|R;Nvizo-$_4j7q2Y zD7gt&1Yrrm6GK2LgOCb>Lm&wFyWH_$o9@Jloxf!y#?0j&%BRE#UiPfJQ-bfY-cqJh z%5tBKiisYR3QZGRJdq$OTpY5uiA+dTPALjfsnJ5x$H1c7U`0o-hBAO{xWx8%Y*dw! z=p!VW⪚4o*zCs|GFt{$ilwfwvI8}WqP@D(#4(N9ftXQe zi71BaL!ge=7?ln50@pCn$87u!^Xw6I93f)B!~#Q0Vdh`FZW57`%A8)87&C0CN@2)l zh4&sEG(mYL`h;FI35hpq(>KKqT&U5K| zMn+Splzon<qjMiIw%etkwCYNCkPu)~HOij1wgMJS2&9v^DP zat|#x?D}h}b=DHK{}RO*0LxWUC58+XIvBlP5Y$h{_N zgpvtDfRBMbS`tx^sGMFj>|%>=H8JL-Qjju`DQe%b0QEo$zYSLywT~4Ijk{&f9226X zi^G6lDkFs`P-27-mL2OMryM(4-(yscivk-QiHXFJu?b7|yUhY066UGmIJ@IhN`t58cbhFu=vi^Wv3iIInpsjHqF|J0Z1m(tGYcsv zX-Ogjm+gjwq+*`xfv+ty=857}o3QRJb*%Yxw;%}v)-l|@X1(Z>182LGMHi96a?v5O+YBed!v-YBJksDD3v0SB_kuaY&|YW+6I39nIJ^T zd*yw`W?&zWMS=B^n^thXmV9?E`G@;~e|yoewJ9PBYG*OJW~S#@53-+;$iV$Pr422g zZ7gG*Q)`CrDANEx|kHG7%k{Pqiye-X;ztJ^!C7v)Vb|sj@%yFo_a#~3&D)ob?U%&|uaAF|)BB}hejQ!-BF`({ zRKBl8+1FzgUlr1?pF;{wrYlObPsS7#6`j~2g&7ZgNr>vYJ^n8 z;0ZB+h_rT1h?Y|Cv!^E*X}GjE+;C3sA`(RwOA0DXlC$=w1Zu2nKt+^LWE?S4iXvw8 z!K39+{TGRJ!Qx_|50 zUDCV388Ip)3ed{XyJirWLL%aTm+~RteS3FY5aU2Ts-u{{>BE*_XPbp>2Lb}1Z6>EI3 zq-IPI79XL|dk8s@N^IyD$>U+?gcS)RB7v=6QnMloF$`VC3q({DYK9Q{?r%}ib63g` zEnTcB#So<^RKk@z=TY(iP#9AB*i$OQ$~RcCqZ1nGp%90-peUt6kg*B3Z0seum@+j7 z9GWRJT~bTMZCsI>ik_Bbyx~D|z?EGRqa+AN5W~;LPf$^k2*p&+Soj5FUEsZB8yCE4 z6)L10>Jt=#B;+XR(W0b@EoV)_+DA%JQCm+!%2-aQeV~pzQX&`5;bT5Y>X)H`~g(uddQs=x?= zRCAnYDb)xgG>HssT*K1WBn-9K>$pbu!{3D?p^mFNF0dZB!E6!2P`f}x(TN46u2_dH zg_^SRHHj?QHxB?fNHx!FU{v*-RdK%%DADoRbxRb152`Vjo#l7Uf(Pb+N)6|-ObxA%OfyUGBWIoE!6@>w zR@5#qNdi0X5hRREAk#eo!;7}%I8P|G;%AE+l4L{KKj1LU80CV}bkt44cg7=5TgRsN zWGeCpdy(rVa?=DZJIl_(C~ue-g1^2nl7V&GokdASpuHXVr+NFgyg@MO2*I1@Z;UT|agIZR4Y zC8&(#hkJsq73^ZdahCDf#xpVzAK^G3b_%Wsx4@vr0*h=MB!+t z`+`qjbbNfZZ zD#>S0B%|ngxL@hq0 
z`1alw7c^-F6C?OTW7+kZ@4r*?(aW598aX;_c>1~G@qQqCkYbveAAgi`y*B*krRSf& zz2krTc*Hyv+$$|E2wpXwPd1L$bp+u#5*1@Cx3?+Zf3)TAUgfAJA(e_*64^@}&$sZ? zYj|8L_Gf~_`8U!Y{yx$BrQ`ei4)2?f6?~Zy9z^V$B01hUi$cUys@!Kdu@JJMi%z3M*cvK|( z-CFUgQ#3RbVTKhwPIOG;5t%6Hv;(EMp)Ddr<=hyk`yT!+wXZcO1{hOH1*4Nn3VM5N@UtnTR@3$C{ zlB+Q&Xx)ms+o6@=b=`8Mj>y%NoiNx~bCAD_3mZ0JLkxhl1mTfkaP~!rEc`2!GIV~6 zZ99z4u{3z>AP9lp?DUr|M2UQ8ppiSY zSFBmm#4RF7M&_8UzCpxTpX8f>~Daqsf>|1%A5hXxwfXjzX~xOGaYK9*?M5 zkgCWQk0K!vhIQQV)AfP}$q5~nE8F31Pv;lZzDCCkK}4&RT#i}VYbNGCo)(1RD4nwI zOVU`d5Elf|(b4jm{T$~#`)Wojde;7C7!1Wx_bV3t8I2ebge6sDsyN4qW}r`qfxX;@ z4H;uPzoRloh!AkTrS=Pq>;~6A2&B?~m$;AmAjKA%)_3y~T!(%f9BuJ!h<^OwE);DbCv^d&wau z`Gk#cxpfORE}%(JreWB{hPP&e+->B&@=UsT{Eh z8&2&rM(U8Mo{|g6Rk&nN%~^#7ZMQ?|4CjXhc9tzcdH&{f%&b_mi8UI{ez8N6L8!QH zBxSZGS74*#s!Nz8g0n`kb(XVT#7KBtMIKh3`xV@Fo{hNSKO0YpcxsywW5O&+`Pu4* zBRwOLf@u|ll8EBfMswaoIxnbH#*^(0|I23y5AuYQiDP0Mo37yTw5PF- zMY|?=cvT*cQR$a+)xRP$kwdujKd^nBuk1U^7%IKn^n(Q7s-T1%1Evi z(?oEP2kJ%SQ8A?wd+faBC{t8gkjubn9hn+QRGLZI^WriAgtM-uKP&m0Mk3{!lkxzgI&M0{Vcyd;ff(WBpk-GpE<4Xf zTXUinmrnBON^_J)wld{lw&TTR#y@#nbG1ylb(%Aq@Y&^#AMK5}H>>&hHevZ}O6#|H zz2k#YF=_=aO7>;K{i$U{ij9J2XNr^D^UqEceRLFU#P%8gX!@A9E6M$F&BN~~E|yTe zXqc9PU?ut3;0Y&l!QlXYO8E4nsEqPW`T z!@|>90)oyPG6_ZIsOkC9W6Ql;!{3~1u3EwUQZh>|JCkFp5$STrA0GC&9*$DSc@5lj zJUB>Lt~H!>h!8Qd=dkG5b{W6As!-#=58m@Eo_E~$DWgR2@F*!W#@PcZbwJ64iJVf25hFRK5+ht3Hs%sl zVntvV7xZC+^Bo}ul+;LJkW$|XK+q~B)fIbcpMy;Ehx>u=Ofr%jQdOBLEv(5?ufi zL}2Hy@xhVkq0}PA0Vr^Q0t~1(ldl$uW&+2tuHuCKDx% zZ&-7S7oI-saP+isLvAL#o4t#V;6wl%o_-kmWW;s2qGrQX9P%(bA|a=tAKKk$ScX%Y zxS`a0cLF#P@z^lrosB6#f{qzc1#E2S!UpRay12pU91%T27}oxr)(%)Rr8FukGBqaE z6+)`J>+Ols(b_H6*BF(Os)~R8-~B5xW%%>|>Bl_lPFaR45RnmM)_%*54jTDMU^h3S>-Kh9yOuQOAZ}_Cy3WS|;j{T;+74A!WkclUK3?3)ch>J0Lp$T;2I``mNhwbr|y7ZP)|Kg=pr#A?f2 z@8SK3K6#qh(xm~wo+(9;h?blYBdk#zWMgJl;i5(}rqU&z4q}d0iG4fftP6yw$@G9< z>S^$)VQxn(!*=+*&{I}n!^l`9TIQMXJ9k%na&th+&D4yYH}olT<#yaF$F$Mo(lAQj z&q_#&Qbq`xJWbf86>V&osyUuOLJ`DKP?AdtXj~GuE-*3~eGJ#jI+3v{DXpeZid)$Y 
zGF=dYWgBZIdY{|n1SE3V?^wl#eRDvmMl{h8l3}7to`pwD)SOcGh)NU0k!wp&!>0G_ z7(RbUkV(ZNoKdr)&?AoR9TXXoN@9XdyksPMAQ?eC?|tu>fAY_MgMazI{2><)8g7;a z`xd^euV{S6N3()9WMpbUn1SK5ZAa}gn$YrARWY)OmD4C9g*J3?ScD6q;>oVzX&o4= z5eIpJ5si_^(pjuzoOhmFGD@R(vuyeDav&kRIr8kB;-X7*(qj>h%Zx4v_bP)(PU9?N zld<)Y(^|1hp4-KklXkGA#UvC)bC4-!`Huf@an9Xx#6hNTp(G~F!Z#dbC1;)IqDz=q zaA)NC(I;DsN_={1X@llryFtYYErM0>=A=W3W@6!B-jQ2qJByNz8&yV(Ios$clxA)< zF&aK!W@yoblrc30dzq#Snu9!1NQUzziHMbqm@J36;j^XZ@isBbGp1ItbsY)Cs*4n2 zXj;RnA3V6fc95uD&ObcLh+)FajQRfYu)O?>h2pD~XKqVeVzBuISoEHwqU5cSWiL|{ zI-~Kzd$XQRuXwYpaAA1vMX%_h;?0lV;Jx?v`1Fr{MjH&JO-$`Dx&6%#S{|M&Ld@AO z6fOmhi^Q!mun5A#<$&F~J&xSSb1piIi<*=SF-V#F`v3qS07*naR1}P4Os+Kv&y6DE zxP-kT@oZ;N&d^4KO^G5CZoNOH=oRyn5HXB(fG+XT_vgfH#Ol1`jrop1V6`3b#z@%t zz_PV08mO$tr(x02m>!YHhjT|^JU?5m7=;2WmiOl^-}^W*+0Xdmk#MU@e0ML=dChrL z{P1^54#t6J4;9&-V%=Jd3LK9ezgU*+nhYESuc`F#T0gnlux=GwDLA;7@rwtJt9sbi zunJt#JlF^~GQ-VEF)us^rRSov)Io8hQf%vt$6L+qN+>Omgg<{^`25*|HW^H??0>Y! zzq`NU)5Qw!ri@I+{bh#LioVf&Fm_bhaMEfXuM2)~lt`lZo5wjvrL}L~Mi8Y@VqYUYyuS4IpMDKWWz32@9LinhX3X8H;<%_#=)or` zC6(GElM0`Puz`dpME^4E;??868u%M!G0dr*B2#cwCf*rAB*#TVN{Ve-5=E$4GS@R^ z*@!}wgLruP+$EZvT&84FV#%49L&o}$1G&k*K4c^Z7*!2onVd+E7soE1)6t+%lrm+k z4u<49eNAD2p>rH+`p~{QmZCt?br*f`V8;}QDIj7nxTqp-#OOb zjJp`FzYxEF^hbZppZ~>=5tT^d0Zkw3VewFTlxTcvF)F7^+hH`XEFMpj7DNd{N1HiD z59hB>TOiT;&4Arf;8M$6ZZS7^NgARWfcv5hAvlc689dOQQVsSH(S}mZ&?=)(TOtuF z6*Kl2>5PvLESY40Ws+GsuQ{}P!$ctr!ZN5ppX#BjZZo>L!d^$zCK;Zi51trvx}Z=f zW_jZKHyzQBF^mW(n$*&!hLu}0Qgg;OqYXVIgOW(0Ept02L`yCiduAUYLz8h}?{FiV zGcpB^#GU*IoeJu-yH3LD*t#>cF&Hx-q9hb}@u+0zWIZsL^@KicaG|BJ6GSDZ_Lx%U z1PR3G>Cze(c4#Rm)jnFwbwoDTh1fv-(8gX{hMiyf&~7@fB8SNi)#uQ#`xQ?X5%lBqzIC*6xU>dE1Wp8I6{eIqV}kyNjD=7=O5k&*0^%a|^0xpL=p(o?CDHtxuD0pb|T1c@0a zk5-mls%e~NqziIwFrv9pR%A+3s*-uGQOR)G4}_OXcSfd09H_g)?-*Y==MCGmqmPjQ z!+Fl3EDRsN)ss2RsvXjcT#OJioFuH$l-jVDYf7zX1LRgXE((lf2$q@6XhNb?IhBm4 zWK5|uDxEXYmR;ys`I=3Doh&D;V7;#bO(A5iG#OLl3`*;fW9sm6G(aobjRh%bLfZ1- zT=?5H&x$K6& zbHOmT8MjKfRSF>`Tna3DHAFV+1;2Gq&{1=_(^SSV%Pej5T)Ccu+;Y2&bUs6;irb~a 
zM@=6Gi^gFtbTLxujH6t$pFtaiY3|wEgSmKYw4B!lpAMS~c+_wjAs`PLE7{_W5B#mch_o?~m-`9x07ww;nG z#l3lpr)57gymh!^Tc_)QjKr_q+49Rpj|xLU@yl(C6~n5FXeHEMnUWIGg5x2i}Vg^zCRm}ij!M|~A3CqJB1Tl%aXzZ+`NU_VBu8;dRgNE7Vo`nw{Zwfjiv(Ws2M1 zd%3=yyy_soTBdp-fZ>&>{^s+%5@>zgvNf07Fk|Y_vTi#zp=X=6I5wD6GD`c*jQ9Om7BT*2uq4!%5J;eWtW04+FyA_!kQ`k9yaGmle*Kgo*NcM|MaxGyx5#A7mMiKw5I9_H zhLpgtMd?W7kS{lZhg-u@Df}=Dqv*HB3S*#Z=Tti9wA-*w3!2`N(%{#cs3|w{8AcSH z*Yq(^DZ|+AvC>;y^qjXFf*!i}AqEOna9eJJ1iUL5=|kFB^R!;$>4&8{B2gR%a+{bU z4K0R&3!S2n%ZQ3OK6;$&SanZ9LFWfn@vZW^!?}tbkzu)VB!^UXLMts>cRqxaBuA2g z!QA@|fgUA>aehRRCS~rZm^OoG7)lBlHw666?D4c z*zQxNF=uW;nU##S#=ALn92VaK4W-V|Y8dSm2Kvb1nzSOiDOFwZ(~B{8YtOc`tUJrchn}4aJXlRw_K}V6SjQEL zob#Z0qu65|Y6>hxrg(om;iOlb_6?$L{!p@lp&^)ZLHab4M`Mj7z*kp1)>ODYM7ZZnF!0c zqYgc_tjSD86KZmDoH$epZ2T#uo?;|p(K(v5!6z9KE5jNc_dz^JB&4BB`tj@ryRxPZ zjbB$m_^D#mCT6zcaTAE%GS4ITx1Jk?;_ZpyypQBs^Z7dQWwU0i zOL9@%E@2sj6e1xCQ949LHa>8-D#%(*p@ctG!d9rJX2nShCh>59D z^qJ5`N3P(^33sZV&sP~f!jD%SGo8q@noJD)X~N^K<6D!Q)9ujsdT(s0Bdq(5r`;7d z?3n#*%-fT|{Y~J!8NOdW-q&1pkRaYD^zj9$!5+)E8H%HUpw6K=|#!M$6M~N3Ty%& z;jd4}EPFT@1@`ie_h+8}=ktQo)^S)^awXg-6^(;sr?`I^schixVaFF|86SSIz-uYi_^o@Fx)$g?m@v&gfLmkH3IN+bvoY^UQEwH+<_xPN^-& z`ySgxJ~HrU4|?vFi8pTaoIh4%s;BD}&o?=3=$Rc@j;EgWiD57AsK&ywv!nnq1cDb< zea0JAWMbj1{T+pZ+6&GLAsHTS6;G~&KRpkOOyYQ+Ato`ODr^vbdS-dvXufq2xqAf9 zFW}a996$a74o%?0H(UPnQOQ;BXv5I#n&^T*JyF~{EVz5avp048@y~LC1U@>n+^HOD z4yzTsX+3-Mns>$XuOAtHwKDwXVZ*!okp{yi2*0)0ak45{TxA&9(x${4eSpzV8>E-+%M3 z{{w&Vmw!dB$5iS77kBtLAjJ0cF#%6ZpvmatW{A2?2A>*`A#S#Dmq<)Z^Z{My23v>p zRJ193SH62j-Wh20&QBd?oT+hx0#z8)L}iO_z55xxh|OKiaK0{0B{rCS3R^|NRzxoO9ly8w_psYAr937P@*f&$U=sEA7GqZc#%=WnGo^Uh2 z$uh23giGGe?y>S4#=7FtJ!PzC6wJw`pwN+ADhdN@x1p37duE0xMMr)8DH8ChrBGvZ zRGhiz#Hg6r8T;9UQso#ja+Pr&PMPX4zkaNUs^;@mM;#nK1}@!_ML6ZMzhtHl&=NW6 zuDD@uqR?#O1^XFfGU6&+P+l)R6;za}Vrp`pH=a$1oYpm@g2N(m-YNQ^8D%~D`Hodv z5Rx!8mPKFl$Uk5kT6%A2{D6J3YG97mJ~1;DRubFT(8q+v5~8Kj!h4f~wI5=>->ovX zy~R)>2rjL$GD4ze;V&?0L=xDAE!%X-9>;{#;ibnD5fn;A&cj1AIV*p9ZF4Y8RE5$l 
z7yT(~mK>OU6oQK_y>F=15*X4U>vY8=FDa$sq<_eze*z+W^vS#Y(|`6G{MjG;A)Idb z{=TqsIse(cgljTpreY)$&>bNf+O*~S}7zIT(!cYjckHr>mo|kd}pTle79q2O00sDwrA&rKU*$YxSH?ICfppY**W1> zImC?LyBXM~6?0>Gv@Tfe6s2-lrTO9eHHWt2FbizG!D!Dx9w~I-Nj+fK3M1SthqStx zHB?6UlCw{qIGdE-IjWva2h_tvk;`S&*WgEV|*K<^8e!lAY zaGG&oHM`#O!K`H2C#G4C;90og`!cr;mwjYvNOD0ZL^|%4 zIdw?1Ui03h)xz-rqr17l6Q$D!sxOW`+<*DXYF;E(QzFsnZ|Ck?q=MI1S2R|Vs zp%lxV{lswr?;UFn4lUWvaA(r9ahg?c*m%pGY0onUH>!k^oc*JltS%#8br*a%DVS=- zlbzvgH%v4xS};0s_oidl77rm1pBLT~t`y2#8NxWNn3MyVriR-WiJ@BVjM6;3 zC^^}b{PIfixT$&C)u0=mYzykP;%Ktvrx!VUW#s#BxAd*SN5en(Ao1QE%WuA2qejB= z9M0Qex>C0lC+h(+@K1hw%%ai!$!8j4E57r6%a>1=d~a6q!Bi1tphO%R`06~P=!DK& zmVU5c7-Klfdmb$2|2sZ?G-8oEHQ?Z}j;;rJ4 zHmLqN0w(S+m(p&gKGA{dpUlo`6hdQ40M@@A~| z$jk^W=Gs@f;i7xQqJNB&1}g=Ho?}gc)04jS2TV_yk>x=G>DlP9KEjz zvA-6$>N*;^C8cy-z{Km$?>En7;`Ofm-(7Nh^`3b9?YH>--~Z?Q$^ZRR{^qa0BuOCC zgYR>s4u>a+rPU(aO^L4OW-K7h&8Hqi3GIiIYH+t{Ce>qV(5iJP^ba@NBw^ z>#qIqdP|wm#x*Gk%kC*wm6VKGhcjH-(#M8O4?^a$eaP4xki-+CXC!-!)d4@2z1GPZ1O|eE=~KQY2SXk_u>16f&hvEf?-7 zJ83Xt$&>*ydR0@(6e~H0c247amf;yaJL<3^HzT4-C_Nyt0v&B!qLrmk69i45gOpIp z;$lsVj?9dR1SCNnm$ZJ3^DQ&`28EsxuiwXonP0Pbdt7K3ngr$irM8#nA?V3Qi!xWWZX#)xfd5D6^e8A%DA zK*oqR42Y&&mCW^&bzBjozG+8z7Zlyz@;zEi{mA50aRinHF4YvIU@@eB!Dl~EjJB@fywwxMI+ju>gnMb~hn z%vm}|C1V75>K-#vGjhp!*0y}rovDD{+0ydsl~19OAJ{E&(xB2ZYv zzMWF&0c|wa6AC%R(evKNNBr)8`H%Q_|NCcr`q`SR-p~Z$$tASjaDTU>_MY$W5s>^s(*PM4Ai3RUZ zHKYs|6c4wGHVPM=WnwMIc|xLL(TwmxQ;4SVmY=U`qUxBLl4os0CYFOd2P`2CQN~Ri zFn+sGGqS>EYbmwC1v?0^(@J2qt5|dv7d@A4&eRxg zmY%f(Enr3X;&CDb`0l*t*0@7L#euba`h3I9Ht_Ii!Lx0F1k2l_5w|9qfBGAqtR0R+ zOv2JdjA*7(^JEuzv=D+bJiN$xyz-pZnh$1?4-b0w^Ne@K3Y$ty%Ba1ec6LC4ipI~! 
zh;~#7Vk><0G@@F^d-qzNKh4R}6jEZ8=2=s7yR;lOTmIt9jJ_9?Bw|-H^#wn;z2W|{ z;`6m7Q;I@^QIThj5a@7m!@arUcYa&ZNW|_EpDr^_yPi*1i4-j}3n{{bjpZ*+Tb{2R zn`WSn+^RC(7$+u~=gU>a!4JN}JKr0qGUn@o=Y`mO5rO-4;=>Ce`%ON;tM2d%8^h~% zi5CKDV96#iaP2>RHClgVaCrS0zb3|0q7Mx&)MR=@nWiMB^l68qW>VZhDMO3_VMygc zhK_Ma9wKO!<6}=ugO^Ew5PPotGrnwuoA!{m${7baxCrNMr1gO&1TOuCIxdD3DmkKH zPSk-Y*VQZ$!47FlYDFn~NE8x#SYshEps(l&($lG-%eV^*HvKuy)Tm&wsu)CxnG$1< z5>4+LTxe+fHFY{4{H>zVs^BI6SnSIq7or=^iM;w9U)*P~I}AY%LrtKD^YT&%zg~oV z-P;Y&#`U#t*ASAY5B0TRFIe}_38`mlZVt!ZD}3ls*F`}eHfS;QsbQC{sL~#Zz|ub@ z(*x6V+g}Yt46{dWM~v+ehMbK*qf;AL>+A$$@L>15~qw!aic19O!oHS^oNGg&< z*poYh#US4@AAc0Q#FH6}&HmD}NDjfy!Q$DX>wMYv!Q7VN2GZf1KtYERKxqf$mmiAy^~ zB?@h+L&L=Ek$g!=iB>k8`39pZbksngxX{K?XbN`&A68qdCkOmZ= zg0=7HoTZd8MB#buIPs6USKVYT#{{oHC9Eo#>lp`G#nXC$reLZ^oOI_b;t9E$kf)L) z8A>5iA|yi-dUAnW59xzblk?oIFj{!WzCrI4KEgIQT6%~zr%i?;$3)FpI>V(MGr3Ks zEGZZ+-IhhT#$W_Rd9KvaWGB~RJ~i@0LS0aj&f;uT}5 z2ysY_Lt7#}z21>YNs1*;noH(-G@$vEQ22K6lrz|{tTOBzjE(1JQSik&F|~$F8Ma>I zJoGN36FA5$JMWlU%{(`Zljo8^ldim5OG{w89b4XX7KH4K@$_FwPM*Nmd-O# zBXnVTuybs}mS4Z6`0{biPaeQncI4uDYf|!{_MG${ts*yzib`1;4_hC}bMAb&6-=;ly@hZ z+G}P;Q+vpDZyn}rz2UqYnDYCX z3@LbRK^q)j)h)ksY}w0&ts6YFJ`UgS?~X^bez1WoI!{a)(_C0C5})ixKAdHIu`apt zHTO>|{_OFV7+ZdHY^z3_0Q@C2ULiA?9T(QuGECJH{^ZU|w7BJsg2 za&RkBw!-nPhTB`s){QyJ11q2Sa#iuxNHDo#yQsKthxd8uJe7gVHu2_IVS3H73oKp0 z$H@2gHD7I@PnOz6+N9|_cz~R{4gdfk07*naRJe(}GlmZ)V_bwx60@QwCdFRv*_(8@ zD|qkS#MVQuh9=CLvkt2gmrqB0d+In{Iu@;BWXJdbd-E-!(@ZkO$*yGM&v@W}DmqU2fQ@Hj?Ec=A_07ZI8sW;Wvck`E`s#%Zov zji`n?Xl|7ic?>+e$$4)b=OqA6trKz467c0 z$1|E<3G5fo^?H!PE0o#`8T`T@{6eG+H&vq1$PNnAbH0<

ESkgJM^yN+|erIwmaTp}2>*ug%b zBRbANX(TFA>d<7k)C?Ac1ic6?OMJNY1M59<8Bt^tD!ZZ$D|~9H`vox$!4OKB!Iq&d zN@)r;!D~nFw)oHxV^55J5Wq?yNv;o)5M%!`=pxB$g-Uu+jJ&{|rRz{Qg`~*WLreJD z&lla-7vpp#3Z)b!6E^<&a2!KVB0vHY4PDrxL{Zv3T&&5>lre`q_rIWv+o4O&5O+P2 zeb(^=A3Jo+aG^nKOP_W$ae-19eQGdLfV4 zrfj%mDo2>4FsURGICEcdWZt0EB`Jc_4M_q%bu{jZi5wH8!^eRxkO=h2v6M5OZ#RtF z86S+_EuNf;U2NzjunTK~2r&h;2zzGmC2o9+Cs0U^RvMwD4UswuOP4q- zB6I6GQJS3(XfZr@;LbCG>J zWn@Z5l2IDrv`&1wZZRTEtYK<9PFmQvdrYmO4uO^Ls8h|Smvb(9M+lmAFqB#`%^Z_n za~Tw~JmuGz-*i4Bj^4VJRt=pE%3*oO8k)tB9pC`_m#KviS zNN7hY=l|GqXC7$gW9*lX!`X(mHM}wQtag@%n~wYKIrroZax~dg*qmxYP;^0XVZ?dg zGsz5o9og-Kr&~q+A3Hw0|~ENiLdYml*32m)$UZcEO{J z<9pw>eD=BFatD9%IAdmoO`nJf#C zjZbtj@zKqeU!CSmjAiK~UT;w{o4y6S|3-`OK~=g;=^lw-oGo1|1NPt$joFwW3ge?pRnr}gf#d~Yqz59SGZUs zYG8Bv(BeajQf3gm>Xn@$v(?}Q))^zSKOi#v6$`)Qsxz#+eAr61nz707!4MNPVL=@h z^l=z@y0p9AK98)zb5{NtJAX+N7i|0)yLgGGrjNr|e&-f+X+xXV1NA`7DD;%;VwjK| zF%BKo5FNd5=>6{cxT)(Tv>}iokzOu-r4)&=BVH%o4afYNY>{3px(#AMUY@%bCC3Yk z$*Z3ItMf2i-hO+qJ^b;%{Y!rK*Pjn>9?2+WhE9gsugFb>(HXfOvkMEfHjK;>S}ZQ@ zDAe>?khO%=UN5*LLJAmN;8TayBdjh4c5&*+WrQR} zsDM)~J~S997^!`t1UkPZ(bM`38+T5jW}p*E617_m4kI*$o#SFn?Uuti03toc6ae_t z;-jPSE3^!_a34EzH9=z-nM3}&-~X>b;NSeKA9FX~r=i8CL?ke`6SOEMdXHm0Ah5dV zF&LWE(4;lX^c)e$bw-beXfWB5W4lii13@_oRWQ{PV(hsJPhJ*Mxm4u3!sD2!BSiH> z#B(~JFoqQ;G_fO3BSxlR70%hj3$FZdpeZ(DlxVh zq>3a%p*mOlG}P#n(Xye98>|eJh+HdXX2h9Wv+XTUYegM2eS+uRj*-?FaqN6g9Rdl5 zR}I_PF)=xr4t&*|bJ8uDo7n(ij7xlib!d>7u?sy->`1`6_pD-#i3R^ZU2payOPZed z{k|>s+-k40_uezp;*i6kh6F(YV_TAaZ3Div;lIRxi!THNf?&w7Y=brg%alZdoH0F| zwWp`MPoJeu?YYMKZN7*)eNOklbx~D;%FL*YFEaCq_kG^q^FfvI+YnK7g)S zJ5LuPg%a2(7^35yGU5FRtewD$jz?L^X*Q6moMj&%3R0!nPZhO`oaBZsb{wXfB8>!Z z2n3e4<7OC%JVA66I%Ar7K6;?|^p)dfJ+MEM92`0R;ryD@e2fg=*+{A3RdEtN+gsi{9eDr5@YT6uYXwg#$*Wp( z*0ubue!<*K*!T_S{f=fR@KG>H*F0-GtT*)0lZeQ~L~^Zi5iA3S9H)1T1!jE_Iktk#yRMiLQh zoy7;qqZ#~@_dq$t_nr)7j~$mUBkKWvbsOm%5Dh|F*4Fbkmy*s4f`=!Ip3WvzS;LE6 z!ps=nEi06a?5CcSGVp36Xa_-opmmDNUL#`UH@k)^Y1!CFsZw4vl2`?PcjZ|-Nj?kA zgy+vb?nr{?UKY7*B-g!TY7+kVUSu5yuIquBmUN+GV;#F8ut-w6=%}3lDf#VEvB+Vt 
zir=i4$nBKSNuIro9H_|ECgbmy4P83mz31`FP`7gq@}7r9V7XBU)iC%3A0pdc@nkl3 zpFX`z&|a{1k;jGSsx~}r9e@0e=V}X?go9LaPy|XXnQF_;V99ik5DC9rH6U9aEq2%) ze6dVWBJgJ?1F4j(2ElUh{9rLATAsHF7aa&0x$PzQ3r8Dzj7WJ=8`k|8lf0J(e)x&y zo&AB$GDG0$lA3?|{gRm#{9?W3Aaitnv|>K|)9>)$AC0sJ1?InhyW01f>-$X+^!Cu( z>!07UG<<#Ew}tFmkNaBqB9ST`L*DK}7QX2l=B+)w-Vz9zV01NBMUhCU(IO#H3GxnF zp@h0KUW~yOt=$m9K$=X*O!bEG3n(?(1fZ0uJ+-@K=dbDf z2CWh@Rgsz!qtm-%Ch?)ehXLn0eCWsD{n#QLpU;Rx8}To9y4*pldiQtwmBBM^et-;R|$Rbd%O#1ySk zy0{^e1%uzANh#EVT#fOsA$V+TX@*auf zLXqkTy|0l%g*8TIl=1)_ z6RhZ|#32v!19Fwoh7KtV$LTRX3SM>r_cMmT-J`QAZGFEDZ zAy<1OGN)8W^kEHBlBk?ir2NxcD~u>WhS5f%(osNDQ;CYX z&dFs#!uWxls+6gkVgpzjPSQPcF?xzK87NK0adLo*J%ohYct#s)2B!%@qGI4Wz91D7 z`q0qD0f}PIRFooT@R8eagAF?-@_-N}ySQQB%sI(23YBtN3g(IBXRD5ZErk|T2BgrG zGU4g)0v9ch();8pC6R&`-79MEDdd9Oz%I62`R7b^&OLL;%#5j#7ycE4@W^P`v8IhJ zEAPpfb3dPvY1sPR*u5elry`dXJq=!Zj`az-7TmfuyRhV9cu5xoOSd5cWjJ6e7DN%4 zBnj4gf`~lG4%k-(xhhaXGZiyx-w}nRU`ii4hR_j29QTD~VoDSxl`8q8Km3Rv{nPL8 z@Bj5r**@QL*?Jbb!iR)+%aRmLm06xP8@9b*=Q7UQz|F8_VKm>H?h{E62$Ya0VK{37 z0eIf+=o~yQM>2p638|9AsA%l?KB}eWvJ2eyful5Mq9xC2xR)93m70l3SlXVK{RQt8 zGo(~(hZGqF56d1O&Df11r!W8`r&ff77gv(6+BKid6Bb(X+bhLC-j_Vv1zt8{{OppJ zddR7*;!!Et%N0MkZ@Fm)cGhr`B|I(@KHv0QE=Nq>;0?JF_$as;8gesm(Za>B?ML%ZQya`ILuQTr^!X&IPt7o$<0kdWhG~uf}>RM-fZC0b&3lf9aBD@c|NaK zw84|al3%R`-d(_924AcVhgD#*=h$ph*1h7Q5hO~}dqE#GdzoWrC513NDJO)GkV?s6 z5_x%@QEJI2i=JkH2aArMJQsXN;g=9B19Y@3tME?FPCg<@t5N&#pVZs9T;CiqdIPXfu=Z{I*`}Wt{ z=(M+z#$vc@q0dOw6df}>14G;)r6Vzu5!V`elt>7{VSPsoaS3AUkO6)*94E_*>cY$%dFDsxN~PngR&b3JA4wsc~Pk^@ReY_u4e zVN^~kr<8HdzFsiV8BOR}yP7Sxv~f-Eo3Z-nmJGgU9CXttWl(Z7+=t)@!QU}_E#t7D zze{g>vvyLBR*Z0$@HPtHFj`K8;oc!Sz>s)+cD6fW5Om3Y=;gh0|^P0 zJYuGE4otzy4qUp(TAUHRr}10%^d4QXcroC;Ce;~-ML}eWB;m5(VpK{yY}wiiv@)Z| zR_@b>E$e8B!C`d9nky1jpcz9Fy4d1nN1jg52epo!Ag<1|j7g^Bp=mw1m@K()yH{eTVbbVu@sw zfIxYTj0rF6j)_h^M*d?5P7Hf;QYzP*JLafu5^)&Xhwc zHN|?#IxeyFR6~W70T)2U42vTXDQ(!0sBz8Pu%?vzq#_-8>;bxPMG^$=4t4fo=cyf3 z#z2fbDh=P6c~;JIUTYqw$Mi<=qN!O~$GyDdd$SxR1f>zYswMNZK#K9*Yik2aNIse; 
z)Pv$lCFq=_ae?DZu(N{w+_H7hIY=;cUhs?6SmE7IEWg`&q|kUFS@o7rHxVrrfu8f; zbJdpY+-Uhb$|SGao+p(7B%keC-YIgvH(8)X;G*$-xo!}C!nNITkQs=Q=Uv0xR4h!w z5F(X%Uz2x(?BM})h zwa02u9OPpVLLnpsCxu^xX*r&CEU)4PyPaW@ zJEaqXv{#%h$3e1G3v7Vf0UneB7b0hE;5akX7M8tZ)q5VCcpmF9diT7K{OwiCPhM$u zvE`mA_}{w@DVAa&&D7X=RsI4K;z+gX0I%-Ift4`y4w z+U_u7&SL_f{l6V2Mza|->BRA1X8Fylj3*OV4uXXdd^i=n+NO9Uxq`-y1QV^GbBcRe z;L%Cor?*ocO#;XF0}Jgr78?7@lvj;Fq>+=%a+)hz8<1kW7<)8t*))cquTnmoScK3d z!t;J5xE=)GEpuk6<8Ln&Wop?7P{6aZj7)hZrb0&ebg3z&qPKyaRg}i_@l5f-)bY#P zg3C?Bh{%4bD1CvAaJEXY-tgs3#$M{sDsrR^^CbCN<>+tvJKwawU&~T)S9N+@7`>_R zD&A%Q$G0BiO(@2{U%ZDjlh2Mc;RaiETkU)5+ZB&5*J#GPU&pTU|W>Z zB&r}!M|6`Aidx=`b^>qd?e4DX1wxv;3Ui=gNf*{!^^(WoefCUAo+=K+g!{b5dB5Xq zIHMO0gi)Ltk>TQu<*?&2d_^DXF`eu#y4f=h*0piXDn4WDkEz_8A$CZSkjp7^j?qTZ zQ?m)T+zu~kh9%zDuO%w42iJ^BzaCceVZgeU5FO&K@{167-+xVY7=6RZ9q#N-1mAhJ z-^3b!o8a-ywfgqD0~E=eMCHI(J)g)0JHKL(I|Pw7Y>+~c-Qmf+hy;eOIl1fZ9GPee zwTD0vV^0(U8(R`lkjXiMh!ey3|3*)$CS+7!e>iU4C5D_#P0=#NXZ={A6cVCJTLpy0GTP zPPp}>r+ThRj1L@UIq&I|+6%7xftBB}bxRESNGI?aUPmgeIrlxeo-h>;So<>?Rv1|z zqoN6GmRztGp3sIs&jwHAzIx2stPv=XJx%QCW5dRtLlh(?B~uG@$grVdh&znT5OIuW z4H3Mstiu_d-B2aTXpx9^v<2ODR3`d#9Bhp4ZmPe+(o(8@w(go-&4>bA9BAzlDI}fU zP>MaK>Hr&Bw(=UsfFUJQn$CHa@d6o-7-Gjv%po`iIbg^z7}nw%?<}R7BJZM2HTn{xHzhR*!NTIpxdagUcahmh6924XoP9}V3D0#M%{9FA|(|mS_rmIGc}qUyQB{N zn5M{@IaAhg$(%!W;hKM_FG$o3fhL5A7lBSRtilya7z7d@Ei%GXS11Zv>#^PvaI|sF zAbO^9!c>>sH^+SDUJ+$Z?^X=TkcsNf7BXQ7j-B5EkSay*11GuURd0FP1SUrD!OZg2 zR`c1e=3!AmjNC6uj#9ziUeB|oWZOinaBQsNNh#PE#q+jDkWgENk%IjUR)gZh`M_r@ z$AeO{Fp9srz9eNIDWUb?g5j%O;L8}P631a;$(1JM9iq@olgP=w<<~DW5(#B8TFTDq z(U9Hj%RKBf1taf(zajBz}gHhi_75~Js|P$(G~0w^I!WF(P- z)+&B^Q*vW#5IKuf;$x(|-JWy18!ZwU6O<4f z=7vpZdD(fkPGf{d3PB|`T8vEkL<^2`#astc?TG|@)LiZ|GG(|P8tS;=y~&*O*6{O< z#f8A1A1E9VB?S+vo|n6ft80%Go?NB8H*Hwkl)085a_TS^l6IX#IK@P3HqP^9eZiA_ zpZzo=Q4OzJ&0(t9*+{AdrIBogfjWuILdVS@x$Fbi!!6S!<4`XswPFx(yNc{wd>=GBJjIqgnSofB$*H$1}^; z^sL+fO7X#A;A*-v`XK-SAOJ~3K~!a^l87*ZrZr4*$LWKC{w2(vX9${G+i}q)ERvjD zdw#z3{HN~?9PN8f=YhZfJR*anbDqQ0aMKF5vBvwHueRfPePa~?uyWwzx#!(k%jfHi 
zn=UZbhRirlk1dxsib*=~`BpPThd?ng5%tyx+qV+l?!?wNMA6&7z2WYDThRXg*V{t) zHFE1U=Qn{E?uN3y{dnIN<9Ay^lb8ts&k!1HXmMc}sRO~|e1B)77%Pr};m*GpF)Bq# z^R-7-2rx21DK!qlg#aSZx-Gk~qxJzG5`+jN&ML`~R75ho@K{4BOZrO%&tZ%Ts zr?X=mafpuIbqub-`2ia{>bRl}E4oX8{aa(lxauXn)55!v;% zfA=mlL`sbi;;mt}J8OdwcLa#n;U#ac)$gy@H&fl@@BRIqcShU8|M8c9#ozxwzd$0< zGDXV-8!TGr5t}6h2uWkFnW7C6dus8ER57 zBcY&@`S@H!fk4u^HBxC}^z?p9<8F~s)7ljtOCmBVIivAQw8}6sLjZl;k;{@o<=E&5 z(KFL?8n>Zws}WP@TeQmV>?O%akPu^Rcnsrf*+<7h-J=epFaE#z@BT}aRQzB6<6o0Y z%{J5+Y1mT>5^31a0x{$)%#H3sAL>24IUfqh?5gNJ_;%+S^5{O{gQbiIZWnkd?b|yiN{AlCR0R=yc(Y4 z;^;rDW5YyDI7v&kv0)b)?Ca#ID0nv9(uAH=WVFFE2+M&kktA3#ic%qZHtrQ5$QTE@ zIv;RBk{d;oo=VG+7!xua1G%iQ5e$Z2^myTTRA^SNAA?L7Mm{zU85Js+G1$dj;6o(W zDMx8WjFL(VB$}Bqqa6&(<*;VWnp~9ZsRa>*$Frx80Kq(ikMm<@dV%#0AE8n+`q-m| z!p0ga0-2FaWzNcP*w`z2EJi7s*szV4XpykcGdk~C`w@Y-r{`2EL4@qCOV(0|DFY23 ze*Ydn{^$RgfBUa~!tKiyE;w%78AB9Ib;<9xo@edAg}p^$cvN~mnhGu&#k?5Uc9G9F zo;tM5b&f}JT6n&H(36OaWgl3&o>zTCDmA%|eAX@*{Ae+|96CfuSceru@Kib>)s93- zgmhHe0G`If&PAq1u(A#N+95<>k!WfwsZ8XPJtz{%AK&YEbP(9qh6jb@qx&5%YtOnJ z(Pvc(n?BGv$<^?RB6loIMeib??Dd3LFj0oiNlGQCo#y#Y@|}Z<{X`HWeBPY_IR{C` zK?>8vlE~5Xy0MCbZ1hHdQFr(lc)y&Y$SAbovL86!vq)_jddXoHP%`Cy8R@;mg}}}_ zd{kTvmt-hT@|=TWAk{6OZnl&79V;eNKD)RBW?M9!sB@*o$?6Ga<5OV{w=lLyQn9r3UJ z}(DH%xif2!8m-E&EFFsOTX|O5?d~3bqzZ(a{YC zf@1IiBLrGXK5JU=Ia8zgcq+K+;2;wq2A;NGa-2^gBp@2@m62tysM3M=W&z}}?gULIL> zhN}inr;fde<>$8d_lMQqRCnJ%(Y`5qzfG3-rkhv@c^6hO`eC(9NBV}?AUBfz@Q_EWfkg5rW zl2RU!rW2Grr?xA`X?sh!6BjPHv1KDNVzk7&_D*D+a2=1>xc~TdfXJI8IoeIWHH`R; zHGQ*wZ{6z~>ynBI6Lmx?3I@NT_iN-G^LSr9;l@2D(^H~kjAV8-nVFKw8A>N(l`%wW zza$7pDt2uB86Zj2Sglp)kr=_2J_cI312IPKdO1=hgfuwXJ9M37a2+#!#M*D^{TQSm zqF|9eVhA142cqyC=_!}?k`NtpxyMAzF;a5vwxl8D+PxqMPp%d$)G?cILF3m<)iF|z zm@;GvB5>2~XylSqdS*s&RtL^kS3JoV93%zNr(D_EUys!`>*TXA@(Bk7rl90mChaGpk}kTXNU+L zJ9cqR<5uH9X7nf&ZLjG=OQ}k_P-A1lP1w=fEgxnd(MQiszd{JbInPjDkjiYV$Oz96 z1`-kqky4qGubL}hjALFT6VCcq-1;43aw8n5haBk-c^RHEuw&x`o1sU_jEfHTOwEJ^ zWh@9Hkf@&9@SKUtn6OVB9HmN`i#e5nR1XwZq2-h|F1c_p4<|UQ8So(MG45PMM3hWb 
zHg;u9;Jx%Aw|2_PMFK4d!G2Oe6zsg`*HZP|lZwDZXW%-n?3Yw}LM9aeRkot@J!O*f7YFya?hMr@HJm$lh=EYiblsT*yWKvOT$G^M1Wv){a8QJ#}lrTJN zE&CRp>KTk=}{pzxq?nbX)UxFC-VOVz2aLLE^+xb&kbO z@pQK!5t8rDEJwEqt~2~>DFMfR28$$M1oVyMFCMP(R`DM%2XdJ*FB85yll*VLt;s~r z)@r6z&sCdK8O48cdY=b5oNppOSz5lTHNERe%|IejHY=3%@-A2dBu-T;5RQNf4{xq zPv=K0Qp*qPU!SdaY6rb)SGZFF8aZU z6ZahNb_y2)pKTp`Y2ZIUR$OmV+%D&{D3~PhUNv?UJeu!+m~-U^4sO=`da2noIrDrR zm|fb)FE1;yTfsm7v0}Rvod4R9rzBKp%w1MgO;iyb z4GB3iN(R|8ge|*pH72sFghW@PKne{$IE;)asYl|1m~yI*csw~_k_@bDr0F#`z2n?% z=v+%GTM(8sPT7}_&_XdW37U_%kry=TnojN*e1r2Aqf-)?{<5u}V+=yR8G9c~%Cg#bP%wqlDH z!(COcK{G}*3n51DGLACYCklmQU~oH%^nlXrWBnE^V*ga=A~W=e8F-EGBI2 zKofdSl9Fv`x#~A)+0w@bLC3?ZJUT~64`2L zrfR|>kxUcM&SoU3(>0PeackNIMT;N*HF`N=3X(6o_C4^wxQ;;_8AI8;{uInnd(u{r>>x-A>i<~W*63E zVh=c0S zlzZGv*R)RaawmD(E$L|}*++?nL`}GF791p!U$4ivs1Ig>iBUZ06P`CM=lu@dYnB$= z&hg=-pmCm?xWR=niBUvBp%fuVie$mox0EWOv5|$Tn5U8RpRL*%M#=?(lt+2Rj}|Fsy96l&sgiuYapW2z3M0n> zs8lMPpL5*@%B5lJ#@EVOyCKGe$JMysOMAoBy5&GW;`3d~!$R`Cxu44K7=LTM^Qw*BT zuAudTBo^HEjvqa0`04Wre>h9WG)5EolgEzR+m6qd885c`_+AlFd@|c-ZY-_Wq;foO z$`l3zm$qk-q_`j`bj8)+a9*))3^%>U#snigAMV%mF6F0}nzLT=<0o6r){4LRh2Z_F z1!H)kJwnWw`GM~)6t^Ma$-?sPtmd*$D5{2M=OrOTw2!>HD*4stW9ZkzLh`}A79%5v z=M&CqMWG|+ZWex#LK9{rQt)&ucy|_2%JWAb4(yhOFD_F4@268T?YV6V{?C6Ouu*W? z)!2{|iA;^B?


OVqN%$cT%QTnF~j$bUSaA%)^zVcAOpty6rqfd_d)sU_jn7G^jPk-4_7|A}GXN_a+4EKsrz1R@=4_7Pxbnk%o4;tP% zY~E;@-> z2}@oJbrDBlHr2-*#`_fJ0gZaZxqnI<)-?SUA^NWwx`BX<5hVS;5*}vJ1C_IC^|T@MG&gLTk75cE?&=aIFe5 zH{m!rWv*vTRK-kH%)*?VJZ0lIY}5@QT;P0%4L#2Hyyhv3C{a?QRXPga7_b3|@s92Q zA@p}cWTP)e2uTRs*-J*e+?z?iJ2CvVr~8{#_-_W3y!n|>;9|?xokLV)su~|IL{d4n z>mz~MJLc(q3bPo45L5~=(1ji$G+O0IG|sj3Y)SR>ZqETD3k)eKB`rC%y<)0Q@g_%! zgp?V1oUpHsIP+f;gu=y!-fmDTVLP0YsTsMMF~p8Gw9Mp;+u?$!#{t5eiYO$A6dwm> zVnG!v203HElE#WUEVy|1yMC6FK@NE9v%Tz887#PD8~bDH-cYYuYBgXt-kyPU6f z9k=e5!Hxd2d1|Obd&e>s2rb46senNaAlAqzICozmF(X^omQ2Ki!{mT_`Gn(GVq~Ot z9naeyB|TSmNx$kKr-f;R}@1Ow8$65oR1<%wjd1p+#&y@R4J&SKj(vT(5Y{NENSLtpYdnHdOgKynmc3=uN6xum?U$Sf zn9B(ZRdJf}3 zo=>k5&bywbmh7BmVGNChgDh~_g%PnBdU{_&giRgj*>WK6QOBBFcSWfTmu-oSfq&R8 znW-5cPxfg&D3Rgf5faby;hbKKanD``Zo)aDRWxx&=htWqkFrx_$f=#Xd)e3cFp#M^ zhh~8qa$;0O5iudb<5{yK4F$mk5;BY!NI67A#g+e(t-GYqGmh0gHu0LCz%D4}Vos_u zI@fd6j+n-u-bT*5$YBz=?LE=uEW--)nti>Xiy4L19G5$6%z3_xY^~&~3v6v1+pW5% z(wZlQ!3)i0@7POH`Y@#xp1)n*a+*yT;=pM!XX7^%IojK>gw(tgq4+9rbkAmxqD`O=R5EBJaX7p zofyew@2ErI_qz@o4#-vDv<@unIavwsymyoHPZejaukT2HacTMW)e92wkfTg< zoCl_9$BjJUN!_vYa8hc1SZda8%&wOtGBQmgm$l%Hafgp@tcL@FiW{t zdQO|j%f_;oYDOv|Md0U)Kl#VV6tcRTybY3w^ z6pyw&Z;lgAipaDyynoU0+cV2NQ{2oA`zEJM0()7<<#{BPBfdK?*mfS147o5)t@(C%IV+JDXR1 zM0my1M-d-0vbp7H-SYFxK@Lb`qzW)iB6lVoI>}i#g7@|XclH}%FZunagL?h`tl_d7acA6f)hVi=xRZNsO(MU% zaNN!%K4es_<1q6a6`senp=%}YOeJg4vRWk^=WsK3R8F((6h?Xwo}E*?Y!o}+bJmok z63#A0%u~rIjog{^e0-I1-a2xpd3X>wNL#AlksiKt_l!?W7+mK8|*y3WzB zik+_&s$VIAUVZlUv*K5dz8*;Xvb+0b$MAK5^(sa&$R%F)io@4p6kn@y`Dz?vu$XDI zEQZ`+SkiISdy4q7%7pIhEq^q<%eSVE|LT52k~luTEcxx#3C}9eS>ND%WEq~Zv1c@Pdu^9<_|PLz zcyIC1A+IS5PWMO&&$^12eM+hK$i)aH6j>ZGlLw6Cluq8JiYvC^jGbN5y9)1nTxfA_ z=+sJ?4+U?0HIz2Q9+b>b%HV^8IIO~v5Tm~C0=L(v_xk)s{|7qv-(1}Am1EGyYM>;@ zoW`y(I>QG?6DzdPq+$;t0+qdD)xMzj6=V_>TWY(Ya}`RO0qG>>*jVA5C&t14R`*ww z=>ZmjbC+~six8TL-p7T2jzc%XNy}6Fd)nBNu?Iq+RYnM&Lhs{ZgS3KF=Qvp*L_#Ch zjN=V@Dpvj!tuqoiC6_q|CdGz;7jV|pEW=B-;cO6VNJC+!IBTinnghMht$fC+>-l8s 
zdDgb9-6e12clf8b;J^8iWAVj~|M*n%teWzu-qLwN<#W1FQMpS5g4C6CR2ar&dQ6BO zt#X?F3It?&peCI4PjLtqagFu6L8B_>I1xB8Q)En7`ZcZJpj0;83)hsuPX0X((aGD!~*2qx-y;Asa-=XMx1!sr4cGDL*lHN(P4 zWdnjR8rB;UKU;a`;2W};I zkipQ{HMg=8HV!5XB9i8MP_h&yxlAdvVC4jry|%R zELk;?+w(R5{>6?PX~97v85IeQ6OCdQP;TOQ6UK3V3RRfhlT?*v%Gw`M#3`Fk0gGf&kgOpL_FVX*4a z)^Tnxh$3(z#vGfB7jX5Eb{xRY*$yoMKbQu4$9BJ#$C<@hJROF_oK#wvDFaO(#ne))f{dEu|P)28%oit{Omo-8pO=bmR9!Gl@P_#lu3!Sl~e z)vW*kAOJ~3K~&09=8l(5Mxi5_R*ZGv(JtY66*w@Sld?hi$Um8TI;W{Z!ey_JGBP(U zjTaP3@uufi zk?_NlnxGZub;_OB#jpBCLHm!(o^KYT|M=tA#p@q3#tr|!rb4{Z>58ws)@y3StLJ{R z104}4Oe}~ZMJPeQk;n{m#3YJbOXgY5p~_kHjx&4CHeS-X9o`R82!D+}lQ9`=Wg;>) zic|_}YpA1T*GDcpiQHs-u~XdJYq+=H@^H`d;rW=4*Cmg4mb$wd)WWE13WFbPSyB*! z9hL$@Va$Y7j~MB_0lYxO$Cg$Mrm>0(iZH>`p^#`{$kYs>5}dE8`xQ;UWk8&DL!^PB zdoJa`XY?V!H4WkwVv6f7aJ-gDd}-x_>ud1pDv58L|8HLR*N@Ka5FwJw87L4)#@P`f zYFfKOA}G~9Q44ndoJhb@W4%C-Q0P5I=7274*tt_WS1~nr2x_p@D5gF#Il z6Yi97WFjA}d%kEtM+nWrJ;8;Zd3uK^)I z!AaWjXD5Sf;Mb>zoVS9Hw=Fpt(%b;wvFM(nREp<{iM~mQ7Uvs8jAUwt35{ zN+59098>w8OSdHeMx`8@f-ZPIZJsca2YA2F#5y`3vB9vhJyJRv-(U%x=z=8|?5P=qMntnkV zON^A{s$}a{7&*mCfsB%zJ#271cAt`|J&Y_#lQ94dR~)BoX7n1x%D-q$9^^#7l_5;OmZ0f3~1Z zB$F6!|o^cK>+n%!cbZ%sVY!gBC%O8aY=2}`9GTdugC|nqNH0m|WDft>D2t^1M+@N1pXA zQXg6V>VNnli=TYX!T&5Xv6W;Z zvOixi7W;g*&>T)XK0VjWbJ)uqkIzajt>N~AjPvKuIZQl@PVjJFFcE?OaHg0SfoZB( zwu+x!2+G9q=l31YdP&_!Hg&>SC%iqke75x5n>a>EOlzj~CB8OQy* zJ@NB|->!Pzdn@6|BT3c6J!O!opzGmJzYU88yjYCsRgVk)%UVW%wYYp$9L28{wO=ia zzF8W7eZcMOOxV}O>#JzP*R#OlE3f@cq}Fvm@|BjDVai5`gi+jMq;h0B{46ebq!#2# zaU)N7FpeyH$>ZjV%lMqeEwH|a7?3g{Q7Jw+l)RQ)Ah?}J?v_w9p_Y;5ZcZ*Fy%&76 zY5B={!a;60CCU0`X>~WAO zwDkBGxu^q|en%U6HvWt@Y=&&ItFhjqrN#$$Ews=O#C7LXUDF7z10&a2ULUTvDq|#s zVRQFY{`brC{52)@t84YDF#hJTuU=DRJHNnUc&h=E` ziXxd4yvO;Lfgxd0GULiV8u+zR(!`cyxyLxkc&NWk3JAz?4G zK%i8DQKC6C1#5dq5*<3!TK5{3@XVXYC2IO*qQm7HZTgr4F zBQiGjjM}cQwX=ad88cD02l5CWBTFoTvCJ6B12R!kv!M?g8oS1Y9xFAcc0nVy=xBz5 zn1qIwTj`9>_Kdml@>?>QYu3(LHVPBW=j-AV~VVIIgRiu;yZFKydy6P{$b_WGn!BS|w23?z z%j-sqXW1s)$~`Z=VA)$nNY0y#{j_D>Te6t(piIb(d5#o;zU{f_cf?rm^k)$xHN6mw5LO?T3hJnV~ 
z!i!yCE(M!L@!Kv#2>58dz`7Cd&OGzhuyK*Cg*$~M)t1$^V6G*Zif{>CJ47yGi2TXj zj`u%JIM~bi)izT5$lpHcI6mF*oju3O8-zdLi>hTLGj7NcTdTQM!0)yj8ej79D&hM_ zLmf~#$zR+ExK?3M)IOqx;6?_QO`!2$l;Yl~W0LiJvQWHzzvrJG2A+K;S#At%7$S;; zLDM6m;6XWOVk~!N9f@i9>?~r(8Zk9ArJ-SPgz^V`2%py)l ze&1=X&Y`JN>Q=Lt2oBPok%oKIlD~VY$YsK01V%fyJHfv8+}v}NGr@~ZyOKkb(0PgX35^#-DQJ)!7d@3t zIG9KtOgzu4VV~gV9~crTdAm%gp2M+$krFhmS8_$ynsjrK!VdzQ-MHJV}+3Orcez(49A%MjjTDz{!9{C%+~{ko(iL7f+E8O%J^aiueAz7**IiwSx-M{s zj;Rn58{1*XO-Pi?hU3CtM;=}exqUU@GAvrcSFYVx8O>ks{)XtWzD1xhau4fTX8JBw zyrgz3v>YTGljMXp>?qY79TQSfvUV@1rKAWm+Boosl`?3RgNXQ7tXY+zNYKKdq(%zp zZ8O|IBH}`Wh!W?!Ap?y;2yk|X6kt@2k_lT^aiVV$iJUh}q*MgCBY1GZW4ng2nsM5t zjMWGe4Y?ZQu-LF8(<78u$XL>=il$#vIYFv#^ZCy4;K1>nTQ!s08R~GtXTPiY@O{Z7 zOZdfQ&(Bv^w7#K_8zyES9TIkN189772pNgNg9jA^M(<-8SjKg@#K@A$S6p;0GoA8q zGIT{2wc)ZGJntJDdC_^6wkHOMcL600TBk^1t~=zX*w|rYfr}kRjj(Zu30b$JOpfS7 z#ga30l2NJyBK}JcN}*>6B#F$~`cta@5~&9Aia;{eH%LgibWbs|#EYJ(zJU}H7kf70 z66aeAQ6fY_ieZ*yEcykFw*#IH%ZNFxs2QsRq))KXG0b8+j8JUifL#NDMlq99GzmLD z49pslk%|N#11k?MULm8V@(t(xXJmTJk-AIoJjjN<>+XMu0u=`q@7*Hd!*xJLILu0} zx(-dkDsK3Ecg~~TA!F0B=si9ZNP1!@z$dh#W7RBSJ!YvU$f)^U{XYBV0jbDH zlwut!+Tf_;25(EMaDnp!7BZJ5t*B{y#YE3=1W*Y!G>pU)8!crl+4&Zu61wP^F~@sJ z<14zjWy{6DZ4VM94T?cB0+3+%{ZcSWB1#52zhk1toVS4}CEuE+tU5ti^gO9E_RO%G z^UHx1Lou=LL<#SvozH_|b`T{S4Rql-Hp7W-UU0U}&y%_Z-zYPPN; zOW=)sh7^#>fYB1?Gd9*Eh@9k5D!5Syo>qb{njM?ZQv77VvYqx7CKUXUFTn?nvcRp< z^2ILWS68r?<^!)*4+5i&Qye5ygb$33;L%QVv#@;s&6e3AJp3bt1Wq5fJX;vVMWpxe ztEZa(blx*d5^^0mYb2dhq)Kw$25fK`Q}F4=Q6_@1O1WAJG9US{s`>ujpoqV06N*~$ zSMRl4JdYGIrE-!-TM#0mWXG?o6^lA&tTOHt2^$N&%P3@|kAYn;*h_|mQ?910D#aoW zQ}*)|Y{=L*0}KECZO^10bGMW{YaK3XS|15fu&8oy2^ErmdDrrMlhOycRVsEaAmT7l zkSR}FN3bA9Io>+9eZU4!6f-WYr4^c73pxpP?0H&ceDBb*jUAs}G{bAB~N#n_ZObqMb8^$=<_48Cl0-*trBhbD;3{TuWWei*4)VpPPYlQ zgZmRf@S0zJJQRl#K_VcP66Z9_DA`K_z17sM=R13jwokY-s+ng!yDLrSCHJ#HDH{IO z_qUuqP0(nbtPCe*pd7V4-(*M>QV7PWVr>uPl)= z*@7thHmr}ddfuJ 
z;JkyMUN-!v#|f7g35D|9ye0VV-Ini-8>TWNxB?dptGJ|QP3}kS=Esn?SibuP^MV)Zj_asU(HxuT1pDfNXa!zVWq)I@4x)j$VXh9Ds$&RHxhqfVGfW&va^@C&|+jxs!FtwT-g`6&{4_>Q9#u2 z-~RXi8YLC~%m4gu3DP4(Mox+sfo<5}qM~61L`E_nc6Xv@mM9)IAJWJTPB>cMP>Tuy z%;hl%!OUb#Ou$9B2v_tTmhOt$TRJ+XiNQyK3d4Ck(ziKEj%aZrm=eXFCh=dD&hS3@uVCffBzGmfXHa76#w&TNX!_%gvlnHxj;Dkf zaGXo3u4I}N#F((B<`@h{DvS=)-jS&U84WXCB85Upg{9@llr*uz$PvA4=$vKSUy_;> z&kipleH@T{M|#ROY*~eK5C#zyN~RcTaIwMrmP2#Md*AyufALq}=imKrzhL!jMPVY} zy%|tKb6N?6?D@faksGPyY^|`sm2H`s$h}dZws4#&ZamCLM9bN>=O8g8Oc-m!K@Mk4 z;8u}wl%}XSLLwlE8X7N-g7{)`)ce3v_KNgkBvjH2giSMc6can`k5^_C)mX(E~BEh_2R(}Ar6 zM8nOjq)@Q2gR0jDu$kwjU-8kx@n~z=r{J%jW}G#WO&>8@GfE_*#B)4{N4uInIu7%M z+qq=jdz5sX+YPfs(0I*fyNaVUzm_~W=BeaPY4Oq0dd0GD332Gcoi>)zINly5?400M z>6sWZRqTbR11AD_60s zOLkUpYaaRYzbqK3oX=l4)~@C6o-3^I{OCQ$vq|Kn>FI;uNdrnMCX=4qDI8{=k2g8T zv&c*a)>iYR3QWxq8TB#HIY%!8|MEv0E?mN9?I{w^kB&P8J#Ur?d8&Bi&pF?WdFPJe z)-5=@=y+afW~nDHEG8&sg{6%RI~xh!@KLqnMlmR`Qx(}n%ZE$F%4%k5%ULbCY!dcT zLD2Bi$A&}W;pmXX7ZuB?Yw!`K#_`Yo%ZD7kcNYNtD&p{}7!}uD zzpoFsy()yl^=DrX!d;iPd_A}OYVZwSTie2yh2ijTz%hm3uM4>U9A)BlN7&M1gCo}h zDPb>)J2Be+-@wyFrH>?X_ft>u&m2_0!+Pn!UugvgL% zmYODB(S0-~qFOWhrVvpJhX2}Va*bdh)TFN-17adkuh?-U3bLqEa zqC`c5iUR9*pcrHlAt0he2#_&iWQq{5;*wG)+&3rGq2XEE<6O#lxMZ#po-~3|kMTiZ z1blZNo;Dp17LNnLa0pOL5& z#`1t5Jgry}y(IVs#h9@&T!tMNt|JvGBQ+vP1||IM?1HWZ6Y4>AdD{sqj&7R>96X5;bT>DmTPZ0)fB1Xz&giUvZEdj*}@y!h5$G zl5xXkrMbAs>4PHGo-14P?lk9jD}@q~^F~s+p|}YVKHh>Q@Xm2yv5b7N)-+wgJcrNz z_l})^%)>*=yAK29iA9+`?(3G{Z{T^|aJOukCUb62;FDd?PuCs)d_Utb%lU9Ac>B=t z)^`-Y{263Q@X=yTlr35(+$(1M`g9ngTlO8_+Ux0@w(ng6kCW2v((X( zYt1IK+}#r#&U$tYEIZG$E6KmO(GFSnu%wTBY@Ole*z;(uK?F|gA;uFUT-FMyBR5Ve zKKytLQYdKe_6tbC@Sr?E-%Q03ZNK zL_t)6pFE%N*0d$lp6}moxZEYgZD6wzOf+Q1@ynIOZzP2ZgT%zOoa0#el$$fpCuf>0 zv3zGdrqqfjb;?=0MND8{7SJUzfxp<^L50RUq5hx6^4wHJm>y6 zW2_xMYBpAphyioqL*yujo4H}_OV<7cO~1tli1@;=%q=ZJR1SKWMxn!DTysVB` zhn^~QNEFL9@_DyFEfq&{LZJjyF0!^gFMUT(N0gDuzNEEVLI}jz51C#ekO zeu$nB`>$QM82y)>cZB4X+WB?-_v;adf2=khZkkk$u45g6MCZfeLMnPHY;2H1V&h<4 
zQ%W}5`PIQc9$=y=(wwbc(1}lvf9wP+5cC%bJ1Cxg#=`X^ali(;7?_Jgw3n-*(1(;UbBr*Mc;;bB{_@4OQ=He2+pfm-l#SKV&c7WIvg*hz=uCG7^LwabyPm>PcR3 zkhk2(HJ>gMezQF%w1eJ#t{@Q+FEm7hmYP%;#>#NfZ;=%AvFF@v$S4spB@)=$8n0Rq zfdhSl!m)K7Qsksclf;BX22#cdnb3zG9|X0mkx^q}Mk54mSYs$?V}evUgq$Q4034(_ zjg$!KY2${cb%sVTcIovy$MCnS_YxaD&HvBVoAp?hrRRCin)Y<&6I0HWnUytlHLFLG z-IPhuys>OZGJI{=hX09wjA8gjfDPLcK)|9UjuN}buCDH`uFA^FF*0L3^B&jaiyfJj z-BJL7II$ye_KCeBV!!MAp6`7_VBIF{XM)lOR<&m@$+&DHi?%}u!ymm<^Ym=Y^A41h zY^-BoIzrnq(sQ&JQF%e>1RLilOe7Tr$5qF~fEJNkL&Kttd^A3y_JWhDw;{W3rpyKq zwB*phNLqGIQJ02M)-sy5JUcCjKq3TlJ?0lnN9#QvPeik^0x1MHioPgvXO?s8;}L)N zw;d;Ck4I!_L~F|$!8=95d98R{?-;c$heN^TX3VX`GtrVw zD@Y{Njp4~xk<&7=mkO@ADc`@g<>hM1gNdc=U}GI0>YnxfMQO=I!i@~}Qh2y7m?V;s z>DOY>CN#A|2Eoh9^UJrlg3lI#hvk~z8}Fk<#CnaAfuRu`W}1a_yx8Wv zQ}n5%$GenN7&5K+WZ0nuoMd}^Q=L+0lHZ>V`FazmU0^!#kSYePU}+;?ZUQ?$qw$fw zM3V%;W#jn8N&j=)N+ME#mXQyJhGl0_e!$J#Q+mx;X9defaQV`(_6-8~<7*vt*%BC{ zg5khOKDy_~o;b$aluSBe5PbPE=c_f$Q_Y@{{ZOLGSazN}UCXO=#wQ;Le(~$b;VAN> z_gY?^X?8;3@(DMNT8UHGnb4|J0p@ilq`yI!-f{DwxzgP3|2Mxb^T=T5VnHHMUml-M( zys8ooQ_sd}zBvy(-R-EH<7Pf38EQVe)Hv4TEC3zAai`OI*R* z{tv%ho&EOvzV(23T{ZsgINfhY=_Eq-tur4Z1Q1fu_etOOmHq9Zw_ecoS@#2@xo8|` zLD2YsMNnDK->)1EHIgkNX0$X!0xoof=m|0Ot$LwxzV06@4fN-~Gk`xl^vE2xb;gU8 z<9R9B#wpLc7d-S!+IB}ON2q8JK~TljRg2!z+KSf*hZM38e+WH$ON{K~8Eajz@dq4m zjXgc(#$d!zE=Xl$YcpQd_gFi}Wmj<$C4@*FH?+2TQ#BV-6Jwtur)2sj)h&jA6pDx> z#NHVL!4(b>u3RAcnCZUy8+zeQD^Vh%Kna5h8X+{Lyu^qRsTm=JqKZqbtZ<>}t+&Y7`U?<(!{mKBYH~4W6`oOY z0Z8&>iY8&*l?VYlcZw1Qt4bh}G9U<#mIX%SjO3U?DYiCn+Bg&mtq;7Y6#wxe!w11- z*igBK3X3G8rEjrsyHh6F4Qjuj3zuk-k%){kuG!D0e6nX56&`OSi>2Y=+Oe?-xz{Yi z8D+I53eQ}7y4a_CRcy$`1T8^D)#tlJht>m#f!bg8Jg^XK!Yd3J>oB1XQ|^l)AB-hK zm651GOZDjo33o5-FK+z>9iKCo1kG@A0gAL><-))RaOD*n|x^Ihjbg z6qgLe0Rl-EeIFSu8piSv6LT6LX+=pbYQ|zrCME>e56MooVdZyR+7-FdJjf2olw@li zk2aE*WlmuPI~Vx(r;<-E$Jp4iZ96udM@zvj?64tHijpaF=IjwgWD}M|BBkrYG(MhI z3`UNBe(x5W1;ctk7i!vAGZWWnyrUESV4@Kz@(Sr# zcMTOaJ5h0<`V_C{r9i64VVd&2J;OM0e1ET_NOBsRk!elqBxhUASWIytB4rD)z&k<9 zj+7w-F+d8gE$z|79pAXFu 
zYaCK&Dp!y&p!N%rE55vpi#}ZtSTr4vHXRT+8W&fQ4U&P0+!+d#jD#rJIYFT!*G7@^ zTJWqCto@E{*df`6s9CrrB^UhllMhLR#)pd1&$!&eWnf+(8;*xh$8-iW<38RHtN0phP4!aJfr)@iz?zY2d-6VLGw=;7{hLzpU}1k7v)Mq;i5s zizUyi$V3l#Z=xyP7!ftWb=)2Zq{w)H&#Nc(s}_R1)C@ z-~FCqe^3%@%km5|nXzmIiIV)?g=N_ZCYfeO$7z|Ph2gRdC@E-M%d>4xp?hKbs#07Z z1i)~AI7SM=vJ32G0oRNqW+4ma<$aR%?wAkSRqBl52_NFpumkj7`c}ZHR5e>%hOhT(F->elVDEe^i5LIc*f5 ztt*HbZAiF#eMh*MpxcbbM@~wKkqL*XVLFejy1@VSGtIMV&3|(@@cA;liva<)F@td@MfideDY zPxg*DE)|crlA~O3kO`(qkC?pLNmf?!Pu`Dw|DR2G`4>z6>3=;&%N@_Zvi#;ZmM>n8 zxU4!ho#biRFi9fy@svyjZWn?!2$r4ZFbiy~;QxM@P{_bxl5uxZQ)dZ3`x=fnh8J$j zAI@$vO=|*<-~-Aj%3Z?E_aisPOK#nC{QMU~UM^C)5IJcysd7A9DK0ugCgGpl2z=+= zC4c$zJwDlMdA89^a?6*Cg0+>@!7v+FT)Xc0`dsmDruYx5fCw3x_8bm9m*tS!NrVjS zCy`SJE(%U-$%C<{NIiE31(}d^q2I*5ep3Sdzr4m2es}myyph22Eg|~4CM@3)PU6je z{97R6x2v~OCU2gjPg{FE6!vyS`R!okD;Ps4gh()|V31y8=R6m!BRtLVRTG~!?H1OP>c6>0g zyf;WW&d9j|&~OTeGs4 zEc}8q>v>rTF0G}G9oN!~yMvU&WXMPzpyMH!1Z|RDXdntDdo6-^Wp!q+f;O%= zV5zUx>DTdm-U>tGePpegEwdM#;TyvX_{ck%|eG6Pz{;FPnAX z;6&3WAL2;JkWo{|img5E#g@vhf>dPBkP~sSW~2@=vY_nFvA!miBM!6o&@x9z!T<0- z|4WQa_<#SmpQAB!p~fi5g+FH(x6I@o*OL)S6zpQ3bw7|fD_3&qFR3VL;uc7-GGau5 z7M>_OR{oqitntEA#3?g9BvFDc1S}mw^gh2@N^&V#hhsD;*US+OEs0cY*%AVrb!T*R zC?PO$z{XkDt;S+0UH{k81*mQ0qO&wkBY~&(IaO>a*$_n^l`Uk)ULFZSvvpe%GKMB& zX9Le_%hOFt=+>lDiLN|1ijF`2fu$?q;ihHhcMRl+(mAZ(A*CjwsHssgvIsB9)Pz>H zL;*s0B7H?W2+7*5X+%pKEM2gi>=J%`X=uFRD9gDs_Wb_!EeTM%gj|d%jNwK1oHkgL z&{!%2Fp?u$YMKxcqQ^#3ncxCgT6~P?D=WK*r*vC3-5HbIP-w@Zi6j(^bWY`4CVE8W z8DwKF{h$50{ z$=#tMSMax(u zFokWG5@|VW;5TcyH3-b}jwf5#*g&E*MqAFd30g#6touPMJ?OY`TeA)sKYJqCwUJj% z%k|7~r$}+pVR3+^^noBM4s&?8g-xeewied}+*!qoN0P(gmawi!>xSERHMxKXw;H}U zQB<8~FOi(JC2KBtf1L2u+8~AHakJ#v&Y?-!xQ2r?>$3tnm=u;58_84;5D4z?1@=D5 zNxO!xmzrm%hQEH;FpD{40xJTK&*3N&yeJioS5(e(EzkJgOwrnu|L~;Xq7j_bf`e4B zpIFWsIB6|I1-WsQAwye9$UTpi5h*mMt>Z_BErpI;cJQ4WTh>*Ik(SF2j1-(Ug2sEk z**V5(><3tX`XP6}cMEun`9%msly3&5-tf6^M3drM?)PnZ`mIooZ-nq$;+1c$Qodz@ zBjHWU{db~{iEoCth~GXlfI=`-DLzKVvY;@6gG6$B5V>8y!&!TS<8DLgEE~VL5`?~= 
z{ude{QjDze(p~*L{p%=w$5~jhET8c=mx}AJ?s455FfxKfTV^TT9~8{=15TTUWmmFR zE7tC$pPctCA$BO)M+}GHF{%%uNK8ShMx=5=#(-3&1l1r!Bqe7Xw_H{&AV}q!T$%p5 z1mrA z#@OsJ&8}giWoKV8P&2elslt-AJ3$6{g_(@#oe3(W(&TbV(>;d>V{;8hhmwYYo|DND zRa`Plu3d$3ND?V}BqlT5$_CsVcy0|MT1_}_Q@+|Rh_X+>6Dl&42ehIg5r%=BT-jvp zh(wIa8M4o5yJqRndD4tALXf?z$W+ZNOL%`c=kBm&WHQdG9PdZe-tw}0+#@NWBSuY# z{Xku&4Bjk46ZqJtt%#6a!dz1x@ z^X-+JLQX3>q%c%|1u@`*Bh^EM6gX_ZIg5SdvJ#RuY+1P#0?kks9LYmc+5i2WyGt(o zg0b2|dO_p+-UUHWvHmt&i4QC`AV4}g&AW@9O zh&HsWIVBZiOx#0=z>DrVh1dhtQM)BU?GPf6hzuKAPTet+IA^FGLIvi-$i7I?NVc8A z2|+3pJ_gRaY3~^0D`x7DLM9v(f}It}pctz$Dsen(PdJh{u_Qd&)%^R*Efb;0M9L@o zitZQ&!tu#8W3+pR5J3u$#c*6VI2=ROxBiRdCTrI+*HaG7KCkSG0GOybl!rVRO=+BG z%^J^=Hukn~U$4QF@YzD~*T*?Q_BYX0x4{Q+wnLhPLTjS${el1>wMQ9hln@-~ zDRpcqeMc917m~~Fk}_^ULY9wNx{}uBh&UidPiHHv)C9Mt4JAu|P89dqHzP6yC+;a+ z4Y~l_FK%(Fw)|{S^3^tDtPS%-Q5b^|pkl|3La{AZys9UB`m$h>1hf()7;ff~f4f^! zh%q%Ck1NSU#GXMX@W)4n$|k(31)}Z;zt&x(w4Qn9xLpMPP~Aic$!Vkbcx14#ie88H{S!(?g!`JTJdAOdkvW=nnkqS1Hxrfbp*6 zL!m((@=<2^^jt77k+Fg2RYXY7L7MTaRb-kdQjyaI!F$6IkIHlY<@+hiZO6lv;zfJG zd&LoBqxgCuD7(lk^8{IOYBhOGcvS|zH&2jseElq8UnIP@2j{Kiv?`FsGt>i4o0flA zpK_oNFiO(6$a&*fsg~=7;r?*ISJer~8GD&Q#K_WWW+r1~;G?-<(@v%fsty%X{=XM1 z-Ypc5c8cfif}8mP&IYax3XW?Fry1+c@?YF(S=0#uXhOv>GgKaavDAzbM?OlqC>7EQ z7Nz2{j(jwgG%@mE+VXUh@!|D`&KrKcthlrd)2hJ5fKq}VKWKS*D*4GzbH=*iAAa6j z*loK=;~k4u@h5+JNb5Fqb;Yx_9rX;j38!G%LUJ=e4@uyr*fHRSd*aA)ZFYTa@4PQ!K6cgi1L zBrM|@!j1W(j}yK=&sh9B80eIX&Ld;THKVCriKiu+at+_j^x!CsUdX%NutF8G^<6wil+MyE=EgawXtftFSjM`S6Ap)dcI0>W@l#E0i znH0=*K_Uk1WZ$kYjhe?F1*V|Xi6jJoIADJ4mr>E?vIrCI9Pa+ae zWj$*LXRe{KirV`=23K@QDGAn6*&QJUj7m|`;Cx4j5hWxcczkdqdWeq>?=3O(ZVSBL zHVFM~*oAnFnOud92+6nIfY$*eznj$e?OWq_%=FFvT&hEfgkidebv4UyLZ+uQevOMQ zN-7%Px5ITXD4{WO#2}fYgrW2+HvW!;BH8Ln79@lkfnO>n2 z5}_nrSCQ!aiUszhVuYd~2#=uVW-%jTILLZyqG4w7A#y#-$Ye%k-$&4!=P&&UP6i~3 zGj~d%#>AMA=z@kdDoApjGn5&b79btD$Z#%2I>C8a@zwejxz1_hj_4$XI3&R^*83=# z5~ODsW{~s&6lu(n!n2p&M#>D2gOHMoG1tryM7W-53N86?uVUAJz^+SqyenC_7LQ`v zN-pCXBT{DO21|cT=U*VDVXAM_ijFRBQBpI^j%dS*jlW=|4oOr_1l*MiiV$E!gQPdW 
z7f6PB4nnfA3xd3IS@9i1eSj%OgxJ!!4ZCnb%8=G?3BjYZBvWIwcf=66@C!P><(>S9 zk({z^GYSsa*cFn7G?}sWTZ%Y_AQ42$WJMbs=e45>in+c;N{&NOU}#0dSvbQ4a59oI zrinWw2@`n@6%;8MT1q^DE;_ncfq+8HKR%Ob7t+<(XN{23f?1=GYb zG=b6vmaU>N8A=3ft07#$M%vbLn^mrCoeG~XD%gD1S*y!azZ9D z7EY6jA;B4PQpS2p6*i;>mff!Ra2TyEi|2if0W z&Kme~yTQ>RUBW!^j7-8=9ay%WZ#J5FX4y-69`Wn7WR?dmZOOf2&aYRFowwAn;(l?B zX#!I8Si^BO=EY8v2tlr3mU`A*;Cf-$*p^S18y;?RKAH4L!OC@HLi8aVNF;%^Z4uF+ zW#n2i=YydkhLj_5lkXlh;5AtzIQg>S&M=a$0w3?SeC84$P-sgr6iAT+@OPJ%W-IvK z)1HY zN8^MlN-7^18^dAVv1p+2hT2PpNkX$5b5RQ38(F^D&XGtCCTmOz+j>ky@y@=XNF^`M zJwAH=ei7NYn(K+--bgYso+mrat%2YmGweEt^^$03%+rK-h7B((!C4bI$P;FnU_bAu z-4P&ISxaXHna-GJkyQ&nKQm0S$g=Td8rFWzftisw%~nO0bw8`q#RV^#H4hTWy`f}T zMlRZhosS$H2)-jEO{rP5ibdD5wSpf`#wa29e7(;^Ckzez+5LSag8%0$!>)s+->_UJ z{OCrYDN;`Ah!2`sc8f%LR<@&bJwo{6EF;$rV?ARf$qIps&{)aQ1lCF9%U5HRkfb`H zE6#YlnXsrN@6Ifu3jDJVb{yW`qq>Ov^kYlj3NC^{DOfrH03ZNKL_t*KPMPt=Lh!eL zx8hb2nWqWgJ9PZxE5-3j@co;Ts+qBAB=f>D9kkq@N1nWZzkO)PLWWdO){fIA&~+I{ zS>)!O0Yy@id&{$(CY6#ji+o*`L=hRPAtPlO8O{Ix*_sE%grD4Wq+aopy%Fm=rP#p3 ztsw@_`%}k1y|HCm8(yta#u|JPNE6S`PbQpJl0UdD*c(}X^AZNR;zhG&d6sdn2+WdR zZ&Xeafc^cB59>L<*_@)!Bim}92Zs&IMauO|@_4J!LScPmPxsx=^Ga}Y;LuWHWZ=g~ zEuWo^`0!AY=8~^o8m{Sr>sk7?_f7jpjwcvT11$}+&?MUOd#nIc20dIfpt;Zq~ zgCm57@Fu)LT#4`BwOo6BPEQC8+qmIHRgwsWRTTkGBvQvM%leE&<$d(3D5yn!RTWKe zz9a;Tbvrs&6Qb*D(o$no&MZl|KOAr^%b5;4wxleoz>8I+Do337GuC0ju3Mm8LI{ym zWduAvI;?MSzPS>;{k4g_=4^X0pXdP#HQ+*xxb#OH*nQp|jJcUhu8jkk_5>2jt~a(n zYlob-6JFM1&Und2T+p~3#9my#wmI{`5)io1;6pE_V)R7(TMiHYt(3RlU8mo@*1x-^ z{MP3><_y&Q$|tt(7SwJ{u4f1>aMYB+UL}s@xY!To#pscUH^X@2ipkMJH=y3_LCi zl!CK1!9>O8qNHMpj0G{I>|)hdC`G^Fh_OLM0|J5<7=|d(p+m~fp3&-(ACE`O(kTad zOP)p+7%n#OtED8yDO>Mpf?{jWNMw0sHz&!H8Bqw<@wBhZOHUVDoUc&E;Hl`M!8yxN z&GBwd#ftMvb1$89&nC5YZ5W&(H&>WjHc|)$kY&{M?DWJMktZsL`xcTHsKVd6h>wU zU}w)zvPX2f;1CGzCbwDHmQ`FJrQx*gxDYFd5sktJk5YkW?NbUl#l*4s{%u<(IUqN$;-Opm3_*!=zuYZYx(X{gGBIYL3!;dWVZlg_xS7qUtm8#j5rU!$ff%nU+fs6Am+YAt+RfNA z88WO;(vka=IyknmVkky^7fA>*F`|tvRa{YF2*MFXhn78#H8pTAAMxq-IbLM=tI&}y 
zN>VN8qUUKR(7s!iB9hZ2JO1H*m}yHL~G5o=rVu(`wq!$mpey>VooDE|6M z-_dwy==t?#$z^q&+e62?6I?W&c`B*BU~4_&Bx0;$vs6qI!DZ`tx+~Eb3Q^!fyR5xY)ukO?*x>fqRQDeD$~aUu9`?^Fa}(I&iHXJj()y?NlBxgti*`Ns01QZz1Q zl7v3WTtu!90;PuzL+c}50HY!wPYfrGXVt)dqIqYiSlK`q6gu{ED6cBZrt9xNyLQEc z!4xGVPs#$92t1KXThWA$yTuGiP9$fXI=;F{IjItAal!qu;YS~Pe*Q(jfj-JRo>h|8 z8#Y#BLtxceh=SZ|2HT9w5^^QDJ@9;BG@qVnesh-cbQ$SFV5}uFdTvfy+AibzVS__* zc^W9KVt**hNo;fHxoB`u_i{n3DZ~)|J~pPguMqykd9@nz#Zo;#Jh~RS-tyJ8BWB!j?Af$i#%w zt+{MZk*XKFxta9OAV4XD3Yris{)&?gG4%FNA>e$=s&hQwDTYRHXD0aMT}KkOY%bxe z=Q*FgyvrB6eO|t*eL;Dm<^ch6{Vj zNq2+UdJiEj2WiSdk}x!$L;c~YtSmiM+Y(X6=o6X>;@mi z)wvx~q&*u7P^d8XoV5!}rXEL=x z1WAyQL}cWlPe?2M0_z<%bmT^$RG(7kr6*GZluRJ@!5xZTwAFqM^htGz9+9gFnI5x? z=U7TS9YRDhIi&0^kwSpzH##v!q=P)Y&V^qw3}Y_hk~9pcMTr$HHZrxLfmicgBI4 zlenBr4cM|q5lE!K`4$%(V=-c=6Eay4gXLQO0jKqbi};E?c?80ZH1h(vK6foq{_0Vr@ZJ|7XE}A<`z|4Qg#a}c}Ppm zrF~8lcAT|KoYkzlgilwIh2OEKG{4*`ZkPl_V1A_0WniiWSuM$YUzJoL;k?^Wv*bV@ zaHJ1e*@m6#7|4BAuAvSsBROGYG#lSBR6}wtc+#D*t%kT*Q;U)$mN)_?c#3$450FT~ zNQ`J`aNd$BLx*Fi5)QBaZ+jeT8_A_XtCpt~$e7Uu-*@J;ASLkS#^Q;j#v)?|vg1iD z80Zu&5{~l7Y})bVTGOC-S;LJ2e1!8>@Mu?3N5T7poGwHfE16}An|aC?n;ngF%#)nj zd7e}?M`_Ca-0*nUkQ*4uJ|(#gs18 zTvnQ8D>-Q-)+^o{L+KJlBk)1+c&mw`V?P!Aa=qk>a>GC+ zjMNwr6}Jb5Z3}Db$V|g>BN!@$)-4Nb*-uBf7}>i%;Lkpk{I7pe5|J!y&0Z!DddVOd zW(K;DGm<&kSa9M23DiU#zv+9h(LG0`3gCV{9ch!*M^;ye6tqJMw;*5+VCIGXBZWD z*(Q8&)RAb1n&#Zy^cYTvl0^3R-s^>6D(>@WlQT>dF+{pr@axYLHamq0@Wr-IGcA=S z*Bb94wKsh4aLebXBcARwclSCznl=3SlblT}us-m^L&x3&c=$`nPya>ETJ7=NoiHDI zK7K!?D4@P*_-3tdR8v-34i7Lw6* z#mXx}^klN%+%#3>{3Pe|ZxUqYIjP6|@Sb2?lSxY7c zN^f{~Z29EQmby`V`h396k!6;QX+y?5QJlAeWKZ$#b;rBgz_UxZXj;}*^TFJ5UIl6g zH-{FB=h?PEOUqv!d%kNGt@r%>!t?%C@ZoFm`!?768guAHk9>3NO5OEB=qhRK?aH8d zOX$AMhl;lzNuhg}fh(~VWk!#mTBJ;fQS_CtH*BzaOLX<0=M?OjAyKp(seMA6;X{H- z<}~R$bl$TQyWUIEm9(*8Z`VJ%d^EuyL zReF1(CB*KHw{Oo@i>qrK-&}_`s~`E+4(-kAhu>O{+O7~$pjC55V zV`lm`N~CQ41`#8Q6m-kZEouD*A6o`yijf(0w?s>W(FKNrsTy)K8&XI?9V}X1Qd-5L z9?kE$gbx8_LSLg~O+b3!Ilt|j+4RQI@u$85WEVvOW^jFbh$E!MXfnK6|&3DUE+ 
z$5>aRbV|*dgY*U|372+3BiD?@0XvqIzCwg0COM?@6^F@`Mdz@gqU|>9XYV7mX6;@P zf+I7UktjIpYC2WoXmC+s#DH9P2$7TdAreg+6Dle$n-|&&Hq7jli~5L2D{gdda4+7bv4S9KSegZQf&tDYEDAVZN*X8m#c6|<0x4kLRFooT z#5Ua^#(9WsOqUpA#mgG)gp^tqgi^B`rySG=c$=~ttuwD1I`mljIV06a+laY4<5sfA z!qn8>P|78Rx9kRM1WlhLj3}8iYi4+#a8R8Ba8NyG;m7=7bOSGPQl)sZu&gDXp7vB$ zpy~*Hnbj_B%R%jGI4v56Dr6&)9AqUA4;FM}Lg;!lR`SJsiDAL5&NhM27#FZ?9M`*s zTiuY|BqY(EerV{cl<#gAe6g@hRuRU*vmz%7Ya$=uJoHmbh(;*OsXFERnkgS`w?0p4_CP+XYllwNwe~Le5KAxG5vuXFWEYHzm(A z#URwYJ6z|jQH@9%o}MJEjG`A9P6`O6CDo2`rTFWgD%PTi&A4XfHG65pm-7XWvw&&6 zM&ksrmdvV(L7?f00P7X0Qe?)W0?8MP8sTHMh!}>BRqfc0Bnww^TERE^Azh~_XO^B0 zm{*=OkTg~jxizd;OllC+l&)fu38p#xyBjOcvL0X01V_1}i3H!i?s;~aGBciDU-06r z;iJQZDFp*z=!TL(qY&P3SSX<4QC4sxj=9%wcwRcbwWeq)$EV8$wNLre_cB(~fKQ%8 z%xgjIGoG(Xe3T-}gvVd5_|dK-Rf?!radOt-%d-L2OYZay0)-croi#@?^2}$7<1%2o zSM%*oLvB+(c-Jz0Qjph{j}K##Y09&eWY~lKZ*B3je>x+QZR6*91bu5cd+c~Ni}~Vz zhw<`^!{r&@*%)&DLJ%aD!z^HX!(m*&rZ(&kOHP}H2j_jpm1JZ*vrO{MwSu!kqhrgw zig}(JY8SGd)a-Rj9*$d0cdOg#yE8)UKytc-qz)KJ%U8dusqQsAp2Ym@K=J3-4WF;( zXg?%TlC`8j2Qeozd1-h`zMdqAo=5GMJUmh?OGhB1_TPt&^-jX=jTQfRMT3!j-D-pbR}D_9z}}qaynTO=osn3mf7j5%bcNlh2**tyrLbLe5YCmvzPp zV+)*X1)o+4#y0KQ6D{gYxOPfk3XLbAjgVBAF%x)&n7h30f0TB(a`Imtfl3H<2M?6Z zickqUdJQFGj4j9*gN3H4u(qL{lN1CZBG!E>yCl&gQaz$^B{)Z<`{Ztl77>M?w+j~E z&BpsC~moY2F(K^dp#7 z9!nd_AG!{)Qrw7poE4TS8!Y{tnu5a331t^WOzAUfku%h{N#p>8XAml8qJt3-`iSF& z!qfp{lW|P>N6_trO`LBiZMz zZpeOWSqm+9?`7EaKF>3tNXU*GzBpOq)7gTij_K-cGG-i@Ln0B8>OM6EvFw9$tn37> z6TJ5r(Xf)|gtAK<4r!W_+GGeNII)jVGQcAV&`1?hHmi0gL73K^Fc;h}pQ5B@6z<`j zWZ_QQ=eusJsjY{?S{hkWQPCAWrgB1$HN1-$`!f? 
zdQ3S*sFYZSbVS12@hx_Ho<%7bXO4^MHb0&P925n+aoaE#YsG#lP|}jwg3?G9eufeO zUzgA5h%VQHn^@7Xa8r*_tEYUrsETj2;RTRO(S*=zXltrb-k5rq}m-iXulipBAY zs#H9=2&fv*X;I^A!9Z-Fh324sPVF1E)jpN8ZB?|J(^mtO^qjg0+J(qMV|)iq#MAPE zKo?vKuhUcx=UP#{F0YdrN97%T*=1ArFwT&h2I(ECN*JmDX3IfCS-ZYqorgo>a!V3bjR^<0kw0is(`K^@@F?Ie)`;y z28vBRV9|6rTs2%vL)IceZY_^ip0hf~`hc}auo-mO>eoD)#zaDrn~F#Zt|wgvfn{kF zW3%AT_Y=-1JVQy-Wz&4UCA##3a~cd;j-dpzvRO~7a<(q zRuR&}yq0WtYX0u%l-*#DtsrJvH{6N0QPPvSjGIY^SZLC~^ZqyCRl^$@-B3Q9@a&1_s1b}Ji9~W-B+M$$a=M_%9a{V_lBOvT3)~oM649ajwTC zb9}rM%w5gRUd~PmOB?bZZdUyBvw#oxGT!{4;@Q)L_xBgvm<62qh%a7*>}}Wh%+XYW zPft^Zp+{KBJ0nY^OMd?m^aI5k!RcP#%ctDp$=HvXsfBu z3c-3*^JJ_!J}vohE#gmZ=PWbDSq;6!u_@uyV_g$KrCrizldrs*+f$x&pDY+eEY(uDG zx_SdmjPaIOcM%etw=}kDMK%H{6jDXRqQ|XH#GAtyJsJ?NcL{U{Qy6CF8O2eDqr$PY z8E54S8dq@TWLw+j5e-*vtG%2jYsI>|9P$%@6alBffTMbi$K^G?SoZlw+2M_`V}F0b z_5GNe+c9J{`I+J1*%}Wf*LkpXJX%gU(}yh0Ic2lNI)kSUJX0qD8x% z9LpiKUEuMQW{$@asuYQ!b}J-;*rzn2M1+zq>x8mH<|bs#1rkX^fp@Ou&B_#~8Y;JH zhXQ?rbrr@{Bx=|W!r2w2Ux3(P(kOm9KLQuG3`}oXx;dR@h;JOR?IXQrq((ezjsQvQ zdsxv>+at70sA-$EHli*+*bsb2ZF29eb;5p!qe+L0i;_222_GG_Z|a39SWuvZ<lR3gK;lD^)-OOFcLLAJ^+u%c}~O63SE zY66u|HVeFmZm@-OrpLNU1LoT8Jii_ z2Be~kbS+&(iI9_K&eBbY=&~j@NM%Fj7Od+L0pVpOw&u)TkTGSXHtC6!SP8yf2$r^{ zF%^x|Jjo=Dg@YoeA1E3FxvSf{Yp*E9iiOMAP`eCNf-yr9b4Tb3P zEPo4EIp(KJ-dYAsE(Ct8S=fw6*%;#;UDnV-;Du;2plU?udn|2*!_yUA7Jkgstr&?7 z8fTbQmN1Z<6)R4kTYfdyyphCgq;R{tVt*)DyOW@zh~i?w`Bx23k5m5XTrsIF0h;5c zMHbegq$3i>=7gLXLqDP?Q);))OiWn$70#Cof)2vQB%%Y-u@)v+t1!Zl+XCw=B$`Ak zN?UW>JYyu+83hSa2$r@XlmT-;CHENskLIvyJWp4Gj*L-4&Jk=m<#qhmeN6@4eN2hKc5(kS0q}{(KWNG&6u8)4Y5>g#vwx;^5Hc> zVGVT?@S<=`tB^$_h?K+NIInXMA%(Lb4AAi3TnP%*JH z*!QmU(@#&SjX?`dV-h}}S#l#WB4Rt%I0tK?WG{{IP0Ojy8c#O>;}p*p9hSAlQL&u{ z96$3sYI^KP4Qq)YSgz1RN9SMNgL^}|ijCtq001BWNklKYb46Ir5#yQ^WLz{{!VMyi8ZrX%31EvQX+(>6S}t8sOa#^*&(SKu@{V3 zPhe8l#3EoAwc>xTquEO$9_MpzL|Yu>2^V?H_rrw!B;mKG2^YD>In8^chDQs-Md8>@ z1j;#{K6E_FYSyEWIGHlN*kD;pwqwV&Sa4!O2BG2Wti`Tv#Sw3HJyKZS7*y22vWhs* z0?so)=$ zMHca_47oq^JeY@!$CBIqn##7Y(!Lkujl_A2vK70-idZ@x)g7u?kK3C$fAOy6Hy=gx 
zx|01(Prug?1&Y%H%~ss-B9naliKmovPFIq7wS^P#waK~L?Q^(F*zQ&=E5-3jak#R4 z&})ap?+pxtq@fcTj%&wWyvBn$4C0C(h&k>eK?uqAH#Z62>TvyF&dG1a1YM8b*Zk)1 zLyk}5cK6l^vRadj9a<_1Tk$;8AOhAjq{{I5L7(%|L9pUKUCWuwJJiDR;a0}AUd_{s zlsi4o#}|f!Ma6EU`OoiXM0ghE5a%RkmF7;@^6^E=Ml4yHg0r%EUHraYaQyo5#}G5F z^0}AoO6{Fwd;2n%E3VL4aQQuPDW*K$H6Y}ro8&JE1+8FFGX5hW{VHP&yd2ts9(}IG zT{7z^te~Pq`7U_DqP}ReyiQY#Hk(=70X>-oLj{9cdkfBz0XO{oq~$PZS$ z{TUp6b-~Fq#b@V|=Xp-y3yf>1{pzJa4@FESEtOea#!5V%nn<^AoU!bYxpRzbC`?Md zgkYuk*O7a+YO6;g&QmL%4vL zl&&7IFf*L8#IoOtm7w27ZLriXqoHM13#stdlQmPM3`oNv)>P!pl%CqgHU@;Gab@e) z2~Vs%jVnlHjHRKLEAS~^7zV0?a{_Axg~%AFgho1MO-U_^mS&(7J*}`tp`~Y1*GL(n zfJlU_s|~c2EbR(IiN#@UiIxci*~dCb;g%#p!p&fZ(`HG=ib%vv?2^(ghJ> zGUd~+Bfk9?G5^iKeH-l>%*7aI3>PPYho6l3`j?iEkA^HWi#H*mZg{>pCr~>G-xlZO zqM~7fCu*ZagrrI_wF?5d!B*7e%sj=~m~feLv4p z`c^!p!*6Ha0;|v1NsrP5NUQ&#n;Q6Q6w3J5vlgQ0E1u?aO54S&hRQc&##0q3 zu}VlH*iKeFJ?j$blA|K!&H$`)SRe4L$S5%g6O$^zR^oWRR2UD}Vo4|=x1LFzvzlw> zO+*Ap>3E)xiRYW_B#t=EP*$QuOzumrC2P!@qO~1_WIKgYDBf6iES3>B_6^$~Y_avP z-$1&C<1d!{=m#nB;+%uA;Bc>GtJYi=E+L1s6ARb=2GzGT+>*~b`pUsDO`p#L+v&p5Yn45<8cWboL^g1QG2^1n?umfjv&A8J1 z@J-G6iRH;eac3B^3TsZvhE!>~TJhFEaAQ#N#TaI_;BXZ(h)Wi=V=b~c?^$1~@qWNb zDYz3v^g_#gq*6pg(e-r4mn?qQzn(-yjqd2Qs$xKo3~axeBPxf70GzXPTb?$e;#t{&-Urw z7$JLICVx2Pr+;|N;RLDUNb6XO~@UjO$(T`e1b6$Be`%2;Jm(5<{@{9K`x6NDL?$rR5eJPm4RT;v+ z7@m4Xf$*0uv3vPl-GO;O}2({^_*j%`YGD z?*21wyql1`eVx4@CEWSWjQsnWM?bNA{P~Cni#r@<1t;~C%q=g)L~+UcZ7zR>wsA&^ zfI+a$hT7#?r^9zQ0!ICYaVdE*6&x?&!Z@bmoVhurb_Isk{njD`549~Q?Gle`(Nm(0 zgea+zLQ^e4FJHd| zIM=>tq?CjzrFJF4ODeO#(a;UIao$t;46Q@*YEEt@n4rQsbGayJ(RNF7j&p{N+$8oX zV>?GmK|s>xhh<7FbF}EUn=v*=bo4p~#Ypc^xV+7y3rQNRlh9>NcbV8Z&zdvt z&v%%e1>FDND&GF(=d9fbxcw(1I`?kz!?!$Q+9N-j^5E}Y@Ut)be0;J-Z5+loSnsGE z%-sn}gfylmP!XYuDa?Xcbl6av-0G~+)tlVCW=N&sFb_F87ks%o=h>>@0MFcHNIKL$ zM|e+9?65Fr%VBhZiEyH?L$ge z(vf{iyFl28g&Y&RlvG40*<)q1b^+u%crWnA;#|Xq-ess0IwB=iAY{qPDAvVwoT~B8 zFs@G+gk5@J7vTa{wnR9=!dwu_4z*jb5w2ln#MES$72P#!Jx47f!ZaXmJda$-bwZ9- z5i29H&ZC4N5t@-)M~R?aOuL54NJ1Zx3OK741SBLfX3=>5?l(1mbN>N9{M!X@Z&`+W 
z0lhoxB)50?)BiE$-S4jRgWn%={~uQT?l|O0X3>BXGo%Qp?3C;A7Q4|Ke7&f_Lq~Th zTtON}XkqC^@Rz%NI{g9jg{D~6#KLl#h0JTu$16wfa+ZF~K&`VIbePqav01Y4=Y)hT z>mC=)1$}v~tymG;2o%OwWbzEt7&>wbry8UFj-f?_t425e@wdS|=g8evQFKyl1r&+;8GbK?e z+IR+D(=;8HwI|f==hI)mCNvvdO9Fa<8lwxVr%VUJ0jGbT82L)$SczP_k zd#zyLJimXMV&#~mf&iKx^W8%&RKl-2d2v2IJpPur(yUYB=iU&U{h^1l>R7~m+=b;k_95stN zpch$=t2w3~(pbsdt_bN6_De>a5hE$6yK9_1S@C=cznT?TS0U|y+g;7WWz3&mD~P3fS*- z0mVhJB-VoatBSp(!+Io$l;k+~WL1~FsAaNuQpb%U9IhO{nw+u`^|_vCjDw%O2-#V8 zP{Oay9F;8@jTJY$F_X&kx4#V;MI~uy!6=;deD9r{hmT@z#69vx@x{zCt}SQP66e-= zQACWJIk(duky3nmC>aC+cZVgr`;On8X-@K*^{8f5$COU+B+q$M873JhEjVqa3`5Dk zx?Pc8?_lbfXJ1*)7Y)~vh(PoiL@-%;X44kwH?9Pa%b1JOkpvYL1#4kQY2oN3=E+j< z$%Q65skuAS{GWf*@cw6Cv-?&=@A{A%fBpvl@qclInS8@D@b zMg_n6!V(QCKHPLnpC~r^HJ{ELkqhaChF)NKyl98)KNxy~HOaTWTk+m^C6lilAOE3C zC#ZRHF=OA?Ocp)PGB`LYc`}G+JaJq;&QgD=O`hI}1upUa* zYM6;#PV5m!)fX=#0;}@8eR$_+Y>x3YLJX;_V`(a~qUQc@&iJR_#k}*6YgFz&#+S(G zm{HipH=1P$)&%rbh}9{f%}{Dg>83>blB+8`mjUP2Z6if#O(YVua$Fk-zOmk527=R7 z%(UF$w1Bgw&(u^b8_Tk;S=oYxJIA>iAw=5}*A(r!ed&zz@|@?@9Hj#A?XZ`%<>k4( zYUXR(^4e=rx18L|^ZNfsoAKHLgCqOQbHq@q9!AE6SR);-n4JHA|&&35;X!3TE;}OOGL`To)P$%!cEaS zrpq?QS9H}T5{Xt3rJEy3$V^Q?F8R~lRojlX(%=QwMx@e{M3QT9pVRz02i1h5`Wa(i zpe_d_10S=-b#i9Bs9sRIlHKGshnb{tmgTYLpAQ?lVa2Wg%d%>Y5HuvAp*w*2T=HaL z2&E(pLMjtd8jBYVwWtY1N-x@@v~3AQCtRl^`y{G^F^W#qWq0J+3^Q&B&#_eeVB;Ev z3BY?EtsG~iVB9QlNKzTH!nRl$ThI+R@U9_HAq^P{Md&+3K_Blb3g)cLITA(Fw2MNA zCsrwq^H^QC3n1S{g|tZ8*QJNDS(1rK+nOjEoNwqQn@FJ%QX*x5^`6QY61sH#fR2X6DdM1DZ!DmLVGw)B8AGhroXtjTMf zlq({o(JrDZyNvxQ{cw~0aGU4#6P8U*SFMvFDeauw$#n|X=d8J)a8rcvyx)0;(^Bx0 z1I@p=Q?e~JDzDj!V$M(758%?ZukFoPA|(`P8WR%u1c9ff1;u+Tr}a|+uBls0DuKfh zH|zXAA7%VXhrBavsN6Y+{)k(54dTRdBNaRv2h3{2W)vgjyKF~2R*gf*n#z?3-yu{r z1Gi3X4UzIFpAZKMiRA6=gu!+|+N{vaoFMQ#IqY(pN&eqc%cAsb2I~w}?=q4lNtGaU z`xqx_oMRwUayKOuJsOeYL`*+Okq!#qBSf>REV(!*P$5%uLE{UQi_tD1rHhncWy!#A zlao=11sct)nJ@@8c(e093%4S0H4AsfxjhD;c7pwmrZ7E5ksvpYNGTr7VLO4%Fy@W! 
zcAI-gU>iee4WlUFqH1UY&8)6D$Pyx%@Hkgwrfv5;%wl$<9&hw_xY0Fye4&s+^KeyB zIgfXnESd`{YBu7SN#hxpl2T?2BDmX=oJhe|;J?@inT!VPTzBPu;kHFagaL>p0ym(^o61DXMA(m zV=M7ISvCCT|31h6CLz$znT8$C4P@srhw~}trQz)X7_T@l9cOjLehQV<{OZCI>zePa zrzqcJ;v9Raz~yV)8+dlMB{y$2+7lfm7~UynUt+q@SBNYX>+c{j;=~r3nFH=;=NHo*ef`gYldOKC^YEFX9<6KW5vNr@$uM_1(sVK*iJ(-?|EaRVLDSB=7L9G$8>an4lh6cjO6-y zPB-hZSUCo}g1^{^IXP|_+`042OTn|HraJK4yQZ05SRQ62L+w}(6ZYaR2P?SUwXEs} zVb?fVg?!&TZf*${GU116#l!mnu7n?5Tkv2KV@lXc9H)6qZQ!D`I4`*H3;cIpNpmhM#>_@F#<7G-begr((5$d!rJgEO(@*D;y^-@xbqAIBTiita*bc;6jfFe=@w2#T% zlE#+phqlzzwrVAy3h+3Lts!uH_om~6@9$y4nA{4^AD>_j8%`FQ&z393bBV^Y;)KSR zl%{=L2N+kivP5f8+=-H=pPK|h=^ejqg6wwDpHZq5ecbI zFj`WEg3NeU<|YeOlDV8xWEfwxR0%A`o7OFtuB}&YmKfV$Y~F4;%LpM_%v*b&19*); z{Q5}Z)uQ9&isz;4&*img<0n!mx@wEUFWV|&S5ldxB{>BBmcT%Q^94>cG&U#D2}%TL z5eet;G-wgimphblahZoL+bUyvIMEJ2I$=mWok)Xc>{2 zQ?yKwLgSPr(3)5b`1Pr3x8YVbdDD=yV(unw2H9J-qg!-jmveKD6`nw~ZFI_c24asn zCzNi9mN80pQBLva-&5?pwL_?2RWzJFIKxj$etYhDu)1KRHj!h;!~7v7#brFCKnh7| z3bYJBLMpcqz8wgRWDiY5L&ZsR!t;8?lX9D1eI$uROd)1$2kTr<6peQT!cjRzS8ImB z8jjW(+lue;EZgI_c|ptwq#;!)9nooXs;*}4kI3B&EdxT?C2wZLs*m+G2+M^zCKNp+ zirQy1uENvc6$G~B<9781q39yT5m`MYRGoHbz|9Dw6oEtuMPCoe>WbXYF-?jP5yoj8 zJ+`Ak`!m@LhanPuLZwOEO*)s+h=n<2r1p7QeT{IQNOw4?J(a8QqC}`A0!Lv=gs7P} zj-@?C$(Vx@1QF|cL`Fu%oKW?snwE*(TrNO_g!8IoO?MF{v z@c}Fv&%@tGENjoS2lwX{XJ$quJ%Wf;EwRpV=APm)SZ8@NewR`$@lGRskCmU`jb%KG z_~)PJ?8OO#D5a|tZuLEn=7IpY(+Nn0V;GgBT9X+`tOIV!4eqaUmXvIVYeYhGQnmH3 zH8U!2DVr{pNH9odc1Bvs z0V`8ty+C-uNbS+lYXI!^3NB*HdI(Rm0Ou7w?O95T6s^FsUeZw&hdCsHCXfwEz+M{C z3oV=RfcrB?WhEO)mnfB32Zwo%p<-6|sOJIJK`)Y2Mq|7uQj+~>hgGJi6$COw*)Ew? 
zJ)-kCxvr^I1hF^YWIrzB78NvR#=NqFb3)ACoA8*ixM?8m_r*RO;m^b^L zLEP@Ft?GcvL~MopPFwG1f{k-J32#_}7{#5hX)G!J)-`c1+ z%>qhuwPPxoRSoN5Kvzq8Dxq>UpU=Qp#~Xdc)D+z9D^Bu&&%U(SFCG5xJ#VZ{=@76c zJo{4gDreTiudwDiFHjWD?TX4F}I4Z$j>*YkY9MB%eYc zJrcpwmE`*ylJ|Es<0VXHip5WA=Eah6p~GM!I~v81`$?FoglQ5Ng)X%N$fWG>>pwA+k)B~9?v1Y)NP>* z`Q$ids2u%{;s5x>fUHpLB$jEVxYKJe+OhH;Z6w}FM3t}`4LHp_&R7x+JF(|_P|@2= z$!0ODx}vx!3GW@jY?1 zvbl18ube+2UK6-iRmWHH274)xFEuXimDm-cea*eZlU?#-uPTZ!ht{r8T1tk1xQsb~ zzbtIuwVTr-)hnEdu@jvolV7;kbmTW~U+ zb5TxNngzLOe`cX2*3~#?UOLuF1|)hwKipt5++@EyAngRKGRLZP98^mRYsr}q)4|vh z>npr#ox4zXXlzMk>zDBpDK%1Q6mg5#5m5Ohm7kM_14@%!4h{Ot;l2iF5CjORTNj+y zoZhRdhS(aewRz8&T1d8^l5}6)|&8DVjL}5mDF!A*gMM@il>J3lkI(sqaxT z2L+Y}iAD;|#5|`mE4tw(jjL$vf?5`M8Uo!R73(bB8O9YTB4Rlt73&yP(m^xS>zq{w z7+WG`OOpshfxD7I^eOCu+GTX~I$hSWGd=*auIU|ab|U`l-v!+I%bP@94N6jNU5%k-?vCi`O@voW#gvKyD;%j?F3{9|K`6RJy2H$z&=0nl znr z69VCgbwKF41VS^?9a?X=u&3OPwuxoPp?QJDlhUW40Pl&2=!6l*TO5|aD?*VmmfJM6 zOM&VfnTTnfC(vMRi}E4I?IAs}yw0ttXuP2oB>@46&j5IlYaY)8mrcXNa-X4EMHHI# zIHK{MUa&Myrp9vapOcsoW8LMVY8k|m&Ddcul%mA9JtDm{bcW$F{;Tv0j%vYL6j3?P zWo>9g!yrm&tYa8DKAoSi6_43UVtPSDszEBp#LW5GwB(|h^G@#ukx=9!q>!Gk-_H5` zG^VU2=hKL(bJR-l=D?GV8umX7NVTVQo`r?T85V8G{ygBQw&X5ns53sANN%oq{_ww8 zp}Uz8?W}Moig@FnU*PhV#kA!spC0r3M>G7)Qs;@I#$mlj2^fV6DLh^{jw{PJ(sTmBTbX1lDf!`xF2}i{6KYmFnpi?a%vLIx zwH}G)MZW9;xRqI6=AK9@I)UWG&Y3#HNcWl9OImqOXG2g?3_z zjRcaOa(E#yczTfm6qSWq2mI{PQ~Qc>6j8KLTFH83>C1+npC%YAJ6TH-ON8|lW;rlM z3R>$~?bLjBrpUCwdqwLdsWQAQEKlZ&&CGLDTIR;FnMr>4J;C;W_$r;-Ypm7q`1>aa z>)1A)KX~vAdpM)(9VbP|}re0nVT z*?G-xZ3OHN42Sc8_g_Y6VubK?L&-%G@>}Pe z#rAH>cogD{#MlmHvBTt|;`qe#_(1UU%Q*)kq|{TuA*7~tB~4SJbU?_`A?&LJeOd9H z|Lzl3b|s^Ka2q}7(+eX;L&Nr+4&uFEqpdY%?V0}KF$eD-^Z4^I5B3auQ#h(iCT>b$ z=d`xLVNpV(M8H6=@M^ZoH&z7S`fAPkZ*Nl^RqQ`gJbY2{=wQOUfTC2KG&PltC`?W5 zTFezsY; z0q>4ID}BkGcUS3l9Me3a)9fE%E?7ShlJ;c z&^Uv$5e0&SW{+8O%0O?URg91!t)Ef3359SJ&6J{@;CxFjSR;~M^5&9S6=ZUVl*=1V z8jQ)^a*VAB83j9(ERaf}2*{grLY08VJc$awSeU;;=)c zmY?_9m$X!zRgzdk+fIq1jG3DPiddvnEWil5V#KlCBb3m(;EI3u^wkK9;nH1Fl4CIB 
z?vg+((Hu{S5uMU8Gmu+YX^_$~H3imp`G;>QZrw}Ro2J|y zE~py8{^gj}Y{)n*dHxH*-#iJpY%E3N*@-h!6|f@4{QL9$tGx-!IE;C781c&8h`;_H z9C(T7>}nZBlDsXrldSOW*z)>a$L2T2*fQqPLCV3I;dK9u*h&^999NE!jK~*;xh>Il zOl=#KsxijVVi<{#xv4OInY-Q(Q=Id(#&T}=xEa63cA9~&`CxvG2S&jL*0v;Ckf?}B zJ7eGNabbsOuQ5JnpnHTO125?45bGQZHw7fI%;*P8@tX+Av{4wB&{ZQU>sXB?LglQb zhLO&AJPR2_A+PnfNMp&R(OguP{R_v(-gBq#xtnDe8*tfH#6()fBa8DO|+UCr1b>KAV>u7JVM(6&sNQF9}7$W=At?8Wu*fc`Ko< zLO%V|M+`Uiu&?ei9Cay74DO7#{@Gn9D~J>7Y016EC(M3!!lTC-^MeHlg;Wle)MSf@ zS5_pSo@FG`b5a@BGQ~J{bP{;)zZzo?7c_Iv!N+r+?s-z>I4D9cZN+7!d3(@h+^=bB z`0;TBXwI5*ZYPSs2AtQ+mbh7KNK}l&aj#$VH)nH{>@kQm$CY9sWBLIUu4U3XM%FV5 z6K-`9o|YGs#t#9Gi;u?1Ggb;?s)(2i24}=MmE;;gx~qpt@kMtO(Tz zOU$e_>~>mmFL~oPHQgIAD9Pf%DV;&g;Bmu6op5JRv-iCggI%)Qtze^DQ+Uf8eams< z34)R}*(V4T(bj+;{jY~ez0X(vXqQ{x>e1iofDqW}5Hqi-9=&8U+u-eICzN}~kb6G- znV`xY-@MxpbW-e|Wf;T9lZwygf;V5uI6htQ^*a%7+;zP3?E#m23!c5yJe|aB4O`lY z;@0?rGEBG;6?DcefA)hKyO4Cl1&i7^S#d2{bh7Pei@M^t`UZ7VS2qj_e(DDuZSjb#s7t0h-Iyp zkr&ptw5H`6Z}{TxiRCElOEI=|oFf@>r{C~9zYYE0xlOdOhVU@``C}H(ri@-VZiEZA zT!6M4s9DBK@tnrz2$ti2;~RvNgv1D;=msHC-SXn3;9q{%WB%2T$yCU@gD(BFCfJt5 zyX(BRrdjWl#GzwlEkaC6f|(}vF=5_86cN&=SmE%(;k-o?5Q!9HVbZ|!^OSI3GG2{& z_dUl~OOLtG)RQi=3(478hsz0^&ETZ;Oj^llV>mBsCe4D}E@-J}eNO8t>UQb0c|6`P z(=$9=MNDbDyslXeu5QN5Ya-A{8RMPB3wOl@FC%RH7oztXg}J^4%SY>Kj4PSSL%dn? 
zlS>>RBv2_@rdVH*l5*1S5$TLjXN01Ik_xFb!V8?RG`2)~#UR?I_AQI%l*%rMf{Z{$ zENC#kMS73J5vd-HE3V&yw7x>ifYQzgWVSrGiGs$?L0G)(lLUshR}J^Rov`vx?-H$z zX^zjCeSCoQHM>ngQcTIp6vryzBIMLu;*}?n5w5M45n;h5UHVk6=2qV^>H+#25G=BY8iD(tV~FT7A+gTK7y`HSQt2NPmpK^dWE%ci)mXC ziGZ~@;E9nKXafZr$-~6Us!^m!{f;ftnG2$ zR!rI&BRkCf8AG{2Xf#@C7XAz)T3UBX8urP}8JA{`KvJj+LQ+=aThx@`1@q>N#?0wO z8#JyWQGE;*0!5@Vj9W7GN@~hxLaIk(!SHJG0K-^e+YBWrYICnwiIr`Kn>{z z8yFW5kl}Giuj#VNFpRk{7xdH`i3+$d=hUJgqKk2!L@7Fqal(@YLkcEHuLxv7Uk*98 zMK-ep=Km^P(pAbp0grWX z6SOa|qUF*Zl8GS^35EtoOX+F~yPROm{bc$5tIYAOm6Ac~IL{S$$t#^6Ss=KqH5a94 zukp-mMJQofC2WrjI%bc2je5YSPZoK}`c zbIGD{I5nfMI~dz1(-E6-4`bn^T2=t>7mkLCfzDWuHTZU!Df>_|>`7j~6*Jx)k=r?| zZ}yo!EvZjR^2-IYx!|46mhV2*bj6Ty6cV-}PZv*Fv>9t*L~auiO!frj;{!ha zUdzGU@~HYLzqgt2auRby$#4J0kdeMxu1KP(Yn5kfM4Tkd?P0Vqrakb-S2VNZ) zJiSzu24==^cpCFYUosSy$zH>2fBYI=3B>>Sx2)fQS+>gVizBw*9&&M{c=XeXTYqqa z=YO-$e>yCw@jSc8_`zXL>m-_%*Sf2WLd`+hqJ(5C)4ZBRJefP5<`v4u-~>u~wv!a+ zVWgMwbXqtf-z5wy&YGMgD7o7Y`0R4I;Jq4oMj@QlDW|1C2Q8m1=Dad1xMvG=c+4yD zCV`A;oMhT)b~B0B8Ly6~eE&tlkKWJu!_AOU=(s2}2Zd!JEU)h7eDv{xAsftXK`+So z#q)^PSwayK2)HN(<2q&E7HE}nR%z_d9Y-YyVfcUUOWqs?Y$uXTudsJfG7JM=%;d6# zEE(`--!h6T9_OBow*v0olx+NquM=&LaZO9{qo1(%vdfDn=iJ(Y^P_~FR8l(68#@KH zfT(F%m<~^;0e^UJ$~$?&=Vu{h0WZJvoQ-YxtG`S6_chv=6`&T-@zl&40m>hFZ#@08uAaGasIU6y&cbsw8i*>xQH;)9kKi8S+c9L@{y3hP>=@JOlAav8G~s|f8Cfsn)l zMX!bJW654)n78nJ25zc(`1BbK`y@h=sDObSP`T!6T%K|64_UY=^Y((gIYS7ARw2gL zcn_h9*{>7sSL^JY>{9&Q1B@)`>5O3-kt$H4p{JK^IFXm&1RW^}w4#e|_Y^2usFN!U$Uf-*Oh`&Z!P+ZqeFPUq5dIb!1GY>B&^n=?-8hZxl$ydor~p`>c3IMvXK1(nV5-cgwaqj;AQ zyG-Q~UOOtL7*}CTL#9`j2TY*|R7xjY#rhoQ4Gz6rB=QzzQ>Kk$AXkZs z-db8L-x_E_<&etZyvJF`K~Xc~lr_CUBqHW*gY}-{MZm#l3%)WaY5E;r32@RSl09kw+4=0r{Up=K|4w9Vtkv?bFj8rQ`ZTR5S%rRg-IFc6eSh& z#&cf5idtbHJcV3gW-rN9hQrYpJwg$)@Kc0WoVKS7#5xkV5$^y3M=J465~@M`fP_+V`&SAMN4U+a5bB8%yuWjv;ylj7uJ(1%Y&Ihpy>w! 
z;{|VY68eEBH;&Uv^JJzNMUKU16W;$^a`tZy2wTfIJLhx(KR(qo*7NP{mWR`bNo_f5 z_85k@5F%hBzQviSDV$-XG_}kSG9qp)$KN|6BH~~XQ8#@apDoJ(RvX3t@@F%KQOI^$ zvXRX>uTmN-Fy8P=FXXt;e01pvl;M@Gp;Zzl;$+!#J?sakrx=jO62U z#oeAD!1HLXhyu{U(~A`krY#$Z+uvYWNc+7J~~d~3__!N(cL=Z@z6`%H_FEN=K(|A78Y8)=1gVB+$0@zo(^|n8#5zzGU8n9843wdP}i;w`CB=Y#eG9hXHrCB!{_TY9z{d z%u&NP#+D}w#cEa4IL%w5mPk5|^5qo#&4HvF4CyFIC_K;R9i)K9HoV^Jb1>69ez-IV z{$R`U{~d)4LrGqyoK=F3_C#s}tS8hCT{zCqzMNv~Ye4=B_m=B;v85>X{;Jr(+p91L zeTDc65DqCrz~H^PN)B*WLK(iOLiPqP%olF4u??*$uS7Ig!t2^$ysp{3Vt!dJ$641R zUBIG+)6(J9fyCZ9~y$ax(!uq3mH?fyWaC3EoSTkT|a?ttWhQ zmp}f;Tm056 zyumw<@hwh3AOoy06zzn@b3 zvY^Mjoqz`=0xCDd<7nF{mX<{HF|I<35G6u-;RdZQiRCgg*L%av9TUh5g~vNj-}SQmiBc7BzUMXw-tz zO)xs|Badex?P3g%sl!%ycfC@rd zR}+dptX>Gooeh7_iBU)<-g(kqEQ79~}uaE)`%q3l&F}J5w zX3jX+WF^>S<`&e>F$`j6?F?K>NAAM((_rb{?DF;h_A7iVS6qJbjN|wBX@sO8 zWjf6`UN|lfO8Q!HI&*lT=&2CLGWT0*gY}vqJ>`z>()D8|VaZO~V>FD(Ix!a)1^X{c z#%_gaBPnJ}sHs(o4=-|zFPORsUU)JwB#{xHEDURheF9xDZ3MB>%#3BvO~_gW=d#rbi^q9QK ziFATOkorCV^Gedwj;T=y(QtoOvk~<02+eC8y>RiUGf|s!6mbRGPg7*e1X_!_>5VKF=A*J`nN5H4MW9>zD0i)@vF9 z4oXiT4X3rixQ50@yc-xws~Pk?0>xCt^m{GcMa^m`IJOFfVlz?n!X1Vo%o9H7{=_nhV@9zfO&jtw;+yaG7&Z+jXNuD&HPxiRp}`1>!g1&% zroiWdT$KuSRahrFhdlum;)I>_j!+|0tvD z1hT8QU!_jNF0H+MAEWvHRqM*PPe8sP+CtntoZ7x z=ife$nYNNZ3eIZDv^4}$@XkPRP$p<;=ms@gndGcgln#c0V5?h`TgUy0zzfTrG~s16 zXS*X=Nj3795KsLjtLs4MFI?9zMKD)9@RtHfN(CsSP&nUSjW;Ep>oNttaBjs_OI(yu_>h>Gl^bJj+{g)Zjf$6I z1)m{<7!hO`t8rY3QMnZKUMvNZvr9KM2^x}-C%&;lxH6(Sn=<{$b3Xr`<@jKSMJZTR zf>{NnbxeK5tf|ObN9_cqu`Jq#nVV7jIpQh~&bX4s)mMcNOE(+D(n+^9&MjNGuCLwl z=gTl?>zZZBi15F1Sh+4J5klhKG9&ex)%MqE9#;n>EkhImDFX`5v7%w_j);91iKO(G zNDoRVq=&kl10_Ov6ajO0a+Q`*ld6bRXJ}RkyabsscNaL}&{x&dP6!&26Zn{pTxISL zac-IFVPr)ld!(|@%pQ?9=X9erDl?~SXN;mXt=~d zYz5O}$;}`v;Q#<207*naRErM|`Oe=i`0+7R&M@;wICqRDCZWf)Eva0~VWD{Z@19Wf zHW1DtY(mtFS^rv1Brf=BDv>yvg~!Q^V9=v1BjzSXN>8LM-9D6spsW?$tic5A$DO-GD4eShzV69nvtxcugb5q{?wKNtv6PslViI zbc>hOIa_*#m*1hc$oxeoRj)=54^jC{}cUP&JEIQ##Kd>*}ms?CnoG&GN0}e1efHkrffq_>lWt&CUwPL`;t@+=;@fkHJrNz 
zr8#3&ZF5-9@WLZhjcPT0(IXHU)*EV9p@m{xF-P@2P7dg)9gJvcoLREk zq2fhzNJPfE-dPq;+cR=^2|zz`ytIacYR+L{kf4Qpve8fbDmC$BX(GZ?9s!^W^^ zAg~QLyPkzfs9R4%%iIbMr-n2%SWyBUjtb8^?|QNuf}8(vhhU|LH=cj==`p^Q1Ovsz zaloRI?Ek;dQPqTxk0l=;E7B$lpj52-*xds6lz58m6p}P8N2_% zYux$6n72Nhkgi9heR%xC3&y(v!`&YF(*<=cQKOij?@Km2l2k7DYBqKp!PbCw0lk|Y z>fH0eU+hzS{0x0LV-a>({rVbbKRxD0_Z@F-G~Dx^**u^+NwA`&A8N*N$Z=sfsWisf z<-}0dxM0Fry~@0i+|FVSn*f~^MD+liqc6u8*CIuL)-gLtKyD0kKcyG+IkcXS<}-{@ zbd;uYF&E7_u^RBsC}QuqW*|XI!8mDIOIik@=Rf*t$xk0@W=7Bn0vaP(?-;(ar8qhh zc-ykb9djMCSJZTZhEb&07#LdT`S1*0>j_TgAzQtM{fXjh;|3pj&P&UD;b@%Xql4wwRaeKF>SQ#?B z+2Pk!K{J(n-of+wx$Ie^Spobg`NH%&b9=zZ$|NA{2T!b8#n%6dJtV?*KU-F{# zjMfq&$NPLSbu~~6Z#{BFgWG4%`Yz4*CvA(v9la(eTkb<{|2~k||xUJbK zzeG6ob%fP&DJ3Lh-1gk5`8tng*@iWCtZ zdv$YMGQ(1axWAwCE7UK!lpgQZ7xFiE6B-NNI2Pw6VQQ(GkVO-q2N^PpiPzRiZ)d!* zb4GY{#NR%N`P>|VkT`2-ZH<)6?rVc16pG1%iVuGBj2~TIpoAfokm(SDiYSzfI|J7G zF{^#g#&AZr=SWu+>5U=r)+)hZfY1uxHnj5v?Zt%rXv*b-9QTVO+J&O51dF-l{G`Fu zj>v10P_P~>8C*-3+776!q_UEEE10x`i?*S#%@<6z^@h?I8f&TAmWrAMQwn=Q*-o#P z;+7T8*0o&o+1^{^7o7IeomgHMz7W9I&lU1I4&(}F`NACriKb{TS@?50szX4`%wJ%o zC8mS*mRWO1FIpjzDLvMxm{a?bu3E?VhE_C4tqEj@+7^U*nO<>Je~i{49#3gz1S%p^ zt4J*=n<=yQkWgo&^f+uDAfzA+Gq5dXdx;hi12twMmUu=>&fLwI)(N7lIDfVv)fOQP zL3coPxFB2~5wG^ydacLGN<(>^^Y9{N+E`Y?8_e5FT33<Z4eui!;> zgcpW^8ZnFoDs4&E73(B;A?SAvJL4H?B3XGeK?fkSnC>gP#H}SeJ4f|~xaorVC+AEa zoU^C`j1ef)FhB6@U22pG=xEP+pctktudiC>MT9k*yv$?3Gix>H(F${u6Dg?M6eAiE z?J#bIqK%leB~DfJb;d(eQ}`vm;=Ezro-G>-flwtZ%o&B5V4Wceh78m;-Wz&y6`Z1= zQl9il`;S0zZdba{qqm${b&P7_K)qFWJj4c)*Z=u8E2?EZiP2dG&f?>g=4X$X{B)l(vZ(Zsdg@qAH6ru;<uJt&cMN~4vhN^{hKR%{?W3&dD?mo^ORW?)A*J_kdOvDtVS`rY0Ldt!u@%{R(ywv zx!|mwFb=jE$uW(F+*Q=x;G`t;eS{aRhaGBfcv5_Z5(#6q!%z>2L`dZV8edTRmd4E~ zm|&@ih)6_CpvLsnGJ`W#j!;?N9XA}+EwS%&*?0z#T=MXZVJC?ghMv|*x}r-2d@}J& z+m>;v83s!>aXo^EML{AHZlodihI9V>X^51P_cowvp|OIaDxj|ewu2NY9PbPa-#b!# zR332ZVIa3iWW;A@iiJPs8~s&obz>gSV~)y(%i5EKB`2R4oIRm`cTUjn;*KwAb4k;7 z7_JINs{;<-FZt`w1F#`)bRE~Z@Y4n=E69zeJ9B*Wg9(52u;O;CiIrf|I&zor-o;CD z8IuN{K{{tbea?ht{5ksMPt?$`ht-!$fA|g?aL^)FBnYD`h3&9Uh 
z8h))09qo8oNCxSEe{i$qgIeG`Oq+mF7;s)YTpcZUH=N+Cneo?0l2O#9a+;k)vzd4< zYKtIdEe>gn;G>I@fle65A;Vy4!X-U8EF;!OEmh9@o&PEP*N~A2(wq#qf9V37P19_V- zkqdGPxj${YOd#<0weE-3kr_S>D+Os=e^P+KJH+XPI z@UQ;O4nO%n&w27N@XNEDkq#W@l1;J2{3g5r2Arm*WgpeR4!P}cK8we30rNFr%S^IZe#bKxoIJ6q7 z&K&Y4&!%pAgx$^ShWegC$Z*3XBhVx~D9hQW96pu{ZvON5ZD?2K`;4G3a0Vk&P#@KoM|@Wgt9AbJ+=jK-Br%pM-k zrTqegB#kyW?VccoCO2Dj{fb?)!Mut2pME}L?jAFdV~hzD(TvmXk}NJbi1#=w6bF&z z<+aA)nR8ARRtPE3#Aqqm(_^l>hRt|Dau#jR%eq4WkNiXOatCo4@L#kfDyI`I&X*Kw zLatIa(~MLb1d8p^jF(Nzy0IwZ$wJJ$Ub5;cgg2C|v7*8{hxLI%Oz^_vL`Nz#yYUo> zLrV`nuP!V}U9j=&n z&&kw;j*bzVL~@USLIuI4KVwTBa?;JISmH!rL)~H|6t!3|jTGJq{;zkTeOF-q)0(nh za`=Dzid@5d-J{|;2L)_KBbIAT78IkXr|F;&8CPA+f;pRNg7TXCh33sWYd-sRPHQcX zR)U3FGZG_wxykwIIdxvJbGG1@)^gsJ)SaTGWB(rq>)!|hBFNl)z}UQ{KYEHhl#zHOW zzr32W84I$c;HS@PTr=gB!f;qvUR09n4nXs~8v47B8u;vFgzFR+!L#-ag-i$-PCuzR zt2>S>iIEMpcf6A2y!V%DvOj#EySd?B(R0@INE>r!`k0Srn)6k_sw<8X$*KYWzk6nl zWg2VfGY~oc&lmX4(%FJcNg_NxDEcUH-onZiG&WEu#o7g4n{+%YHHn6!Gy`{wY2$8yr;H?i$-!g%h-+# z$4!ZKg8f)f*`Y_cwTiwBY;`R^Jsa`K%rh~X#zmA3?1_rr2EM2@A_^R7MN%ZZw$bvF zx#Y45?4|-mhI5A3Mvk&d*-8|(^{f__{k@oPzgg4$%J9v@mVT}Hn^VK87ff|TVFHOV z92PyV?5$a@B+r%!hp8ip9nN+9=U;#ohOJDo>>VFJSKL-5*4G?N1Nb#RTdXK;U@sH= z;F}LwFFbemVfLhF%S60bX~tR3@BBl@)kl)Yml|drnM{~HtGIo5&QBhV*^C7bmx3p& zK^FF6Iil_YKRvtTlU1M)lHRYlm3ijBI7Vg`(?`7hG+a)b!Hwb^pnN6ae0JEsy6JeeJnM24eD z!$=2Ifb&KpmyYvZuoVjm>1a*@8%A-INnV}&z0I$bdN?DCVZgc3GyJV;_)8grl;&nA z4mZIliMpwl3O5@u?>n>-0|y)&K6Jy@_NL!g3WE?akVIvCn+#jM%m!fLEH%xp{V*R0zsobM1qqr9V~ z0s-TA%Dww7(XEs_*AFP$9_tiM2d(osm(lc&(ynQ!v9_ae6_P;HHPoF)i3o|N^D6)% zIYNY*9D!0D-dALyN1GTyOh6)qBhcc5XYG50GqiMgKh&*+Fc_gi7#>Kv8?MoX78fio zShO@GdO|L@NyL<1)!b0lHz7ZX(J9XN^uD{{vxf>Q=Z3Am5(Xt>5P@Jl)>}g8QEIpX zgy8X^zX>(M%3n~2<;~{b(AYTvNcDuytx-~w=}l_4#11>(p_L+7B9*cs_i>^p;BQ10 zilsj%H4~=kEv#>mQeueE!mt)s#AH-%MxY-CbnVEP5b$Ufk#U%PKt1vP7?azY#u7B+BdU(ooTRAgA&vTCoe ze!yG$&<_tIBBEeQCDv@3U9Oy`_X8^)$%M7IWZj-2g~6DZ)J$$3y<+eZYg^)ILcl`s zY?^!YG)N52_l(#?>m9Db3jzVH1WMN|`WZWFmz7_UkTdr)+R$T#B?~G0W`h^qDT{E$ 
zQT!%NS7Snik&0TbQ9dRLF*AQjN5_l)jNFbm?VsSK05tpZ7KPg+78#Ly~WDS z@lLa$Hz9Nw47=(9SK%c-Gzi>akCG{=)~tKMtx-g*1E0-mw&R>xf65Gn*A;tF%z2w} zacOv3R;=6w340W3Oc0*6D~UqNmdRN5J(*T)e087Sod)hb512@yLq@0qv)XfRcPP5R ztS`|ffNI#z#w^#Gvb^NIG7yt6F$F<7lu?}4*Lc~n5p9zchJ2l{7A3KYxf4GaL@&XU zX?R@r;0?!3OD+`~X2Lkx~lbF~^3O!UF zO(KaBAcUc|4N5xx`V>ZL2tvQC6`H_)0S}j+dxfPBijBx{S~*^yXo7USs66{cPkVDx ze~=!Lh#apyMnR-IzOxZg21Ah#Jo8m1*jE)*9QeQ{Y$TGW%Z8oG@@BHh8IrYyi$+q~ z!D5%{z;Uz25b=BalGnd4dHF(9%{pw=;YH5F7d^L+YPMX);)!FXBlaVBG+)vN&z2G7 zrbDN2a(PZ<_lPazvS%a$o#;q1&Jdo==x>r%+VBq?=kMZN?CV zR^%qIba37V))q!O;bjH;si$)wQGB`pD@$%A1!FS(yj>HUZ*^t^E^&{Uox@~qC_(TQMoEcj@iaAcq`f<@{1v%hM2kVbT3kUbPi zaM&ugUZ3#8iRAh;XZEPWddqGeNXLP7Y&gC4+{t@xy&_qZiuIM}(-TSM;jO8qm68aB z2yk5+KA8nB*OH?x%MV_uIh#dPIvRui2nFmHDFze*7n6sXb%WMAS)UcNo!|d4q{{nAcp0qt*YI27` z#|>D6)I;&|&8b}645{IUGaUwEjS!x}OW`4in<08=3y?Q04&L{}!0ft#yY>CeX4&6_ zufF6|D)&`vSL+@tS z(_4$}E!MVlO-)m-aIL`h!wEi98Qw~sFLF-SmPHqscRlC*GitYj07k_)>FI5G(~lca z!rZ8zCBX**ZeWCiygBbPpb@CQ-Tbz$qOl9MqgO~}&StX9!e8L%5l)h7MJ7f}%?`70 zMk*$F(bD*aaMSQ0@U*t14JE<{GO=+Zt>|!~Wo6H4T!}L+KJ;XAO008)(gbJm){~nJ zJPz+2m)%o(SCN@5z@z+N77GM+Q~2kvugSl@%jP%`A8e3}hNnLr57&ym5(E)5uQaFc zyyQ>+e8uzT0FR^h&A_KhP$8ifHKi4d4ikRos}=Q&6~QZ%v?OB(X@n^fgbuiP2Niax zuWH0-LN622_5vLV`Vf=uWLT?ltD4ShP?}7w2-46zzo1|CsQcd|H7#DntQHQ7=2~Q= zuE7QcWl5uB%43C14AHLaC^MX9yBGVf;YVd|wk817lSh0 z`as{yq4zo{CMqM7372-pNEDP*SkVwTEB@fe*Idj?j&j3k?dU?q zL9~gZ;mn=V=v$2MPWg@39sgjeKu5T9i>9SMs}MU|_^Tz<9zWVcA8vuGc=7&>-3PCN z)U;?Od4nvkh<9@?pH4`&Hi%P5_G9KBUJ_3uqK%l#(*)ycle_M9Ik=dU z-`=Ng0`t!o#G?wmGo{lTn3EOTW5x5?HrG!oUj6DWwh64<4r*2-+K$irh|}Yg4WoH~ zam~C9C}JwRU=;Oe1Upem?K{r;89`XK;~kv0R8%AeuI<&(Fd_sUEr(_oCt5n$01%s@ zZRK_{VbLi>2s9RCWGy_3J@xA zkSgj>v8ojxE-H2th4(SFlPs;}-D}BK9C&@=$+X~k+p%<)NWaNPR|&BkxULJC}N*QTOlJxYYg=Rt1j}SVdZv~O4s4lOd>==FJ zfce_<{87vOHzs6xApOcVpZ|x?c>39rfBM~hHjYws9C-AoLye}4@`&Z}oaNI?+Fwh4 zbgw7gQl#AGowqxZwB_^Fk~`xO`;kIo*h)G+Sp;6qd%m_Ec>82TByzMW_;}G1bWg0J zVF}@CZs(e-Ruik1y-e`cP0PhH=F_F(2*fq1RIfOg+K~8 
zTN{$x(K-0!EN2>bta{J;GuX)&ys?!KjRNmK3pDSfv|Yj^_LSD~*2Ixyj-zkvaq{tk zPfh|kJ^$spW7nh@Avmi&yYq~#Nyk;A`E;I8_MXod5-njXhKmN0IOd!8T7LUS57;`0 z$R;CZN^@z{jzUEv1a+l)n7RSGsbCn(|>Hq*B z07*naRB$!|R;5*Ct6VJidW9Z?<+o$Zlmhtgl zt+{*0QT$>JuixQoZza5P;SraYRMFQsn=SbM*A=h-^%+$o$B4%Afp?gX@V%83-sE5xyN?mi1#BAIT zTt4$9d3-UYmP_R2HR;5lGD(O$!hp_Gl8tSO!&{8*j=6Kx@Wc0?@$__`nOkFND4RJ# z4v{U7<>7kGzx|6TSATIq=N%CdMVwG1n#2fF1DWw;rXi0@(x@knJW&!bxj-6$j3g>D zNHq+RxIkZb*v8S?e1{G91IEjuAOR8l-nP<#DuJ*=v+A7*!B}-n&6_93@B$ zl`nC@A{nq5^nl0ouBNA=h<1oVLglYn^k=NNLP|**?{J zg$^l|oRf%zi5X*T%7>SM{y%!oyrZ$2&ed$?EngkoVr~=miUHGD>NT%SYE)=Ae66GD zEYkOMGcbu{d@mzPGRh}w)@2WIhi`jiCeT)}c-bRPPDnQ;(@jO)R&-ZT>}N!oLUn=k z-YznWX@q8DKSgr~vAcu4n&SwhZyX|vH|Q=GWSc3!yhnB^+vgQqFK zxO6N;$yW3VR`kSSgFdu$t|ZYT8n+xY)iTB~Sll8cWq(d=G72=Q98rfsq%mZn1Fp+vTC%3q^7brYINcNI3yW%cqZ8e<9Maw6v zPYF`88{NeQk4G_41zlJU=$11i2t2mF4_IaQG>U_rHQ8px%NLT}-G--MoUxHv zrgvjXGtf|~ceLf4aG_XSogt;->Lg*5H|%_ML~>^v*VNdTR|FhQSkli*_ID;^Z|#un z7|Qov@W$_Kv2n1&`Pqt87lZ>%@yY?O%~tqDjlR3b;_-9Z^Es2ZVou-fcr}8bRtd9} zV8e`YRuH*}k-kG^=ZsBC5qF5r@S48Q+BW1OaMf6xkW5WXprr8@g2f1f6D<%hVfc(0 ziGmF^VbK|uog>$ZR69Odl}vO&6NZ+LSSeoC9k=tC^UI!MEx1=WbX5>V9vd{Vgt=|e z(s3t?xoRiOXqdZ-J(?gWB1dH5^LN&qzUc9dW#h_ndF;6RyHjM-vJu5JS;p=^NSJO5=6_n#JRJ1Zbss2w zL#8w8Uh}e(JbX{^#~(judnDQJ;jM3be({H=JetM4xm)w@@r<2PanPUe^IvK%P0bI# zwc@=;DVuR1j~de31sktzvFuZ>KKUHkcFaGj*m_g(tCua)dco!hLJZ<+O&iVm;zh#N zR)@R2O=&yCb!*V9i!;_|lGDnwYCW@NP()j&_{lRUyFgD#tRgz=X;sZ$lwiG}bdq1qTXG%v zjg6d(HsbBmo)@bMO^kK$a$z{08!8Q-U8dB1!;5msmM#$euv8NQW^KnSxu)|1>tWtY z%HDB*EZK_%L1->kft}3p&96D$es|da+|FP(7BsOS>z4%U`Q~oJ)0H5&n9ahnuoZ{7 z;YH=SY79Gxpz0+q1iro@cv8Ye70@z9_>T1Jhis2E`EgAhA9G+NrYD-?!}FiaiSMMO zVa`{#OLliR+5X;bl#o3B=`mlqcZ>T!+~uBur<;NGX^HsgCGlMGLdPUA{O%paysr7# zwybBy&L|fWr7kB9*grG zi5U}#j369UKf~YDdKooX*R1bwuDNk#$eZ(N!t?f$KRMoDb=+{xOO#LuY3SU7-Zw<1 zV5*NO%oiNwV-C|ED#aJ|nkH04szAgFr3^_Z=%~1~*L-x9P@|ap=Xh^XQW2@aRxO1Z z-Us{*#^I21*ksvI6BxK-DTYP`DHJjcs7B{Xs&0XGEs@SJGQl(821ceNjEH1RtRrHj z$#goLi3LdI2F_S|5*eYSBM6HYppdxGBJgOTZ%&B;Zxz>-;;QL5>sL5Bv`UaDdRL=$ 
zLTbi03~+;W4dLd&N6Q!~G`+3peG3S*C~(1IR74sLHAGFgQ4EU-b9X|a5&{w-G*y`6 zgGC64%@`j%I>cmhLL+mo?Q_O*IH4sv$NC=E_oQM(L~^687HAbCLqt!_#2nCv4uhf) z1)c9ua*rq;#2P{l{Bjm;;H|?1gA+X-f%6F;%-6hJPq4vpMMd)$XH4~s(ymFwn8+(K zW683=L8=+M#*o{H%6g_|V~`Xu;lLas2^1n@Z7uIzSQb<)-8r?L5nV?-iZ@9SjC91( zbu3t*gTslIgP)J6LNg3tw#E=K$r>`*BdlR*Emdf!!ABe9S00uyS24CFdtP?`={-A%#>pP(BSuMJH1Vug(A1tq-SM)FXdGO& zmsB3C=y0NAQ}0vzmN;Zg^c!@dr3>ppih>$c)*_?xYeX1)kk{@7iJmfIAL~kl5Ch+5 zU}YtdlH?}h#4f4ik}a`~2tAn?qsgecz^=H>s=FjNV^SG2Vmw&;gu%#xfuCxDM00A_ z@2laeAz=v;$9bCgZk3YxPjr?G-Z z*YL?ziS>>|L(qX!`vrSiGfGPKQ%Tvt=D+)pJZ=dN=2e7I4Rv=(CJvZ)lBqtxg_zH- z1?8oqFwk_C6%{W;%A>O(fGY4T`i5T#LG-D>tB!eBGmR5w{Uxc0+07!rao(2%xu6eo z4wdHDDhEVNpD%p-BG7Y8mH$gX8b=iR3g}`o{ z4ox&#bJ^CM)Xz!AdzesgTzXQaNtIwDy6M_>DQC50Z9NBR43%c2;434`XLCcMCX7a& zTe;_>YfbM32T9Cf9T4fb?Io^nBxFX z{t}vg$w|H9ucDl7)3Rx%Z00>5ol6SSQJn>>n9)0r_lB*>oQ-0|&ILZ3XY6H~k%XDoxOK}>AxU*Z(+*wapPlu5bW$aTGrfG_i28F zIC3XBVk|2o_}P`;ZkB!0hXZ}bGceZfm$(#a8s#$zqK6gki0hy7rL!Md^Si9P!B^&La}Vo*{t@ zJ6yrF@8NNHPk8ZZ6tF>>yPgImD3M?v#TQU0W)}yF8LNRTMikqGjAGj+gDwV#H(2v$_O{~sNlkmab z=seqWkC_+=Q)5=1q?VS61z;#P6-OSQ`D+Z$`;VfN0b6ei6u;#GRit{zpJ1t(2>r+-q<7e&8*P`6{adho`QMiXhe{6_()nPJ0E- zpvEjOy#mmiquHJLsjbe77^R-Jmilj*Ao^_ z<7LxRGx%BTX|&=q?FEVjUAXfGJV{W|oL7=z+;Lwe$C|?G-0KT>Qc04w@;=Xz{sqo- zs2+X2^(RH|FU6IiaTytC^md)a0;!rU@+_G&b&!kLg2vb4jnX>}+xaHG&HyQPg zN_>5ZLP~ZNtN~uOT9r2ZG-q`UY|5b6=dX^~z~oBStW9q5HpGYD3(FyXgR70XwYPXQE}r) zy(>dJ|J=0^Rz0}Ir*5As_7Ag@7ITjUxFz<}ED?G}tKP-Ava5637EQkPEUDPd`)a3Y z?52%;qh{JMFsiO%Z2((buc;D`ur6w;d$P7;nBC0DPMh_wX!cq+a@y>#2Rv}@jL}30 z%?lrpnX|PB)ol8PJ%$qcjot8NfJhE9wY+@5uUq&+^LtMy!oaCD&((*1HeO2GMW;p> z8qB@XGG6-vVY#0rLZY$|6+lqx+8do>cgQxbAs){gj#LA~wULraVD5SD;U(i^BfPA) zLGCP9tI|cgOr*T-$T)D4u6=M=)l=GjW%9SQgRX&m#yqUD(pnO{Sbm4gO2z{j#Ah?uq24-p{-n=FDP|)m{iis5!=cB9BiMhxWydt)Jo-JqtikPQJxPC| zla#+X%3HY^BR8ntZxbckwFC_XNVYdJX;VJ=V}%H`-7E$AEhwbw>Iw~RXg{YFl97k> zs@R&=9j|d@AKDzDz1ne~y$>15i`Ex)8yeFi%jW()aI)K*#S+B8#;!BYx^79r4!Ut( zH6dua9B6}I;^>s;A0eX(kFF2pbQ(oF!b^f(j5@%djHf}?_{+|+5!=qApqCF;hSDJn 
zXMGv{rYZ|AghWE1v7mD+?xa%+-KO5N7fN5o)i{&#KH-R9nlz706KLhUh$DA>I$_#d zv61hJS5TnXqwe(u&acBx+V4u6Khr#YJBl}IB#PFrh4@HL!4~Wm$8r;hII52L^k#Fv zN20l#kTf>s*qY~U-ew1(8^?!^$`H!{*Fz+RKE2?cb@7qlp5#Ak09+QWNN^Cd#pbON zf~+2U3!0jVe{0hRCt9Hti)JXLLM(Cv(ctv8s z>`{|MyO+9I>q?_lJFba|YvmQ$;H>D$+8s1ySo0aLtfez4#hTkl;q#O8FZZ~glz(WM zLOIeF#OIhg9Thwvw8=qpmV&yhhm<(nIWP9O55L>tqA~K59@^vRp)>G>7E{uZ$B%_F zWV&|AE^|!Wgh|(5p)8cyESd4$fQ?-P(qigM;8H>ExhcCalLK#NN6u^^S2UvGr&jbx zxBhsQy?jJit$DZ*W0T~|SzT4#Uo7#k&Seu>vFX;-Tad?6SwHS9cMJM#sGmJ>T{YU(&mi5 z#7=I#V9p9yr@6jqoF04N5@mvR9SN506CJ}OwOmM=e^_9JiqKZa#BEySq}SCQ{rv=# zUqXiOx;pE4ZR9xEL@1s-1(zojK1;3ah=xSVNwIqo&=LHGB_37)&8*AjxLx+lkPaii z+H=Z(sOb#-Mdob-&6sF?z`h}a-~^xbi5|!HK^JgOL*7*( zq)au#Kj)6AqviwovlMb?mjNUews$czGLPBm>Nw9Szkl(HbR~c>#-a7sHY#CL;Of08 z?K?`I8nm-HEMwr`2K+BqRRg8;E;ao;8rG$B=wPQ{+5&!0rglg?Bg0(GWf14jB@lN( z(^~MQLWl6=`~6batJnzQaXO47+1B?`1*p6qLXPQMss8a(KFP#GTM4x5tUz=8BYIfl zzY*(X-r`+Ee`Fz%MB)16!SM%1ztuk6bH%I=8Z;DZmVk8v-CpqidoRROK@ony*U;d% ze!YwqR816g5vx@h7K|@bRk7n0xL!RMN1I@A1=VmhZWcXJBw9T)ZSgN6sL3<#K9b#y zx``l)xN1}iQ}D6bJrikYa{qdi(_bqL1ywM%2|#}m`1+F-@KqxgjbZhjw8w2k8yovH zAyxi<7maHHju?m29t~}aS3~nXpQiYk8CQA&3EMxnE+oWa>8yKY;)Qosi2oZ=$n9`n zyVH?6_JYtS>~Xhr*1e}9@YmVYq1JET^%j&~bVyB&v+d{Am+{MZs0L5b z)D$S7f&$A_a?Qo0r~be_=-}s*5vO)$nO{lCM^D_V5m(=46FA#5Q`)9KR9qUaEt?~t z5!JLane!(f+F>tb4z8AmbB^x~i0;?ar9uw*;Wwsu9**-T%UjxR0sb2*4{x6u>G8H? 
zA0S@dET9?R9ZQB7GJL&kC8+$1-4fw|k$HbBY#F&>(&E9(eD4ZtnZYOs#8_c`ne?(N zlAz$hhKv*Hux13^?f&kX^Va>z;sGDspT$=>6||mRwMiZ`nAMR}u`l_XY$ zoMte>?3;=f8q+V1|NK%~6t8_Bx_59KBvl*1o}}yiYc`9Ul%C|6Tw7LUfgIx=H;9uUU}6A`DV$m~yXA)%9?P zcC|7u({X!CbN`RA{Y36oHt}I5X~P4r^xi=1{w%WX1OK7h)_j3@)BBm}W-#G-sMHCP zXy(M|;Z*J1V_AXYFAB6}K8o?~nCVL<<;l%(RnW4#oOB1y_#mHoa3q>1gU_1lA;$T6 z<;HMm-WB4LB^|lsN#CruJt@)gwLG5XfmQF%6ZUg|x7DBee14Hf=!cdfzkG&!hBU@E zK88|6mh!FU@WNbcs=6nBgRJfbV?F62KOW_~z}NlpB0KKTmG2St;@*Ov9>m+D5cKpr z&Sw2*of=oRWwbi}%^R&~M91_i3X6C46jtV*{lhvnw!xhg{0S91CKO)98_o@M6kdN; z@?bO2Il?_?WYm;Kh4^A~3@g9?#-Y8+|L|C;3cikjTkQL5E4-kruGkM% zuQbfK@B7-Lx~4>o!#l68E?rxEn+foULcLra$gJEei|YN!uz-J<9lZ1CxIG8%X)NPG zmA}8-^G$R$tykN;eXpB##ZqA}S}W{cO)BJO`s5GZ<Y7XHZ$(I zy%uRHIOusvFB9*|xY$$A8B#m_8_m_3EN>QOB^^m;#p{Nm@c|+ciI!jUGETuF@VGS4 z$lJ~zZ6*AUIAkfWrR#SRRX*nZRf1X2QCfgK!(#tsa^QLnzsKtEv}^;f|N7iAdrNY| z1{Noep9M5}Y|LMJ#|8c}4BLFLdi78Db4+8(>fUy0VtH)l7~Du3>0fqYrw zeE5%=5`DN1UrCpXiQ{p>xqfh4C&U^2QfnaD}`r02gQ(^ZNkraDvFQR zR*euybRYg$_|Z!oSg*YO$$tjEZL2;TY_)|SdZ5d_E945(OLB8AU(Lkmgc!Rm@YLLm z>T=C4eX8DjZ9oAoaSu%!s!OyM7(jFR&naQ=ZI~z%fNg@(TtR?AvQ>h%pcH^4HsjU8p|^Fz0CXEQNE;OoD!?w$0nO z*p@YcI(q|-^&k5;q=}ox@x}Iv%@weqZPl#A*vG=?a>T^=m?Zd^jCWjEXxg`8hZ^;!8yK4McN#ug{R)Yu&NAEA_KKQX6zQ zU1nE}CWI4{l1@GKQrVbOR44z&8W>PSy?U3drc16^^s6Me{&9ckfJ1=!ZXaYAi2C~b z1%-r8ji(C+?Y-`AgGWY3PgV{-i0^tk53Gt&CNKkge7L;1`>0&xPxp(pt=i7d?@Q!F z8XHc1eZ5xTVANyeipKtC=H%cI5EV7dJOSLMsiVW1r;Rq-CZDg$M$@Heiok$gM;%3s z)QD)2$rdD++6hlI^8!05e!3NT(Z1v{GBUC+Q5slNH^);_QgZdl8^pv!M|jjeTLz|i zel_J!|2T+(jWjMioYRhpcy;BTTnWCv@0O>c_?R#*FSN*_qoZ%jYi9O(TYZtMCqaLt z>r_fgOMQK!E;lx98ih5Fa~}G9h7v@@#Rs4mzqP!8h*aHxL{1YC5!w0p)D@>XBrhy3 z(g9okPV_~NO$94TCG>BI zhhFf!t0I3K<#oy4I2npEMT{F8v|7OxZ-;8Ns_P>$d^yS=3fLP%>|`qv7FG4;!{Fec zWb)fU1Okzjm4$Nr!$>~D$h_3`Zcr!UG%bY4mLDGh+u7lIr;<+A*51A+U4BX@6@U+7 zTFr#svKty2*4EX5#8$%Umk}(Fmc$wd>#zPAb^k!bSbg?G-S>1;ZLuqycGr(a%yT)$ zhT7ohy_rsuPH3%__Uw^Z%RwLV>_pp7dxi4x5V8sI@sGJ%U%YrB;%cs|QL0n0?eNpl z$%&Q&CBSb!x_VFgINf3H1YX!l>HY0~`7xL09#~SaiNgB(`skisp_<7_wF{0fe?jch 
z(cjKnVRLR>Q?kahKXOqQdH~s4rpQIU4nNx5+_ZBwG3TCh zqx9@J!32T^T@H48+p&@3(>N81l3q6+j~NY@Oe$_u)KwIqR%9cMDsdLTj|SoD8M#qL z8@ze)InA~wNlBOd8y7AJ{&hnqF@{(V=Vt#$|Au$iDxTZueNu9=T}TKxl0p2=r5)Jh z8v~45gSBtJKnkJq5)0b$f1)rNF39NHS>O>o!}7v3Tb#X7QBk?CvWexvvE}6*Jk;D& z)7X7|eL-Sv%BtaB_Qw@_l2Gymu0V$QIkXVmh&ZxY{bbDvw;!~uzzMwBf3dJ}VjUFJ zGFDW^N9mbevU`O?sDcT-cvnjjZMFIL z3!CS05-Qubb##c%66yW(1Eo~9{AOb=6bkfjC3n>a<5lI)8>Kmtn1+R3%KBRbhF#0a zV?RrmVUqU$^M+nUkCq`B%d&+0(iXZ2R1_W*_ScO zK(LYG;Jw3usk4z91dqkWVrZnY-FK{c1ud*#$kno`)omk@6?J2J19fgsYVA$u4%-qPObnl=*8xL-cuH4gtDKuNxzGQ5bEj0GpIQtW zry~5&+}W>T(Z9c6`JD^bN=1OW0R*>(+o|Z1Yv_`@+So+ZC=kbh7EH%PDfQpFML;7L zAOx^@G0O>yp>#aLa;Wc(dn@Zn;s3`W)w^r36(Z^-qTmZqlLP5Z(@Q-@{E z-!Z8p>5Q*4_ZufcSDALDK8pmN7oeM|kyZ-yCvl1(BH;-2fX-M2df zGC|~vgUOhhldaNUjU|qN<>Q+>j6I?HN$d1~8ik@c(X+bBgu;}r zo=*O@-&Sy^ZOqr7d+=zOk?5?x;0_;p|F;IXGukaCW8|&(+_?Ys=~jHd9F0b!hkQLU zsQi;K>TN2zf*;JP+l!~wmBqopUdize_zck@VQizxM|OI(D1%Kby(~S zZQk!s*}ga{c*uQdr)h2GL6P;{#((4jrKLbNe=C2UI>_ee=oo+>W}+fmGc(+uHfT?r z+~1sTqrs7;S~`)Hoelr_^Jlf)SxBcUM_N+8pRf7xaM#w%AKK29qwz9*Qn~?kkThry*p}m1I>m)Cu9Wv+FA!xBKgFhI=b< zJi`)=;~F+#4y%Y#?}54zPc}4$DdUx0|1qKx?m5oyhetk{yuFXBo+qF)x7*Y=PObh8 z5fl`RDl2EOfj-3f=} zJjV#y#5)8`03RJSioSxqb{KSab|wKO`heIuI{Hw^_2x8Am@eRHo(1z$dio{7t*AV% zGbg0_+T~+F2VtOz7EvYiepdfpUYC?t>rPKkzjdH7=k}x1N4{zrnx>8_eUmt<&1iic zC1}=^OVp`;p;mU&zUY8c*63nnPW|-h+6ySma0u>EMwrQG(G9S#CX9Nc8cU3Kp!)W$ zHwi&o=|)#j5iMW)Fa@|L0}$J|u>j${cCauo050S6Rp;0{Z;6;M+HW-))30`QBHKzE zJ~XdH&~obJFHTTn5BBTD8e2;OygNB7^w2LLc0QU^Rp zmv+^0Dl9rDZUkTEJqaSet)pE5$|y~PL0gjTek*Y6gsf7V+6vk0>$@q-2kqJG@43jj zf*N(TOzYu==xt-3rVSZ_66b9mtFiwu{$_svJqL~|I{-|sryW4?o|u@Rrls9zZ9NI1 z1FivDIokx3!QQK@AUJZ+v0h1PoFYV%)z`ccOLSQGAe(}6XQ*z(2^U@Nl`zd{>$k$! 
zOzmb~X4-s~5G1ePWx(_F?%P}G?lmv1_ED0$oVUV5E7AMvYO*F7*N~p)WaQnB`=@b* z185zpY??uvUli5>LkN?)&H3C_H6PmBCr}nODrefB3wl%@MwDZj2UKV0^ z`Ks1Cy+KSvHL|!)Zl`YZd zELFBTJ1Nhbjf7FZLx1{S^5;5z`H1|G*A;)MY{48y^M;3KbKK12^P-~qIgy0+57Yj6 z$hmn)U0~?a|MHZ*m~yt2KW*^M)-`}hW4RL1-@Y+xYHOp;;<|Biat6=C0Td>339Q%e z@@(a^XzO`6m<&RA!#!Iw_X4^>%dePVPN`>3mBvG8h*KI9Nk!f{vrid-yGQ0fN&Y2J zzrebI81AO+xIFeV>nGx=gPTnm{Q**2H-}1% z<he@{3NlY+sIvK(Zq)&dpvqdnxHio!g4;q# z*C!*Vpa9t)d)I$-NwqJW!4LFw^DHfs{y;D4zC>~t5vlz_rAe!jz)PI2i8L;T0w0rC zmmDtuytpSiKX+Ng6`#Q~oUw9*d>D%)P+#KU0Ag#*GXlDTix~~@0^pq$h`!Y~!(q5} z?aoDdz0T0w%2tl2&YF9fuI}E7?!{4Z_uQk@y7x<75wHHI$Nm}L^js1lM z{sIC5CS3R- zmZeye;odAxg&E#{zLVLfzwPFHC}uCJf%HYz8k+Vuf$c=JULbJ|o+UpEl=%;L*o(pc z>v7(t<|s+?QSq6>&d-H8I60fAE~|QWCrVSaOPPm#<{+aHYb`EBu%hn0edSLfI_<-C z=Es;cETlXd0rdn8YKi`%7UezLwNlm_X!TR_rSi+;xII;@Vrru|&CbQu)qd#_E za?Yp6e{p&KT_f|t;Mi<5?D2^GBne=1{W%*x5mAe2k>6csvHe+I2;m+hN*&syKpDGHUb+vkte|7vOAZ`Ze|?;^E1I)EHx zLk%bi?Uc9tJp%X<&r%%2ae!w0F1e9r++JwFUdW%9F<^V-Dd3*QIkya>WRxbCe} z3JPj&_B};np^wY5bkMv0F7pxt1oM|igYfAyFWc=FSYX=(!2Z~-1%jJ#xMYG<8 ziPdy1=uxN{vo0XX1B1&qRLi-uHBVv9%aKRN^vpox$06|H7{3^3l`qvA3^>^s^(!3u=H_?L0h^ zW-bUYqX~2)rcU5BJi}oZMg|5dn6MJ(Qz2)R=PvPbkAlcQG9^K$i$D*L>M_=NT1prm z=qGV~*DEx1C<^f>MHpc9bX#usKySV;nckhKpK4;{55Cx>VFQ5Rw$dDKNNx|f&IMY6n1QKZGh z`kkr(1F}r4zRH2DtoLGVqKcfsz*#I^VyxRijJ5qg04<|r^2bN7O%W4*2u8`(F7@@G zSvUfyUX39$w)_~yh4N1UIDf4O`*>G2_4_C|93C!(Mh9?~{2Jh>^77n&h*1Iy|K9Ij z6Yw+?5fejxL=jILT-~3mzkMkB^%fzr=<;KgYT&NKff_SRcBSjj#~#P;CJtB^pUQtN z8~^9p;ey3BueI*3t|xqX+8e+~X(d)VLaDBFZ|0=K<0i7uObY7?X_d|jF6#FwzP>q3 z*s5%#y}_r|`{9dwP`y3!&^0oG<{^5X&DXydJ%IXW4udlCo_`mb6RvZrx4?bi`hcaPU z6eeLrR23C}y84M&PKh(1jJ#qZ4sw&LEiVtt+sBlKt`ztZKRU1&GFX@E+X(cz8vrHj zh*FdR7$iVywi;}KA1k{l669Q1JApamzrEHz0|VHAF1U6Q#IPW~y}kXY3tIMPZgGk!)I^nFM`X)Cd4>W=0cxpV zxrXxRFPy=cS`%N(UB*Uys?gjYClc4V?^Htp^uYHgC*Cbfhu|tPhW`W=tWH>w4`eho ze(nvNzsTA2*Cdu&Dj-Y%Z3Ppy67c@iWbEF3dq6-f{94F?$~juS?tB{=JwJ7i_T$463h+=<(pIdNw+8(m;7H~L literal 0 HcmV?d00001 diff --git a/models/base/base_trainer.py b/models/base/base_trainer.py 
index 8782216d..5c18274c 100644 --- a/models/base/base_trainer.py +++ b/models/base/base_trainer.py @@ -78,9 +78,11 @@ def __init__(self, args, cfg): self.criterion = self.build_criterion() if isinstance(self.criterion, dict): for key, value in self.criterion.items(): - self.criterion[key].cuda(args.local_rank) + if not callable(value): + self.criterion[key].cuda(args.local_rank) else: - self.criterion.cuda(self.args.local_rank) + if not callable(self.criterion): + self.criterion.cuda(self.args.local_rank) # optimizer self.optimizer = self.build_optimizer() diff --git a/models/sgmse/dereverberation/__init__.py b/models/sgmse/dereverberation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/models/sgmse/dereverberation/dereverberation.py b/models/sgmse/dereverberation/dereverberation.py new file mode 100644 index 00000000..4965d652 --- /dev/null +++ b/models/sgmse/dereverberation/dereverberation.py @@ -0,0 +1,25 @@ +import torch +from torch import nn +from types import SimpleNamespace +from modules.sgmse.sdes import SDERegistry +from modules.sgmse.shared import BackboneRegistry +import json + + +class ScoreModel(nn.Module): + def __init__(self, cfg): + super().__init__() + self.cfg = cfg + # Initialize Backbone DNN + dnn_cls = BackboneRegistry.get_by_name(cfg.backbone) + dnn_cfg = cfg[cfg.backbone] + self.dnn = dnn_cls(**dnn_cfg) + # Initialize SDE + sde_cls = SDERegistry.get_by_name(cfg.sde) + sde_cfg = cfg[cfg.sde] + self.sde = sde_cls(**sde_cfg) + def forward(self, x, t, y): + # Concatenate y as an extra channel + dnn_input = torch.cat([x, y], dim=1) + score = -self.dnn(dnn_input, t) + return score diff --git a/models/sgmse/dereverberation/dereverberation_Trainer.py b/models/sgmse/dereverberation/dereverberation_Trainer.py new file mode 100644 index 00000000..564395eb --- /dev/null +++ b/models/sgmse/dereverberation/dereverberation_Trainer.py @@ -0,0 +1,190 @@ +from models.base.base_trainer import BaseTrainer +from 
models.sgmse.dereverberation.dereverberation_dataset import Specs +import torch +import torch.nn as nn +from torch.nn import MSELoss, L1Loss +import torch.nn.functional as F +from utils.sgmse_util.inference import evaluate_model +from torch_ema import ExponentialMovingAverage +from models.sgmse.dereverberation.dereverberation import ScoreModel +from modules.sgmse import sampling +from torch.utils.data import DataLoader +import os + + +class DereverberationTrainer(BaseTrainer): + def __init__(self, args, cfg): + BaseTrainer.__init__(self, args, cfg) + self.cfg = cfg + self.save_config_file() + self.ema = ExponentialMovingAverage(self.model.parameters(), decay=self.cfg.train.ema_decay) + self._error_loading_ema = False + self.t_eps = self.cfg.train.t_eps + self.num_eval_files = self.cfg.train.num_eval_files + self.data_loader = self.build_data_loader() + self.save_config_file() + + checkpoint = self.load_checkpoint() + if checkpoint: + self.load_model(checkpoint) + + def build_dataset(self): + return Specs + + def load_checkpoint(self): + model_path = self.cfg.train.checkpoint + if not model_path or not os.path.exists(model_path): + self.logger.info("No checkpoint to load or checkpoint path does not exist.") + return None + if not self.cfg.train.ddp or self.args.local_rank == 0: + self.logger.info(f"Re(store) from {model_path}") + checkpoint = torch.load(model_path, map_location="cpu") + if "ema" in checkpoint: + try: + self.ema.load_state_dict(checkpoint["ema"]) + except: + self._error_loading_ema = True + warnings.warn("EMA state_dict not found in checkpoint!") + return checkpoint + + def build_data_loader(self): + Dataset = self.build_dataset() + train_set = Dataset(self.cfg, subset='train', shuffle_spec=True) + train_loader = DataLoader( + train_set, + batch_size=self.cfg.train.batch_size, + num_workers=self.args.num_workers, + pin_memory=False, shuffle=True + ) + self.valid_set = Dataset(self.cfg, subset='valid', shuffle_spec=False) + valid_loader = DataLoader( 
+ self.valid_set, + batch_size=self.cfg.train.batch_size, + num_workers=self.args.num_workers, + pin_memory=False, shuffle=False) + data_loader = {"train": train_loader, "valid": valid_loader} + return data_loader + + def build_optimizer(self): + optimizer = torch.optim.AdamW(self.model.parameters(), **self.cfg.train.adam) + return optimizer + + def build_scheduler(self): + return None + # return ReduceLROnPlateau(self.optimizer["opt_ae"], **self.cfg.train.lronPlateau) + + def build_singers_lut(self): + return None + + def write_summary(self, losses, stats): + for key, value in losses.items(): + self.sw.add_scalar(key, value, self.step) + + def write_valid_summary(self, losses, stats): + for key, value in losses.items(): + self.sw.add_scalar(key, value, self.step) + + def _loss(self, err): + losses = torch.square(err.abs()) + loss = torch.mean(0.5 * torch.sum(losses.reshape(losses.shape[0], -1), dim=-1)) + return loss + + def build_criterion(self): + return self._loss + + def get_state_dict(self): + state_dict = { + "model": self.model.state_dict(), + "optimizer": self.optimizer.state_dict(), + "step": self.step, + "epoch": self.epoch, + "batch_size": self.cfg.train.batch_size, + "ema": self.ema.state_dict() + } + if self.scheduler is not None: + state_dict["scheduler"] = self.scheduler.state_dict() + return state_dict + + def load_model(self, checkpoint): + self.step = checkpoint["step"] + self.epoch = checkpoint["epoch"] + + self.model.load_state_dict(checkpoint["model"]) + self.optimizer.load_state_dict(checkpoint["optimizer"]) + if "scheduler" in checkpoint and self.scheduler is not None: + self.scheduler.load_state_dict(checkpoint["scheduler"]) + if "ema" in checkpoint: + self.ema.load_state_dict(checkpoint["ema"]) + + def build_model(self): + self.model = ScoreModel(self.cfg.model.sgmse) + return self.model + + def get_pc_sampler(self, predictor_name, corrector_name, y, N=None, minibatch=None, **kwargs): + N = self.model.sde.N if N is None else N + sde = 
self.model.sde.copy() + sde.N = N + + kwargs = {"eps": self.t_eps, **kwargs} + if minibatch is None: + return sampling.get_pc_sampler(predictor_name, corrector_name, sde=sde, score_fn=self.model, y=y, **kwargs) + else: + M = y.shape[0] + + def batched_sampling_fn(): + samples, ns = [], [] + for i in range(int(ceil(M / minibatch))): + y_mini = y[i * minibatch:(i + 1) * minibatch] + sampler = sampling.get_pc_sampler(predictor_name, corrector_name, sde=sde, score_fn=self.model, + y=y_mini, + **kwargs) + sample, n = sampler() + samples.append(sample) + ns.append(n) + samples = torch.cat(samples, dim=0) + return samples, ns + + return batched_sampling_fn + + def _step(self, batch): + x = batch['X'] + y = batch['Y'] + + t = torch.rand(x.shape[0], device=x.device) * (self.model.sde.T - self.t_eps) + self.t_eps + mean, std = self.model.sde.marginal_prob(x, t, y) + + z = torch.randn_like(x) + sigmas = std[:, None, None, None] + perturbed_data = mean + sigmas * z + score = self.model(perturbed_data, t, y) + + err = score * sigmas + z + loss = self.criterion(err) + return loss + + def train_step(self, batch): + loss = self._step(batch) + + # Backward pass and optimization + self.optimizer.zero_grad() # reset gradient + loss.backward() + self.optimizer.step() + + # Update the EMA of the model parameters + self.ema.update(self.model.parameters()) + + self.write_summary({'train_loss': loss.item()}, {}) + return {'train_loss': loss.item()}, {}, loss.item() + + def eval_step(self, batch, batch_idx): + self.ema.store(self.model.parameters()) + self.ema.copy_to(self.model.parameters()) + loss = self._step(batch) + self.write_valid_summary({'valid_loss': loss.item()}, {}) + if batch_idx == 0 and self.num_eval_files != 0: + pesq, si_sdr, estoi = evaluate_model(self, self.num_eval_files) + self.write_valid_summary({'pesq': pesq, 'si_sdr': si_sdr, 'estoi': estoi}, {}) + print(f" pesq={pesq}, si_sdr={si_sdr}, estoi={estoi}") + if self.ema.collected_params is not None: + 
self.ema.restore(self.model.parameters()) + return {'valid_loss': loss.item()}, {}, loss.item() diff --git a/models/sgmse/dereverberation/dereverberation_dataset.py b/models/sgmse/dereverberation/dereverberation_dataset.py new file mode 100644 index 00000000..852240cf --- /dev/null +++ b/models/sgmse/dereverberation/dereverberation_dataset.py @@ -0,0 +1,107 @@ +import torch +from glob import glob +from torchaudio import load +import numpy as np +import torch.nn.functional as F +import os +from os.path import join + + +class Specs(): + def __init__(self, cfg, subset, shuffle_spec): + self.cfg = cfg + self.data_dir = os.path.join(cfg.preprocess.processed_dir, cfg.dataset[0], "audio") + self.clean_files = sorted(glob(join(self.data_dir, subset) + '/anechoic/*.wav')) + self.noisy_files = sorted(glob(join(self.data_dir, subset) + '/reverb/*.wav')) + self.dummy = cfg.preprocess.dummy + self.num_frames = cfg.preprocess.num_frames + self.shuffle_spec = shuffle_spec + self.normalize = cfg.preprocess.normalize + self.hop_length = cfg.preprocess.hop_length + self.n_fft = cfg.preprocess.n_fft + self.window = self.get_window(self.n_fft) + self.windows = {} + self.spec_abs_exponent = cfg.preprocess.spec_abs_exponent + self.spec_factor = cfg.preprocess.spec_factor + + def __getitem__(self, i): + x, _ = load(self.clean_files[i]) + y, _ = load(self.noisy_files[i]) + + # formula applies for center=True + target_len = (self.num_frames - 1) * self.hop_length + current_len = x.size(-1) + pad = max(target_len - current_len, 0) + if pad == 0: + # extract random part of the audio file + if self.shuffle_spec: + start = int(np.random.uniform(0, current_len - target_len)) + else: + start = int((current_len - target_len) / 2) + x = x[..., start:start + target_len] + y = y[..., start:start + target_len] + else: + # pad audio if the length T is smaller than num_frames + x = F.pad(x, (pad // 2, pad // 2 + (pad % 2)), mode='constant') + y = F.pad(y, (pad // 2, pad // 2 + (pad % 2)), 
mode='constant') + + # normalize w.r.t to the noisy or the clean signal or not at all + # to ensure same clean signal power in x and y. + if self.normalize == "noisy": + normfac = y.abs().max() + elif self.normalize == "clean": + normfac = x.abs().max() + elif self.normalize == "not": + normfac = 1.0 + x = x / normfac + y = y / normfac + + X = torch.stft(x, **self.stft_kwargs()) + Y = torch.stft(y, **self.stft_kwargs()) + X, Y = self.spec_transform(X), self.spec_transform(Y) + return {'X': X, 'Y': Y} + + def __len__(self): + if self.dummy: + return int(len(self.clean_files) / 200) + else: + return len(self.clean_files) + + def spec_transform(self, spec): + if self.spec_abs_exponent != 1: + e = self.spec_abs_exponent + spec = spec.abs() ** e * torch.exp(1j * spec.angle()) + spec = spec * self.spec_factor + + return spec + + def stft_kwargs(self): + return {**self.istft_kwargs(), "return_complex": True} + + def istft_kwargs(self): + return dict( + n_fft=self.n_fft, hop_length=self.hop_length, + window=self.window, center=True) + + def stft(self, sig): + window = self._get_window(sig) + return torch.stft(sig, **{**self.stft_kwargs(), "window": window}) + + def istft(self, spec, length=None): + window = self._get_window(spec) + return torch.istft(spec, **{**self.istft_kwargs(), "window": window, "length": length}) + + @staticmethod + def get_window(window_length): + return torch.hann_window(window_length, periodic=True) + + def _get_window(self, x): + """ + Retrieve an appropriate window for the given tensor x, matching the device. + Caches the retrieved windows so that only one window tensor will be allocated per device. 
+ """ + window = self.windows.get(x.device, None) + if window is None: + window = self.window.to(x.device) + self.windows[x.device] = window + return window diff --git a/models/sgmse/dereverberation/dereverberation_inference.py b/models/sgmse/dereverberation/dereverberation_inference.py new file mode 100644 index 00000000..d0233193 --- /dev/null +++ b/models/sgmse/dereverberation/dereverberation_inference.py @@ -0,0 +1,75 @@ +import time +import numpy as np +import torch +from tqdm import tqdm +import torch.nn as nn +from collections import OrderedDict +from models.sgmse.dereverberation.dereverberation import ScoreModel +from models.sgmse.dereverberation.dereverberation_dataset import Specs +from models.sgmse.dereverberation.dereverberation_Trainer import DereverberationTrainer +import json +from os.path import join +import glob +from torchaudio import load +from soundfile import write +from utils.sgmse_util.other import ensure_dir, pad_spec + + +class DereverberationInference: + def __init__(self, args, cfg): + self.cfg = cfg + self.t_eps = self.cfg.train.t_eps + self.args = args + self.test_dir = args.test_dir + self.target_dir = self.args.output_dir + self.model = self.build_model() + self.load_state_dict() + + def build_model(self): + self.model = ScoreModel(self.cfg.model.sgmse) + return self.model + + def load_state_dict(self): + self.checkpoint_path = self.args.checkpoint_path + checkpoint = torch.load(self.checkpoint_path, map_location="cpu") + self.model.load_state_dict(checkpoint["model"]) + self.model.cuda(self.args.local_rank) + + def inference(self): + sr = 16000 + snr = self.args.snr + N = self.args.N + corrector_steps = self.args.corrector_steps + self.model.eval() + noisy_dir = join(self.test_dir, 'noisy/') + noisy_files = sorted(glob.glob('{}/*.wav'.format(noisy_dir))) + for noisy_file in tqdm(noisy_files): + filename = noisy_file.split('/')[-1] + + # Load wav + y, _ = load(noisy_file) + T_orig = y.size(1) + + # Normalize + norm_factor = 
y.abs().max() + y = y / norm_factor + + # Prepare DNN input + spec = Specs(self.cfg, subset='', shuffle_spec=False) + Y = torch.unsqueeze(spec.spec_transform(spec.stft(sig=y.cuda())), 0) + Y = pad_spec(Y) + + # Reverse sampling + sampler = DereverberationTrainer.get_pc_sampler(self, + 'reverse_diffusion', 'ald', Y.cuda(), N=N, + corrector_steps=corrector_steps, snr=snr) + sample, _ = sampler() + + # Backward transform in time domain + x_hat = spec.istft(sample.squeeze(), T_orig) + + # Renormalize + x_hat = x_hat * norm_factor + + # Write enhanced wav file + write(join(self.target_dir, filename), x_hat.cpu().numpy(), 16000) diff --git a/modules/sgmse/__init__.py b/modules/sgmse/__init__.py new file mode 100644 index 00000000..386d0a02 --- /dev/null +++ b/modules/sgmse/__init__.py @@ -0,0 +1,5 @@ +from .shared import BackboneRegistry +from .ncsnpp import NCSNpp +from .dcunet import DCUNet + +__all__ = ['BackboneRegistry', 'NCSNpp', 'DCUNet'] diff --git a/modules/sgmse/dcunet.py b/modules/sgmse/dcunet.py new file mode 100644 index 00000000..6fa34c02 --- /dev/null +++ b/modules/sgmse/dcunet.py @@ -0,0 +1,627 @@ +from functools import partial +import numpy as np + +import torch +from torch import nn, Tensor +from torch.nn.modules.batchnorm import _BatchNorm + +from .shared import BackboneRegistry, ComplexConv2d, ComplexConvTranspose2d, ComplexLinear, \ + DiffusionStepEmbedding, GaussianFourierProjection, FeatureMapDense, torch_complex_from_reim + + +def get_activation(name): + if name == "silu": + return nn.SiLU + elif name == "relu": + return nn.ReLU + elif name == "leaky_relu": + return nn.LeakyReLU + else: + raise NotImplementedError(f"Unknown activation: {name}") + + +class BatchNorm(_BatchNorm): + def _check_input_dim(self, input): + if input.dim() < 2 or input.dim() > 4: + raise ValueError("expected 4D or 3D input (got {}D input)".format(input.dim())) + + +class OnReIm(nn.Module): + def __init__(self, module_cls, *args, **kwargs): + super().__init__() + 
self.re_module = module_cls(*args, **kwargs) + self.im_module = module_cls(*args, **kwargs) + + def forward(self, x): + return torch_complex_from_reim(self.re_module(x.real), self.im_module(x.imag)) + + +# Code for DCUNet largely copied from Danilo's `informedenh` repo, cheers! + +def unet_decoder_args(encoders, *, skip_connections): + """Get list of decoder arguments for upsampling (right) side of a symmetric u-net, + given the arguments used to construct the encoder. + Args: + encoders (tuple of length `N` of tuples of (in_chan, out_chan, kernel_size, stride, padding)): + List of arguments used to construct the encoders + skip_connections (bool): Whether to include skip connections in the + calculation of decoder input channels. + Return: + tuple of length `N` of tuples of (in_chan, out_chan, kernel_size, stride, padding): + Arguments to be used to construct decoders + """ + decoder_args = [] + for enc_in_chan, enc_out_chan, enc_kernel_size, enc_stride, enc_padding, enc_dilation in reversed(encoders): + if skip_connections and decoder_args: + skip_in_chan = enc_out_chan + else: + skip_in_chan = 0 + decoder_args.append( + (enc_out_chan + skip_in_chan, enc_in_chan, enc_kernel_size, enc_stride, enc_padding, enc_dilation) + ) + return tuple(decoder_args) + + +def make_unet_encoder_decoder_args(encoder_args, decoder_args): + encoder_args = tuple( + ( + in_chan, + out_chan, + tuple(kernel_size), + tuple(stride), + tuple([n // 2 for n in kernel_size]) if padding == "auto" else tuple(padding), + tuple(dilation) + ) + for in_chan, out_chan, kernel_size, stride, padding, dilation in encoder_args + ) + + if decoder_args == "auto": + decoder_args = unet_decoder_args( + encoder_args, + skip_connections=True, + ) + else: + decoder_args = tuple( + ( + in_chan, + out_chan, + tuple(kernel_size), + tuple(stride), + tuple([n // 2 for n in kernel_size]) if padding == "auto" else padding, + tuple(dilation), + output_padding, + ) + for in_chan, out_chan, kernel_size, stride, padding, 
dilation, output_padding in decoder_args + ) + + return encoder_args, decoder_args + + +DCUNET_ARCHITECTURES = { + "DCUNet-10": make_unet_encoder_decoder_args( + # Encoders: + # (in_chan, out_chan, kernel_size, stride, padding, dilation) + ( + (1, 32, (7, 5), (2, 2), "auto", (1,1)), + (32, 64, (7, 5), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + ), + # Decoders: automatic inverse + "auto", + ), + "DCUNet-16": make_unet_encoder_decoder_args( + # Encoders: + # (in_chan, out_chan, kernel_size, stride, padding, dilation) + ( + (1, 32, (7, 5), (2, 2), "auto", (1,1)), + (32, 32, (7, 5), (2, 1), "auto", (1,1)), + (32, 64, (7, 5), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + ), + # Decoders: automatic inverse + "auto", + ), + "DCUNet-20": make_unet_encoder_decoder_args( + # Encoders: + # (in_chan, out_chan, kernel_size, stride, padding, dilation) + ( + (1, 32, (7, 1), (1, 1), "auto", (1,1)), + (32, 32, (1, 7), (1, 1), "auto", (1,1)), + (32, 64, (7, 5), (2, 2), "auto", (1,1)), + (64, 64, (7, 5), (2, 1), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (64, 64, (5, 3), (2, 2), "auto", (1,1)), + (64, 90, (5, 3), (2, 1), "auto", (1,1)), + ), + # Decoders: automatic inverse + "auto", + ), + "DilDCUNet-v2": make_unet_encoder_decoder_args( # architecture used in SGMSE / Interspeech paper + # Encoders: + # (in_chan, out_chan, kernel_size, stride, padding, dilation) + ( + (1, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 64, (4, 4), (2, 1), "auto", (2, 1)), + 
(64, 128, (4, 4), (2, 2), "auto", (4, 1)), + (128, 256, (4, 4), (2, 2), "auto", (8, 1)), + ), + # Decoders: automatic inverse + "auto", + ), +} + + +@BackboneRegistry.register("dcunet") +class DCUNet(nn.Module): + @staticmethod + def add_argparse_args(parser): + parser.add_argument("--dcunet-architecture", type=str, default="DilDCUNet-v2", choices=DCUNET_ARCHITECTURES.keys(), help="The concrete DCUNet architecture. 'DilDCUNet-v2' by default.") + parser.add_argument("--dcunet-time-embedding", type=str, choices=("gfp", "ds", "none"), default="gfp", help="Timestep embedding style. 'gfp' (Gaussian Fourier Projections) by default.") + parser.add_argument("--dcunet-temb-layers-global", type=int, default=1, help="Number of global linear+activation layers for the time embedding. 1 by default.") + parser.add_argument("--dcunet-temb-layers-local", type=int, default=1, help="Number of local (per-encoder/per-decoder) linear+activation layers for the time embedding. 1 by default.") + parser.add_argument("--dcunet-temb-activation", type=str, default="silu", help="The (complex) activation to use between all (global&local) time embedding layers.") + parser.add_argument("--dcunet-time-embedding-complex", action="store_true", help="Use complex-valued timestep embedding. Compatible with 'gfp' and 'ds' embeddings.") + parser.add_argument("--dcunet-fix-length", type=str, default="pad", choices=("pad", "trim", "none"), help="DCUNet strategy to 'fix' mismatched input timespan. 'pad' by default.") + parser.add_argument("--dcunet-mask-bound", type=str, choices=("tanh", "sigmoid", "none"), default="none", help="DCUNet output bounding strategy. 'none' by default.") + parser.add_argument("--dcunet-norm-type", type=str, choices=("bN", "CbN"), default="bN", help="The type of norm to use within each encoder and decoder layer. 
'bN' (real/imaginary separate batch norm) by default.") + parser.add_argument("--dcunet-activation", type=str, choices=("leaky_relu", "relu", "silu"), default="leaky_relu", help="The activation to use within each encoder and decoder layer. 'leaky_relu' by default.") + return parser + + def __init__( + self, + dcunet_architecture: str = "DilDCUNet-v2", + dcunet_time_embedding: str = "gfp", + dcunet_temb_layers_global: int = 2, + dcunet_temb_layers_local: int = 1, + dcunet_temb_activation: str = "silu", + dcunet_time_embedding_complex: bool = False, + dcunet_fix_length: str = "pad", + dcunet_mask_bound: str = "none", + dcunet_norm_type: str = "bN", + dcunet_activation: str = "relu", + embed_dim: int = 128, + **kwargs + ): + super().__init__() + + self.architecture = dcunet_architecture + self.fix_length_mode = (dcunet_fix_length if dcunet_fix_length != "none" else None) + self.norm_type = dcunet_norm_type + self.activation = dcunet_activation + self.input_channels = 2 # for x_t and y -- note that this is 2 rather than 4, because we directly treat complex channels in this DNN + self.time_embedding = (dcunet_time_embedding if dcunet_time_embedding != "none" else None) + self.time_embedding_complex = dcunet_time_embedding_complex + self.temb_layers_global = dcunet_temb_layers_global + self.temb_layers_local = dcunet_temb_layers_local + self.temb_activation = dcunet_temb_activation + conf_encoders, conf_decoders = DCUNET_ARCHITECTURES[dcunet_architecture] + + # Replace `input_channels` in encoders config + _replaced_input_channels, *rest = conf_encoders[0] + encoders = ((self.input_channels, *rest), *conf_encoders[1:]) + decoders = conf_decoders + self.encoders_stride_product = np.prod( + [enc_stride for _, _, _, enc_stride, _, _ in encoders], axis=0 + ) + + # Prepare kwargs for encoder and decoder (to potentially be modified before layer instantiation) + encoder_decoder_kwargs = dict( + norm_type=self.norm_type, activation=self.activation, + 
                                           temb_layers=self.temb_layers_local, temb_activation=self.temb_activation)

        # Instantiate (global) time embedding layer.
        # `embed_ops` is a pipeline applied to the scalar diffusion time t in forward().
        embed_ops = []
        if self.time_embedding is not None:
            complex_valued = self.time_embedding_complex
            if self.time_embedding == "gfp":
                # Gaussian Fourier projection of the diffusion time
                embed_ops += [GaussianFourierProjection(embed_dim=embed_dim, complex_valued=complex_valued)]
                encoder_decoder_kwargs["embed_dim"] = embed_dim
            elif self.time_embedding == "ds":
                # DDPM-style diffusion-step embedding
                embed_ops += [DiffusionStepEmbedding(embed_dim=embed_dim, complex_valued=complex_valued)]
                encoder_decoder_kwargs["embed_dim"] = embed_dim

            if self.time_embedding_complex:
                assert self.time_embedding in ("gfp", "ds"), "Complex timestep embedding only available for gfp and ds"
                encoder_decoder_kwargs["complex_time_embedding"] = True
            # Optional extra (complex) MLP layers applied globally to the time embedding
            for _ in range(self.temb_layers_global):
                embed_ops += [
                    ComplexLinear(embed_dim, embed_dim, complex_valued=True),
                    OnReIm(get_activation(dcunet_temb_activation))
                ]
        self.embed = nn.Sequential(*embed_ops)

        ### Instantiate DCUNet layers ###
        # The last "decoder" spec is consumed as the output layer; hence decoders[:-1] below
        # and the `len(encoders) == len(decoders) + 1` invariant.
        output_layer = ComplexConvTranspose2d(*decoders[-1])
        encoders = [DCUNetComplexEncoderBlock(*args, **encoder_decoder_kwargs) for args in encoders]
        decoders = [DCUNetComplexDecoderBlock(*args, **encoder_decoder_kwargs) for args in decoders[:-1]]

        self.mask_bound = (dcunet_mask_bound if dcunet_mask_bound != "none" else None)
        if self.mask_bound is not None:
            raise NotImplementedError("sorry, mask bounding not implemented at the moment")
            # TODO we can't use nn.Sequential since the ComplexConvTranspose2d needs a second `output_size` argument
            #operations = (output_layer, complex_nn.BoundComplexMask(self.mask_bound))
            #output_layer = nn.Sequential(*[x for x in operations if x is not None])

        assert len(encoders) == len(decoders) + 1
        self.encoders = nn.ModuleList(encoders)
        self.decoders = nn.ModuleList(decoders)
        self.output_layer = output_layer or nn.Identity()

    def forward(self, spec, t) -> Tensor:
        """
        Input shape is expected to be $(batch, nfreqs, time)$, with $nfreqs - 1$ divisible
        by $f_0 * f_1 * ... * f_N$ where $f_k$ are the frequency strides of the encoders,
        and $time - 1$ is divisible by $t_0 * t_1 * ... * t_N$ where $t_N$ are the time
        strides of the encoders.

        Args:
            spec (Tensor): complex spectrogram tensor. 1D, 2D or 3D tensor, time last.
            t (Tensor): diffusion time(s); embedded via `self.embed` when a time
                embedding is configured.

        Returns:
            Tensor, of shape (batch, time) or (time).
        """
        # TF-rep shape: (batch, self.input_channels, n_fft, frames)
        # Estimate mask from time-frequency representation.
        x_in = self.fix_input_dims(spec)
        x = x_in
        # `t+0j` promotes the (real) time to complex so the complex embedding ops accept it.
        t_embed = self.embed(t+0j) if self.time_embedding is not None else None

        enc_outs = []
        for idx, enc in enumerate(self.encoders):
            x = enc(x, t_embed)
            # UNet skip connection
            enc_outs.append(x)
        # Deepest encoder output is not used as a skip (enc_outs[:-1]); decoders consume
        # skips in reverse order and concatenate along the channel dimension.
        for (enc_out, dec) in zip(reversed(enc_outs[:-1]), self.decoders):
            x = dec(x, t_embed, output_size=enc_out.shape)
            x = torch.cat([x, enc_out], dim=1)

        output = self.output_layer(x, output_size=x_in.shape)
        # output shape: (batch, 1, n_fft, frames)
        output = self.fix_output_dims(output, spec)
        return output

    def fix_input_dims(self, x):
        """Pad/trim `x` so its time dimension is compatible with the encoder strides."""
        return _fix_dcu_input_dims(
            self.fix_length_mode, x, torch.from_numpy(self.encoders_stride_product)
        )

    def fix_output_dims(self, out, x):
        """Restore the original time length of `x` on the network output `out`."""
        return _fix_dcu_output_dims(self.fix_length_mode, out, x)


def _fix_dcu_input_dims(fix_length_mode, x, encoders_stride_product):
    """Pad or trim `x` to a length compatible with DCUNet.

    `x` is expected as (batch, ch, freq + 1, time + 1); the frequency dimension must
    already be compatible (no automatic fix), while the time dimension is padded or
    trimmed according to `fix_length_mode` ("pad" / "trim" / None).
    """
    freq_prod = int(encoders_stride_product[0])
    time_prod = int(encoders_stride_product[1])
    if (x.shape[2] - 1) % freq_prod:
        raise TypeError(
            f"Input shape must be [batch, ch, freq + 1, time + 1] with freq divisible by "
            f"{freq_prod}, got {x.shape} instead"
        )
    time_remainder = (x.shape[3] - 1) % time_prod
    if time_remainder:
        if fix_length_mode is None:
            raise TypeError(
                f"Input shape must be [batch, ch, freq + 1, time + 1] with time divisible by "
                f"{time_prod}, got {x.shape} instead. Set the 'fix_length_mode' argument "
                f"in 'DCUNet' to 'pad' or 'trim' to fix shapes automatically."
            )
        elif fix_length_mode == "pad":
            pad_shape = [0, time_prod - time_remainder]
            x = nn.functional.pad(x, pad_shape, mode="constant")
        elif fix_length_mode == "trim":
            # Negative pad on the last dimension trims `time_remainder` frames.
            pad_shape = [0, -time_remainder]
            x = nn.functional.pad(x, pad_shape, mode="constant")
        else:
            raise ValueError(f"Unknown fix_length mode '{fix_length_mode}'")
    return x


def _fix_dcu_output_dims(fix_length_mode, out, x):
    """Fix shape of `out` to the original shape of `x` by padding/cropping.

    NOTE(review): `fix_length_mode` is accepted but unused here — the output is always
    padded (or negatively padded, i.e. cropped) to the input length.
    """
    inp_len = x.shape[-1]
    output_len = out.shape[-1]
    return nn.functional.pad(out, [0, inp_len - output_len])


def _get_norm(norm_type):
    """Map a norm-type string to a (complex-capable) normalization constructor.

    "CbN" -> ComplexBatchNorm; "bN" -> BatchNorm applied separately to Re/Im parts.
    """
    if norm_type == "CbN":
        return ComplexBatchNorm
    elif norm_type == "bN":
        return partial(OnReIm, BatchNorm)
    else:
        raise NotImplementedError(f"Unknown norm type: {norm_type}")


class DCUNetComplexEncoderBlock(nn.Module):
    """One DCUNet encoder stage: complex conv -> (optional time-emb add) -> norm -> act."""

    def __init__(
        self,
        in_chan,
        out_chan,
        kernel_size,
        stride,
        padding,
        dilation,
        norm_type="bN",
        activation="leaky_relu",
        embed_dim=None,
        complex_time_embedding=False,
        temb_layers=1,
        temb_activation="silu"
    ):
        super().__init__()

        self.in_chan = in_chan
        self.out_chan = out_chan
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.temb_layers = temb_layers
        self.temb_activation = temb_activation
        self.complex_time_embedding = complex_time_embedding

        # bias only when no norm follows (norm would cancel it out)
        self.conv = ComplexConv2d(
            in_chan, out_chan, kernel_size, stride, padding, bias=norm_type is None, dilation=dilation
        )
        self.norm = _get_norm(norm_type)(out_chan)
        self.activation = OnReIm(get_activation(activation))
        self.embed_dim = embed_dim
        if self.embed_dim is not None:
            # Per-block MLP that maps the global time embedding to a per-channel bias map.
            ops = []
            for _ in range(max(0, self.temb_layers - 1)):
                ops += [
                    ComplexLinear(self.embed_dim, self.embed_dim, complex_valued=True),
                    OnReIm(get_activation(self.temb_activation))
                ]
            ops += [
                FeatureMapDense(self.embed_dim, self.out_chan, complex_valued=True),
                OnReIm(get_activation(self.temb_activation))
            ]
            self.embed_layer = nn.Sequential(*ops)

    def forward(self, x, t_embed):
        y = self.conv(x)
        if self.embed_dim is not None:
            y = y + self.embed_layer(t_embed)
        return self.activation(self.norm(y))


class DCUNetComplexDecoderBlock(nn.Module):
    """One DCUNet decoder stage: complex transposed conv -> (optional time-emb add) -> norm -> act."""

    def __init__(
        self,
        in_chan,
        out_chan,
        kernel_size,
        stride,
        padding,
        dilation,
        output_padding=(0, 0),
        norm_type="bN",
        activation="leaky_relu",
        embed_dim=None,
        temb_layers=1,
        temb_activation='swish',  # NOTE(review): encoder default is 'silu' — presumably equivalent; confirm in get_activation
        complex_time_embedding=False,
    ):
        super().__init__()

        self.in_chan = in_chan
        self.out_chan = out_chan
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.output_padding = output_padding
        self.complex_time_embedding = complex_time_embedding
        self.temb_layers = temb_layers
        self.temb_activation = temb_activation

        self.deconv = ComplexConvTranspose2d(
            in_chan, out_chan, kernel_size, stride, padding, output_padding, dilation=dilation, bias=norm_type is None
        )
        self.norm = _get_norm(norm_type)(out_chan)
        self.activation = OnReIm(get_activation(activation))
        self.embed_dim = embed_dim
        if self.embed_dim is not None:
            ops = []
            for _ in range(max(0, self.temb_layers - 1)):
                ops += [
                    ComplexLinear(self.embed_dim, self.embed_dim, complex_valued=True),
                    OnReIm(get_activation(self.temb_activation))
                ]
            ops += [
                FeatureMapDense(self.embed_dim, self.out_chan, complex_valued=True),
                OnReIm(get_activation(self.temb_activation))
            ]
            self.embed_layer = nn.Sequential(*ops)

    def forward(self, x, t_embed, output_size=None):
        # `output_size` resolves the transposed-conv output-shape ambiguity to match the skip.
        y = self.deconv(x, output_size=output_size)
        if self.embed_dim is not None:
            y = y + self.embed_layer(t_embed)
        return self.activation(self.norm(y))
# From https://github.com/chanil1218/DCUnet.pytorch/blob/2dcdd30804be47a866fde6435cbb7e2f81585213/models/layers/complexnn.py
class ComplexBatchNorm(torch.nn.Module):
    """Complex-valued batch normalization.

    Whitens complex activations with the full 2x2 real/imag covariance matrix
    (not just per-part variances) and optionally applies a learned 2x2 affine
    transform (Wrr, Wri, Wii) plus complex bias (Br, Bi). Input and output are
    complex tensors with channels on dim 1.
    """

    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=False):
        super(ComplexBatchNorm, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.track_running_stats = track_running_stats
        if self.affine:
            # 2x2 symmetric weight matrix entries (Wri shared off-diagonal) + complex bias.
            self.Wrr = torch.nn.Parameter(torch.Tensor(num_features))
            self.Wri = torch.nn.Parameter(torch.Tensor(num_features))
            self.Wii = torch.nn.Parameter(torch.Tensor(num_features))
            self.Br = torch.nn.Parameter(torch.Tensor(num_features))
            self.Bi = torch.nn.Parameter(torch.Tensor(num_features))
        else:
            self.register_parameter('Wrr', None)
            self.register_parameter('Wri', None)
            self.register_parameter('Wii', None)
            self.register_parameter('Br', None)
            self.register_parameter('Bi', None)
        if self.track_running_stats:
            # Running mean (RMr, RMi) and running covariance (RVrr, RVri, RVii).
            self.register_buffer('RMr', torch.zeros(num_features))
            self.register_buffer('RMi', torch.zeros(num_features))
            self.register_buffer('RVrr', torch.ones (num_features))
            self.register_buffer('RVri', torch.zeros(num_features))
            self.register_buffer('RVii', torch.ones (num_features))
            self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))
        else:
            self.register_parameter('RMr', None)
            self.register_parameter('RMi', None)
            self.register_parameter('RVrr', None)
            self.register_parameter('RVri', None)
            self.register_parameter('RVii', None)
            self.register_parameter('num_batches_tracked', None)
        self.reset_parameters()

    def reset_running_stats(self):
        """Reset running mean to 0 and running covariance to the identity."""
        if self.track_running_stats:
            self.RMr.zero_()
            self.RMi.zero_()
            self.RVrr.fill_(1)
            self.RVri.zero_()
            self.RVii.fill_(1)
            self.num_batches_tracked.zero_()

    def reset_parameters(self):
        """Reset running stats and initialize the affine transform."""
        self.reset_running_stats()
        if self.affine:
            self.Br.data.zero_()
            self.Bi.data.zero_()
            self.Wrr.data.fill_(1)
            self.Wri.data.uniform_(-.9, +.9)  # W will be positive-definite
            self.Wii.data.fill_(1)

    def _check_input_dim(self, xr, xi):
        assert(xr.shape == xi.shape)
        assert(xr.size(1) == self.num_features)

    def forward(self, x):
        xr, xi = x.real, x.imag
        self._check_input_dim(xr, xi)

        exponential_average_factor = 0.0

        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            if self.momentum is None:  # use cumulative moving average
                exponential_average_factor = 1.0 / self.num_batches_tracked.item()
            else:  # use exponential moving average
                exponential_average_factor = self.momentum

        #
        # NOTE: The precise meaning of the "training flag" is:
        #       True:  Normalize using batch statistics, update running statistics
        #              if they are being collected.
        #       False: Normalize using running statistics, ignore batch statistics.
        #
        training = self.training or not self.track_running_stats
        # All dims except channel (dim 1) are reduced over.
        redux = [i for i in reversed(range(xr.dim())) if i!=1]
        vdim = [1] * xr.dim()
        vdim[1] = xr.size(1)

        #
        # Mean M Computation and Centering
        #
        # Includes running mean update if training and running.
        #
        if training:
            Mr, Mi = xr, xi
            for d in redux:
                Mr = Mr.mean(d, keepdim=True)
                Mi = Mi.mean(d, keepdim=True)
            if self.track_running_stats:
                self.RMr.lerp_(Mr.squeeze(), exponential_average_factor)
                self.RMi.lerp_(Mi.squeeze(), exponential_average_factor)
        else:
            Mr = self.RMr.view(vdim)
            Mi = self.RMi.view(vdim)
        xr, xi = xr-Mr, xi-Mi

        #
        # Variance Matrix V Computation
        #
        # Includes epsilon numerical stabilizer/Tikhonov regularizer.
        # Includes running variance update if training and running.
        #
        if training:
            Vrr = xr * xr
            Vri = xr * xi
            Vii = xi * xi
            for d in redux:
                Vrr = Vrr.mean(d, keepdim=True)
                Vri = Vri.mean(d, keepdim=True)
                Vii = Vii.mean(d, keepdim=True)
            if self.track_running_stats:
                self.RVrr.lerp_(Vrr.squeeze(), exponential_average_factor)
                self.RVri.lerp_(Vri.squeeze(), exponential_average_factor)
                self.RVii.lerp_(Vii.squeeze(), exponential_average_factor)
        else:
            Vrr = self.RVrr.view(vdim)
            Vri = self.RVri.view(vdim)
            Vii = self.RVii.view(vdim)
        # eps is added only to the diagonal (Tikhonov regularization of V).
        Vrr = Vrr + self.eps
        Vri = Vri
        Vii = Vii + self.eps

        #
        # Matrix Inverse Square Root U = V^-0.5
        #
        # sqrt of a 2x2 matrix,
        # - https://en.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix
        tau   = Vrr + Vii
        # delta = det(V) = Vrr*Vii - Vri^2  (addcmul with value=-1)
        delta = torch.addcmul(Vrr * Vii, Vri, Vri, value=-1)
        s     = delta.sqrt()
        t     = (tau + 2*s).sqrt()

        # matrix inverse, http://mathworld.wolfram.com/MatrixInverse.html
        rst   = (s * t).reciprocal()
        Urr   = (s + Vii) * rst
        Uii   = (s + Vrr) * rst
        Uri   = (  - Vri) * rst

        #
        # Optionally left-multiply U by affine weights W to produce combined
        # weights Z, left-multiply the inputs by Z, then optionally bias them.
        #
        # y = Zx + B
        # y = WUx + B
        # y = [Wrr Wri][Urr Uri] [xr] + [Br]
        #     [Wir Wii][Uir Uii] [xi]   [Bi]
        #
        if self.affine:
            Wrr, Wri, Wii = self.Wrr.view(vdim), self.Wri.view(vdim), self.Wii.view(vdim)
            Zrr = (Wrr * Urr) + (Wri * Uri)
            Zri = (Wrr * Uri) + (Wri * Uii)
            Zir = (Wri * Urr) + (Wii * Uri)
            Zii = (Wri * Uri) + (Wii * Uii)
        else:
            Zrr, Zri, Zir, Zii = Urr, Uri, Uri, Uii

        yr = (Zrr * xr) + (Zri * xi)
        yi = (Zir * xr) + (Zii * xi)

        if self.affine:
            yr = yr + self.Br.view(vdim)
            yi = yi + self.Bi.view(vdim)

        # Re-pack real/imag parts into a complex tensor.
        return torch.view_as_complex(torch.stack([yr, yi], dim=-1))

    def extra_repr(self):
        return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \
               'track_running_stats={track_running_stats}'.format(**self.__dict__)
diff --git a/modules/sgmse/ncsnpp.py b/modules/sgmse/ncsnpp.py
new file mode 100644
index 00000000..f5c810e7
--- /dev/null
+++ b/modules/sgmse/ncsnpp.py
@@ -0,0 +1,419 @@
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: skip-file

from .ncsnpp_utils import layers, layerspp, normalization
import torch.nn as nn
import functools
import torch
import numpy as np

from .shared import BackboneRegistry

ResnetBlockDDPM = layerspp.ResnetBlockDDPMpp
ResnetBlockBigGAN = layerspp.ResnetBlockBigGANpp
Combine = layerspp.Combine
conv3x3 = layerspp.conv3x3
conv1x1 = layerspp.conv1x1
get_act = layers.get_act
get_normalization = normalization.get_normalization
default_initializer = layers.default_init


@BackboneRegistry.register("ncsnpp")
class NCSNpp(nn.Module):
    """NCSN++ model, adapted from https://github.com/yang-song/score_sde repository

    The network is stored as a flat `nn.ModuleList` (`self.all_modules`), built in
    `__init__` in the exact order `forward` consumes it via the `m_idx` counter —
    the construction order and the forward walk must stay in lockstep.
    Input/output are complex spectrograms with 2 channels (x stacked with the
    conditioner y); internally real/imag parts are split into 4 real channels.
    """

    @staticmethod
    def add_argparse_args(parser):
        """Register the model's CLI arguments on `parser` and return it."""
        parser.add_argument("--ch_mult",type=int, nargs='+', default=[1,1,2,2,2,2,2])
        parser.add_argument("--num_res_blocks", type=int, default=2)
        parser.add_argument("--attn_resolutions", type=int, nargs='+', default=[16])
        parser.add_argument("--no-centered", dest="centered", action="store_false", help="The data is not centered [-1, 1]")
        parser.add_argument("--centered", dest="centered", action="store_true", help="The data is centered [-1, 1]")
        parser.set_defaults(centered=True)
        return parser

    def __init__(self,
        scale_by_sigma = True,
        nonlinearity = 'swish',
        nf = 128,
        ch_mult = (1, 1, 2, 2, 2, 2, 2),
        num_res_blocks = 2,
        attn_resolutions = (16,),
        resamp_with_conv = True,
        conditional = True,
        fir = True,
        fir_kernel = [1, 3, 3, 1],
        skip_rescale = True,
        resblock_type = 'biggan',
        progressive = 'output_skip',
        progressive_input = 'input_skip',
        progressive_combine = 'sum',
        init_scale = 0.,
        fourier_scale = 16,
        image_size = 256,
        embedding_type = 'fourier',
        dropout = .0,
        centered = True,
        **unused_kwargs
    ):
        super().__init__()
        self.act = act = get_act(nonlinearity)

        self.nf = nf = nf
        ch_mult = ch_mult
        self.num_res_blocks = num_res_blocks = num_res_blocks
        self.attn_resolutions = attn_resolutions = attn_resolutions
        dropout = dropout
        resamp_with_conv = resamp_with_conv
        self.num_resolutions = num_resolutions = len(ch_mult)
        # Spatial resolution halves at each level of the U-Net.
        self.all_resolutions = all_resolutions = [image_size // (2 ** i) for i in range(num_resolutions)]

        self.conditional = conditional = conditional  # noise-conditional
        self.centered = centered
        self.scale_by_sigma = scale_by_sigma

        fir = fir
        fir_kernel = fir_kernel
        self.skip_rescale = skip_rescale = skip_rescale
        self.resblock_type = resblock_type = resblock_type.lower()
        self.progressive = progressive = progressive.lower()
        self.progressive_input = progressive_input = progressive_input.lower()
        self.embedding_type = embedding_type = embedding_type.lower()
        init_scale = init_scale
        assert progressive in ['none', 'output_skip', 'residual']
        assert progressive_input in ['none', 'input_skip', 'residual']
        assert embedding_type in ['fourier', 'positional']
        combine_method = progressive_combine.lower()
        combiner = functools.partial(Combine, method=combine_method)

        num_channels = 4  # x.real, x.imag, y.real, y.imag
        # Final projection back to 2 real channels = 1 complex channel (real, imag).
        self.output_layer = nn.Conv2d(num_channels, 2, 1)

        modules = []
        # timestep/noise_level embedding
        if embedding_type == 'fourier':
            # Gaussian Fourier features embeddings.
            modules.append(layerspp.GaussianFourierProjection(
                embedding_size=nf, scale=fourier_scale
            ))
            embed_dim = 2 * nf
        elif embedding_type == 'positional':
            embed_dim = nf
        else:
            raise ValueError(f'embedding type {embedding_type} unknown.')

        if conditional:
            # Two-layer MLP mapping the raw embedding to the temb fed to ResNet blocks.
            modules.append(nn.Linear(embed_dim, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)
            modules.append(nn.Linear(nf * 4, nf * 4))
            modules[-1].weight.data = default_initializer()(modules[-1].weight.shape)
            nn.init.zeros_(modules[-1].bias)

        AttnBlock = functools.partial(layerspp.AttnBlockpp,
                                      init_scale=init_scale, skip_rescale=skip_rescale)

        Upsample = functools.partial(layerspp.Upsample,
                                     with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive == 'output_skip':
            self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive == 'residual':
            pyramid_upsample = functools.partial(layerspp.Upsample, fir=fir,
                                                 fir_kernel=fir_kernel, with_conv=True)

        Downsample = functools.partial(layerspp.Downsample, with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel)

        if progressive_input == 'input_skip':
            self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False)
        elif progressive_input == 'residual':
            pyramid_downsample = functools.partial(layerspp.Downsample,
                                                   fir=fir, fir_kernel=fir_kernel, with_conv=True)

        if resblock_type == 'ddpm':
            ResnetBlock = functools.partial(ResnetBlockDDPM, act=act,
                                            dropout=dropout, init_scale=init_scale,
                                            skip_rescale=skip_rescale, temb_dim=nf * 4)

        elif resblock_type == 'biggan':
            ResnetBlock = functools.partial(ResnetBlockBigGAN, act=act,
                                            dropout=dropout, fir=fir, fir_kernel=fir_kernel,
                                            init_scale=init_scale, skip_rescale=skip_rescale, temb_dim=nf * 4)

        else:
            raise ValueError(f'resblock type {resblock_type} unrecognized.')

        # Downsampling block

        channels = num_channels
        if progressive_input != 'none':
            input_pyramid_ch = channels

        modules.append(conv3x3(channels, nf))
        # hs_c mirrors the channel counts of the skip-connection stack built in forward().
        hs_c = [nf]

        in_ch = nf
        for i_level in range(num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(num_res_blocks):
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch, out_ch=out_ch))
                in_ch = out_ch

                if all_resolutions[i_level] in attn_resolutions:
                    modules.append(AttnBlock(channels=in_ch))
                hs_c.append(in_ch)

            if i_level != num_resolutions - 1:
                if resblock_type == 'ddpm':
                    modules.append(Downsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(down=True, in_ch=in_ch))

                if progressive_input == 'input_skip':
                    modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch))
                    if combine_method == 'cat':
                        in_ch *= 2

                elif progressive_input == 'residual':
                    modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch))
                    input_pyramid_ch = in_ch

                hs_c.append(in_ch)

        # Bottleneck: ResNet -> Attention -> ResNet at the lowest resolution.
        in_ch = hs_c[-1]
        modules.append(ResnetBlock(in_ch=in_ch))
        modules.append(AttnBlock(channels=in_ch))
        modules.append(ResnetBlock(in_ch=in_ch))

        pyramid_ch = 0
        # Upsampling block
        for i_level in reversed(range(num_resolutions)):
            for i_block in range(num_res_blocks + 1):  # +1 blocks in upsampling because of skip connection from combiner (after downsampling)
                out_ch = nf * ch_mult[i_level]
                modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch))
                in_ch = out_ch

            if all_resolutions[i_level] in attn_resolutions:
                modules.append(AttnBlock(channels=in_ch))

            if progressive != 'none':
                if i_level == num_resolutions - 1:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, in_ch, bias=True))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name.')
                else:
                    if progressive == 'output_skip':
                        modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                                    num_channels=in_ch, eps=1e-6))
                        modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale))
                        pyramid_ch = channels
                    elif progressive == 'residual':
                        modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch))
                        pyramid_ch = in_ch
                    else:
                        raise ValueError(f'{progressive} is not a valid name')

            if i_level != 0:
                if resblock_type == 'ddpm':
                    modules.append(Upsample(in_ch=in_ch))
                else:
                    modules.append(ResnetBlock(in_ch=in_ch, up=True))

        # All skip-connection channels must have been consumed.
        assert not hs_c

        if progressive != 'output_skip':
            modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32),
                                        num_channels=in_ch, eps=1e-6))
            modules.append(conv3x3(in_ch, channels, init_scale=init_scale))

        self.all_modules = nn.ModuleList(modules)


    def forward(self, x, time_cond):
        # timestep/noise_level embedding; only for continuous training
        # `m_idx` walks `self.all_modules` in the exact order __init__ appended them.
        modules = self.all_modules
        m_idx = 0

        # Convert real and imaginary parts of (x,y) into four channel dimensions
        x = torch.cat((x[:,[0],:,:].real, x[:,[0],:,:].imag,
                       x[:,[1],:,:].real, x[:,[1],:,:].imag), dim=1)

        if self.embedding_type == 'fourier':
            # Gaussian Fourier features embeddings.
            used_sigmas = time_cond
            temb = modules[m_idx](torch.log(used_sigmas))
            m_idx += 1

        elif self.embedding_type == 'positional':
            # Sinusoidal positional embeddings.
            # NOTE(review): `self.sigmas` is never assigned in __init__ of this class —
            # this branch would raise AttributeError as written; confirm intended usage.
            timesteps = time_cond
            used_sigmas = self.sigmas[time_cond.long()]
            temb = layers.get_timestep_embedding(timesteps, self.nf)

        else:
            raise ValueError(f'embedding type {self.embedding_type} unknown.')

        if self.conditional:
            temb = modules[m_idx](temb)
            m_idx += 1
            temb = modules[m_idx](self.act(temb))
            m_idx += 1
        else:
            temb = None

        if not self.centered:
            # If input data is in [0, 1]
            x = 2 * x - 1.

        # Downsampling block
        input_pyramid = None
        if self.progressive_input != 'none':
            input_pyramid = x

        # Input layer: Conv2d: 4ch -> 128ch
        hs = [modules[m_idx](x)]
        m_idx += 1

        # Down path in U-Net
        for i_level in range(self.num_resolutions):
            # Residual blocks for this resolution
            for i_block in range(self.num_res_blocks):
                h = modules[m_idx](hs[-1], temb)
                m_idx += 1
                # Attention layer (optional)
                if h.shape[-2] in self.attn_resolutions:  # edit: check H dim (-2) not W dim (-1)
                    h = modules[m_idx](h)
                    m_idx += 1
                hs.append(h)

            # Downsampling
            if i_level != self.num_resolutions - 1:
                if self.resblock_type == 'ddpm':
                    h = modules[m_idx](hs[-1])
                    m_idx += 1
                else:
                    h = modules[m_idx](hs[-1], temb)
                    m_idx += 1

                if self.progressive_input == 'input_skip':  # Combine h with x
                    input_pyramid = self.pyramid_downsample(input_pyramid)
                    h = modules[m_idx](input_pyramid, h)
                    m_idx += 1

                elif self.progressive_input == 'residual':
                    input_pyramid = modules[m_idx](input_pyramid)
                    m_idx += 1
                    if self.skip_rescale:
                        # 1/sqrt(2) keeps the variance of the sum comparable to its inputs.
                        input_pyramid = (input_pyramid + h) / np.sqrt(2.)
                    else:
                        input_pyramid = input_pyramid + h
                    h = input_pyramid
                hs.append(h)

        h = hs[-1]  # actualy equal to: h = h
        h = modules[m_idx](h, temb)  # ResNet block
        m_idx += 1
        h = modules[m_idx](h)  # Attention block
        m_idx += 1
        h = modules[m_idx](h, temb)  # ResNet block
        m_idx += 1

        pyramid = None

        # Upsampling block
        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = modules[m_idx](torch.cat([h, hs.pop()], dim=1), temb)
                m_idx += 1

            # edit: from -1 to -2
            if h.shape[-2] in self.attn_resolutions:
                h = modules[m_idx](h)
                m_idx += 1

            if self.progressive != 'none':
                if i_level == self.num_resolutions - 1:
                    if self.progressive == 'output_skip':
                        pyramid = self.act(modules[m_idx](h))  # GroupNorm
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)  # Conv2D: 256 -> 4
                        m_idx += 1
                    elif self.progressive == 'residual':
                        pyramid = self.act(modules[m_idx](h))
                        m_idx += 1
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                    else:
                        raise ValueError(f'{self.progressive} is not a valid name.')
                else:
                    if self.progressive == 'output_skip':
                        pyramid = self.pyramid_upsample(pyramid)  # Upsample
                        pyramid_h = self.act(modules[m_idx](h))  # GroupNorm
                        m_idx += 1
                        pyramid_h = modules[m_idx](pyramid_h)
                        m_idx += 1
                        pyramid = pyramid + pyramid_h
                    elif self.progressive == 'residual':
                        pyramid = modules[m_idx](pyramid)
                        m_idx += 1
                        if self.skip_rescale:
                            pyramid = (pyramid + h) / np.sqrt(2.)
                        else:
                            pyramid = pyramid + h
                        h = pyramid
                    else:
                        raise ValueError(f'{self.progressive} is not a valid name')

            # Upsampling Layer
            if i_level != 0:
                if self.resblock_type == 'ddpm':
                    h = modules[m_idx](h)
                    m_idx += 1
                else:
                    h = modules[m_idx](h, temb)  # Upspampling
                    m_idx += 1

        assert not hs

        if self.progressive == 'output_skip':
            h = pyramid
        else:
            h = self.act(modules[m_idx](h))
            m_idx += 1
            h = modules[m_idx](h)
            m_idx += 1

        # Every module must have been consumed exactly once.
        assert m_idx == len(modules), "Implementation error"
        if self.scale_by_sigma:
            used_sigmas = used_sigmas.reshape((x.shape[0], *([1] * len(x.shape[1:]))))
            h = h / used_sigmas

        # Convert back to complex number
        h = self.output_layer(h)
        h = torch.permute(h, (0, 2, 3, 1)).contiguous()
        h = torch.view_as_complex(h)[:,None, :, :]
        return h
diff --git a/modules/sgmse/ncsnpp_utils/layers.py b/modules/sgmse/ncsnpp_utils/layers.py
new file mode 100644
index 00000000..b0d6e87f
--- /dev/null
+++ b/modules/sgmse/ncsnpp_utils/layers.py
@@ -0,0 +1,662 @@
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: skip-file
+""" +import math +import string +from functools import partial +import torch.nn as nn +import torch +import torch.nn.functional as F +import numpy as np +from .normalization import ConditionalInstanceNorm2dPlus + + +def get_act(config): + """Get activation functions from the config file.""" + + if config == 'elu': + return nn.ELU() + elif config == 'relu': + return nn.ReLU() + elif config == 'lrelu': + return nn.LeakyReLU(negative_slope=0.2) + elif config == 'swish': + return nn.SiLU() + else: + raise NotImplementedError('activation function does not exist!') + + +def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=0): + """1x1 convolution. Same as NCSNv1/v2.""" + conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation, + padding=padding) + init_scale = 1e-10 if init_scale == 0 else init_scale + conv.weight.data *= init_scale + conv.bias.data *= init_scale + return conv + + +def variance_scaling(scale, mode, distribution, + in_axis=1, out_axis=0, + dtype=torch.float32, + device='cpu'): + """Ported from JAX. """ + + def _compute_fans(shape, in_axis=1, out_axis=0): + receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis] + fan_in = shape[in_axis] * receptive_field_size + fan_out = shape[out_axis] * receptive_field_size + return fan_in, fan_out + + def init(shape, dtype=dtype, device=device): + fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) + if mode == "fan_in": + denominator = fan_in + elif mode == "fan_out": + denominator = fan_out + elif mode == "fan_avg": + denominator = (fan_in + fan_out) / 2 + else: + raise ValueError( + "invalid mode for variance scaling initializer: {}".format(mode)) + variance = scale / denominator + if distribution == "normal": + return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance) + elif distribution == "uniform": + return (torch.rand(*shape, dtype=dtype, device=device) * 2. - 1.) 
* np.sqrt(3 * variance) + else: + raise ValueError("invalid distribution for variance scaling initializer") + + return init + + +def default_init(scale=1.): + """The same initialization used in DDPM.""" + scale = 1e-10 if scale == 0 else scale + return variance_scaling(scale, 'fan_avg', 'uniform') + + +class Dense(nn.Module): + """Linear layer with `default_init`.""" + def __init__(self): + super().__init__() + + +def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1., padding=0): + """1x1 convolution with DDPM initialization.""" + conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias) + conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + nn.init.zeros_(conv.bias) + return conv + + +def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1): + """3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2.""" + init_scale = 1e-10 if init_scale == 0 else init_scale + conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias, + dilation=dilation, padding=padding, kernel_size=3) + conv.weight.data *= init_scale + conv.bias.data *= init_scale + return conv + + +def ddpm_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1): + """3x3 convolution with DDPM initialization.""" + conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, + dilation=dilation, bias=bias) + conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + nn.init.zeros_(conv.bias) + return conv + + ########################################################################### + # Functions below are ported over from the NCSNv1/NCSNv2 codebase: + # https://github.com/ermongroup/ncsn + # https://github.com/ermongroup/ncsnv2 + ########################################################################### + + +class CRPBlock(nn.Module): + def __init__(self, features, n_stages, act=nn.ReLU(), 
maxpool=True): + super().__init__() + self.convs = nn.ModuleList() + for i in range(n_stages): + self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) + self.n_stages = n_stages + if maxpool: + self.pool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2) + else: + self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2) + + self.act = act + + def forward(self, x): + x = self.act(x) + path = x + for i in range(self.n_stages): + path = self.pool(path) + path = self.convs[i](path) + x = path + x + return x + + +class CondCRPBlock(nn.Module): + def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()): + super().__init__() + self.convs = nn.ModuleList() + self.norms = nn.ModuleList() + self.normalizer = normalizer + for i in range(n_stages): + self.norms.append(normalizer(features, num_classes, bias=True)) + self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) + + self.n_stages = n_stages + self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2) + self.act = act + + def forward(self, x, y): + x = self.act(x) + path = x + for i in range(self.n_stages): + path = self.norms[i](path, y) + path = self.pool(path) + path = self.convs[i](path) + + x = path + x + return x + + +class RCUBlock(nn.Module): + def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()): + super().__init__() + + for i in range(n_blocks): + for j in range(n_stages): + setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False)) + + self.stride = 1 + self.n_blocks = n_blocks + self.n_stages = n_stages + self.act = act + + def forward(self, x): + for i in range(self.n_blocks): + residual = x + for j in range(self.n_stages): + x = self.act(x) + x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x) + + x += residual + return x + + +class CondRCUBlock(nn.Module): + def __init__(self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU()): + super().__init__() + + for i in 
range(n_blocks): + for j in range(n_stages): + setattr(self, '{}_{}_norm'.format(i + 1, j + 1), normalizer(features, num_classes, bias=True)) + setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False)) + + self.stride = 1 + self.n_blocks = n_blocks + self.n_stages = n_stages + self.act = act + self.normalizer = normalizer + + def forward(self, x, y): + for i in range(self.n_blocks): + residual = x + for j in range(self.n_stages): + x = getattr(self, '{}_{}_norm'.format(i + 1, j + 1))(x, y) + x = self.act(x) + x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x) + + x += residual + return x + + +class MSFBlock(nn.Module): + def __init__(self, in_planes, features): + super().__init__() + assert isinstance(in_planes, list) or isinstance(in_planes, tuple) + self.convs = nn.ModuleList() + self.features = features + + for i in range(len(in_planes)): + self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) + + def forward(self, xs, shape): + sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) + for i in range(len(self.convs)): + h = self.convs[i](xs[i]) + h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True) + sums += h + return sums + + +class CondMSFBlock(nn.Module): + def __init__(self, in_planes, features, num_classes, normalizer): + super().__init__() + assert isinstance(in_planes, list) or isinstance(in_planes, tuple) + + self.convs = nn.ModuleList() + self.norms = nn.ModuleList() + self.features = features + self.normalizer = normalizer + + for i in range(len(in_planes)): + self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) + self.norms.append(normalizer(in_planes[i], num_classes, bias=True)) + + def forward(self, xs, y, shape): + sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) + for i in range(len(self.convs)): + h = self.norms[i](xs[i], y) + h = self.convs[i](h) + h = F.interpolate(h, 
size=shape, mode='bilinear', align_corners=True) + sums += h + return sums + + +class RefineBlock(nn.Module): + def __init__(self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True): + super().__init__() + + assert isinstance(in_planes, tuple) or isinstance(in_planes, list) + self.n_blocks = n_blocks = len(in_planes) + + self.adapt_convs = nn.ModuleList() + for i in range(n_blocks): + self.adapt_convs.append(RCUBlock(in_planes[i], 2, 2, act)) + + self.output_convs = RCUBlock(features, 3 if end else 1, 2, act) + + if not start: + self.msf = MSFBlock(in_planes, features) + + self.crp = CRPBlock(features, 2, act, maxpool=maxpool) + + def forward(self, xs, output_shape): + assert isinstance(xs, tuple) or isinstance(xs, list) + hs = [] + for i in range(len(xs)): + h = self.adapt_convs[i](xs[i]) + hs.append(h) + + if self.n_blocks > 1: + h = self.msf(hs, output_shape) + else: + h = hs[0] + + h = self.crp(h) + h = self.output_convs(h) + + return h + + +class CondRefineBlock(nn.Module): + def __init__(self, in_planes, features, num_classes, normalizer, act=nn.ReLU(), start=False, end=False): + super().__init__() + + assert isinstance(in_planes, tuple) or isinstance(in_planes, list) + self.n_blocks = n_blocks = len(in_planes) + + self.adapt_convs = nn.ModuleList() + for i in range(n_blocks): + self.adapt_convs.append( + CondRCUBlock(in_planes[i], 2, 2, num_classes, normalizer, act) + ) + + self.output_convs = CondRCUBlock(features, 3 if end else 1, 2, num_classes, normalizer, act) + + if not start: + self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer) + + self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act) + + def forward(self, xs, y, output_shape): + assert isinstance(xs, tuple) or isinstance(xs, list) + hs = [] + for i in range(len(xs)): + h = self.adapt_convs[i](xs[i], y) + hs.append(h) + + if self.n_blocks > 1: + h = self.msf(hs, y, output_shape) + else: + h = hs[0] + + h = self.crp(h, y) + h = self.output_convs(h, 
y) + + return h + + +class ConvMeanPool(nn.Module): + def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False): + super().__init__() + if not adjust_padding: + conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) + self.conv = conv + else: + conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) + + self.conv = nn.Sequential( + nn.ZeroPad2d((1, 0, 1, 0)), + conv + ) + + def forward(self, inputs): + output = self.conv(inputs) + output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2], + output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. + return output + + +class MeanPoolConv(nn.Module): + def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): + super().__init__() + self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) + + def forward(self, inputs): + output = inputs + output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2], + output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. 
+ return self.conv(output) + + +class UpsampleConv(nn.Module): + def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): + super().__init__() + self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) + self.pixelshuffle = nn.PixelShuffle(upscale_factor=2) + + def forward(self, inputs): + output = inputs + output = torch.cat([output, output, output, output], dim=1) + output = self.pixelshuffle(output) + return self.conv(output) + + +class ConditionalResidualBlock(nn.Module): + def __init__(self, input_dim, output_dim, num_classes, resample=1, act=nn.ELU(), + normalization=ConditionalInstanceNorm2dPlus, adjust_padding=False, dilation=None): + super().__init__() + self.non_linearity = act + self.input_dim = input_dim + self.output_dim = output_dim + self.resample = resample + self.normalization = normalization + if resample == 'down': + if dilation > 1: + self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation) + self.normalize2 = normalization(input_dim, num_classes) + self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + else: + self.conv1 = ncsn_conv3x3(input_dim, input_dim) + self.normalize2 = normalization(input_dim, num_classes) + self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding) + conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding) + + elif resample is None: + if dilation > 1: + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + self.normalize2 = normalization(output_dim, num_classes) + self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) + else: + conv_shortcut = nn.Conv2d + self.conv1 = ncsn_conv3x3(input_dim, output_dim) + self.normalize2 = normalization(output_dim, num_classes) + self.conv2 = ncsn_conv3x3(output_dim, output_dim) + else: + raise 
Exception('invalid resample value') + + if output_dim != input_dim or resample is not None: + self.shortcut = conv_shortcut(input_dim, output_dim) + + self.normalize1 = normalization(input_dim, num_classes) + + def forward(self, x, y): + output = self.normalize1(x, y) + output = self.non_linearity(output) + output = self.conv1(output) + output = self.normalize2(output, y) + output = self.non_linearity(output) + output = self.conv2(output) + + if self.output_dim == self.input_dim and self.resample is None: + shortcut = x + else: + shortcut = self.shortcut(x) + + return shortcut + output + + +class ResidualBlock(nn.Module): + def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(), + normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1): + super().__init__() + self.non_linearity = act + self.input_dim = input_dim + self.output_dim = output_dim + self.resample = resample + self.normalization = normalization + if resample == 'down': + if dilation > 1: + self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation) + self.normalize2 = normalization(input_dim) + self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + else: + self.conv1 = ncsn_conv3x3(input_dim, input_dim) + self.normalize2 = normalization(input_dim) + self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding) + conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding) + + elif resample is None: + if dilation > 1: + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + self.normalize2 = normalization(output_dim) + self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) + else: + # conv_shortcut = nn.Conv2d ### Something wierd here. 
+ conv_shortcut = partial(ncsn_conv1x1) + self.conv1 = ncsn_conv3x3(input_dim, output_dim) + self.normalize2 = normalization(output_dim) + self.conv2 = ncsn_conv3x3(output_dim, output_dim) + else: + raise Exception('invalid resample value') + + if output_dim != input_dim or resample is not None: + self.shortcut = conv_shortcut(input_dim, output_dim) + + self.normalize1 = normalization(input_dim) + + def forward(self, x): + output = self.normalize1(x) + output = self.non_linearity(output) + output = self.conv1(output) + output = self.normalize2(output) + output = self.non_linearity(output) + output = self.conv2(output) + + if self.output_dim == self.input_dim and self.resample is None: + shortcut = x + else: + shortcut = self.shortcut(x) + + return shortcut + output + + +########################################################################### +# Functions below are ported over from the DDPM codebase: +# https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py +########################################################################### + +def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000): + assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32 + half_dim = embedding_dim // 2 + # magic number 10000 is from transformers + emb = math.log(max_positions) / (half_dim - 1) + # emb = math.log(2.) 
/ (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb) + # emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :] + # emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :] + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = F.pad(emb, (0, 1), mode='constant') + assert emb.shape == (timesteps.shape[0], embedding_dim) + return emb + + +def _einsum(a, b, c, x, y): + einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c)) + return torch.einsum(einsum_str, x, y) + + +def contract_inner(x, y): + """tensordot(x, y, 1).""" + x_chars = list(string.ascii_lowercase[:len(x.shape)]) + y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x.shape)]) + y_chars[0] = x_chars[-1] # first axis of y and last of x get summed + out_chars = x_chars[:-1] + y_chars[1:] + return _einsum(x_chars, y_chars, out_chars, x, y) + + +class NIN(nn.Module): + def __init__(self, in_dim, num_units, init_scale=0.1): + super().__init__() + self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) + self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) + + def forward(self, x): + x = x.permute(0, 2, 3, 1) + y = contract_inner(x, self.W) + self.b + return y.permute(0, 3, 1, 2) + + +class AttnBlock(nn.Module): + """Channel-wise self-attention block.""" + def __init__(self, channels): + super().__init__() + self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6) + self.NIN_0 = NIN(channels, channels) + self.NIN_1 = NIN(channels, channels) + self.NIN_2 = NIN(channels, channels) + self.NIN_3 = NIN(channels, channels, init_scale=0.) 
+ + def forward(self, x): + B, C, H, W = x.shape + h = self.GroupNorm_0(x) + q = self.NIN_0(h) + k = self.NIN_1(h) + v = self.NIN_2(h) + + w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5)) + w = torch.reshape(w, (B, H, W, H * W)) + w = F.softmax(w, dim=-1) + w = torch.reshape(w, (B, H, W, H, W)) + h = torch.einsum('bhwij,bcij->bchw', w, v) + h = self.NIN_3(h) + return x + h + + +class Upsample(nn.Module): + def __init__(self, channels, with_conv=False): + super().__init__() + if with_conv: + self.Conv_0 = ddpm_conv3x3(channels, channels) + self.with_conv = with_conv + + def forward(self, x): + B, C, H, W = x.shape + h = F.interpolate(x, (H * 2, W * 2), mode='nearest') + if self.with_conv: + h = self.Conv_0(h) + return h + + +class Downsample(nn.Module): + def __init__(self, channels, with_conv=False): + super().__init__() + if with_conv: + self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0) + self.with_conv = with_conv + + def forward(self, x): + B, C, H, W = x.shape + # Emulate 'SAME' padding + if self.with_conv: + x = F.pad(x, (0, 1, 0, 1)) + x = self.Conv_0(x) + else: + x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0) + + assert x.shape == (B, C, H // 2, W // 2) + return x + + +class ResnetBlockDDPM(nn.Module): + """The ResNet Blocks used in DDPM.""" + def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1): + super().__init__() + if out_ch is None: + out_ch = in_ch + self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6) + self.act = act + self.Conv_0 = ddpm_conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) + nn.init.zeros_(self.Dense_0.bias) + + self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.) 
+ if in_ch != out_ch: + if conv_shortcut: + self.Conv_2 = ddpm_conv3x3(in_ch, out_ch) + else: + self.NIN_0 = NIN(in_ch, out_ch) + self.out_ch = out_ch + self.in_ch = in_ch + self.conv_shortcut = conv_shortcut + + def forward(self, x, temb=None): + B, C, H, W = x.shape + assert C == self.in_ch + out_ch = self.out_ch if self.out_ch else self.in_ch + h = self.act(self.GroupNorm_0(x)) + h = self.Conv_0(h) + # Add bias to each feature map conditioned on the time embedding + if temb is not None: + h += self.Dense_0(self.act(temb))[:, :, None, None] + h = self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + if C != out_ch: + if self.conv_shortcut: + x = self.Conv_2(x) + else: + x = self.NIN_0(x) + return x + h \ No newline at end of file diff --git a/modules/sgmse/ncsnpp_utils/layerspp.py b/modules/sgmse/ncsnpp_utils/layerspp.py new file mode 100644 index 00000000..948b0688 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/layerspp.py @@ -0,0 +1,274 @@ +# coding=utf-8 +# Copyright 2020 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: skip-file +"""Layers for defining NCSN++. +""" +from . import layers +from . 
import up_or_down_sampling +import torch.nn as nn +import torch +import torch.nn.functional as F +import numpy as np + +conv1x1 = layers.ddpm_conv1x1 +conv3x3 = layers.ddpm_conv3x3 +NIN = layers.NIN +default_init = layers.default_init + + +class GaussianFourierProjection(nn.Module): + """Gaussian Fourier embeddings for noise levels.""" + + def __init__(self, embedding_size=256, scale=1.0): + super().__init__() + self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) + + def forward(self, x): + x_proj = x[:, None] * self.W[None, :] * 2 * np.pi + return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) + + +class Combine(nn.Module): + """Combine information from skip connections.""" + + def __init__(self, dim1, dim2, method='cat'): + super().__init__() + self.Conv_0 = conv1x1(dim1, dim2) + self.method = method + + def forward(self, x, y): + h = self.Conv_0(x) + if self.method == 'cat': + return torch.cat([h, y], dim=1) + elif self.method == 'sum': + return h + y + else: + raise ValueError(f'Method {self.method} not recognized.') + + +class AttnBlockpp(nn.Module): + """Channel-wise self-attention block. 
Modified from DDPM.""" + + def __init__(self, channels, skip_rescale=False, init_scale=0.): + super().__init__() + self.GroupNorm_0 = nn.GroupNorm(num_groups=min(channels // 4, 32), num_channels=channels, + eps=1e-6) + self.NIN_0 = NIN(channels, channels) + self.NIN_1 = NIN(channels, channels) + self.NIN_2 = NIN(channels, channels) + self.NIN_3 = NIN(channels, channels, init_scale=init_scale) + self.skip_rescale = skip_rescale + + def forward(self, x): + B, C, H, W = x.shape + h = self.GroupNorm_0(x) + q = self.NIN_0(h) + k = self.NIN_1(h) + v = self.NIN_2(h) + + w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5)) + w = torch.reshape(w, (B, H, W, H * W)) + w = F.softmax(w, dim=-1) + w = torch.reshape(w, (B, H, W, H, W)) + h = torch.einsum('bhwij,bcij->bchw', w, v) + h = self.NIN_3(h) + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.) + + +class Upsample(nn.Module): + def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, + fir_kernel=(1, 3, 3, 1)): + super().__init__() + out_ch = out_ch if out_ch else in_ch + if not fir: + if with_conv: + self.Conv_0 = conv3x3(in_ch, out_ch) + else: + if with_conv: + self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch, + kernel=3, up=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init()) + self.fir = fir + self.with_conv = with_conv + self.fir_kernel = fir_kernel + self.out_ch = out_ch + + def forward(self, x): + B, C, H, W = x.shape + if not self.fir: + h = F.interpolate(x, (H * 2, W * 2), 'nearest') + if self.with_conv: + h = self.Conv_0(h) + else: + if not self.with_conv: + h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) + else: + h = self.Conv2d_0(x) + + return h + + +class Downsample(nn.Module): + def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, + fir_kernel=(1, 3, 3, 1)): + super().__init__() + out_ch = out_ch if out_ch else in_ch + if not fir: + if with_conv: + self.Conv_0 = conv3x3(in_ch, 
out_ch, stride=2, padding=0) + else: + if with_conv: + self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch, + kernel=3, down=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init()) + self.fir = fir + self.fir_kernel = fir_kernel + self.with_conv = with_conv + self.out_ch = out_ch + + def forward(self, x): + B, C, H, W = x.shape + if not self.fir: + if self.with_conv: + x = F.pad(x, (0, 1, 0, 1)) + x = self.Conv_0(x) + else: + x = F.avg_pool2d(x, 2, stride=2) + else: + if not self.with_conv: + x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) + else: + x = self.Conv2d_0(x) + + return x + + +class ResnetBlockDDPMpp(nn.Module): + """ResBlock adapted from DDPM.""" + + def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, + dropout=0.1, skip_rescale=False, init_scale=0.): + super().__init__() + out_ch = out_ch if out_ch else in_ch + self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) + self.Conv_0 = conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) + nn.init.zeros_(self.Dense_0.bias) + self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) + if in_ch != out_ch: + if conv_shortcut: + self.Conv_2 = conv3x3(in_ch, out_ch) + else: + self.NIN_0 = NIN(in_ch, out_ch) + + self.skip_rescale = skip_rescale + self.act = act + self.out_ch = out_ch + self.conv_shortcut = conv_shortcut + + def forward(self, x, temb=None): + h = self.act(self.GroupNorm_0(x)) + h = self.Conv_0(h) + if temb is not None: + h += self.Dense_0(self.act(temb))[:, :, None, None] + h = self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + if x.shape[1] != self.out_ch: + if self.conv_shortcut: + x = self.Conv_2(x) 
+ else: + x = self.NIN_0(x) + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.) + + +class ResnetBlockBigGANpp(nn.Module): + def __init__(self, act, in_ch, out_ch=None, temb_dim=None, up=False, down=False, + dropout=0.1, fir=False, fir_kernel=(1, 3, 3, 1), + skip_rescale=True, init_scale=0.): + super().__init__() + + out_ch = out_ch if out_ch else in_ch + self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) + self.up = up + self.down = down + self.fir = fir + self.fir_kernel = fir_kernel + + self.Conv_0 = conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape) + nn.init.zeros_(self.Dense_0.bias) + + self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) + if in_ch != out_ch or up or down: + self.Conv_2 = conv1x1(in_ch, out_ch) + + self.skip_rescale = skip_rescale + self.act = act + self.in_ch = in_ch + self.out_ch = out_ch + + def forward(self, x, temb=None): + h = self.act(self.GroupNorm_0(x)) + + if self.up: + if self.fir: + h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2) + x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) + else: + h = up_or_down_sampling.naive_upsample_2d(h, factor=2) + x = up_or_down_sampling.naive_upsample_2d(x, factor=2) + elif self.down: + if self.fir: + h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2) + x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) + else: + h = up_or_down_sampling.naive_downsample_2d(h, factor=2) + x = up_or_down_sampling.naive_downsample_2d(x, factor=2) + + h = self.Conv_0(h) + # Add bias to each feature map conditioned on the time embedding + if temb is not None: + h += self.Dense_0(self.act(temb))[:, :, None, None] + h = 
self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + + if self.in_ch != self.out_ch or self.up or self.down: + x = self.Conv_2(x) + + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.) diff --git a/modules/sgmse/ncsnpp_utils/normalization.py b/modules/sgmse/ncsnpp_utils/normalization.py new file mode 100644 index 00000000..9a232043 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/normalization.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2020 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Normalization layers.""" +import torch.nn as nn +import torch +import functools + + +def get_normalization(config, conditional=False): + """Obtain normalization modules from the config file.""" + norm = config.model.normalization + if conditional: + if norm == 'InstanceNorm++': + return functools.partial(ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes) + else: + raise NotImplementedError(f'{norm} not implemented yet.') + else: + if norm == 'InstanceNorm': + return nn.InstanceNorm2d + elif norm == 'InstanceNorm++': + return InstanceNorm2dPlus + elif norm == 'VarianceNorm': + return VarianceNorm2d + elif norm == 'GroupNorm': + return nn.GroupNorm + else: + raise ValueError('Unknown normalization: %s' % norm) + + +class ConditionalBatchNorm2d(nn.Module): + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.bn = nn.BatchNorm2d(num_features, affine=False) + if self.bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[:, :num_features].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + out = self.bn(x) + if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=1) + out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * out + return out + + +class ConditionalInstanceNorm2d(nn.Module): + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) + if bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[:, 
:num_features].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + h = self.instance_norm(x) + if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=-1) + out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * h + return out + + +class ConditionalVarianceNorm2d(nn.Module): + def __init__(self, num_features, num_classes, bias=False): + super().__init__() + self.num_features = num_features + self.bias = bias + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.normal_(1, 0.02) + + def forward(self, x, y): + vars = torch.var(x, dim=(2, 3), keepdim=True) + h = x / torch.sqrt(vars + 1e-5) + + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * h + return out + + +class VarianceNorm2d(nn.Module): + def __init__(self, num_features, bias=False): + super().__init__() + self.num_features = num_features + self.bias = bias + self.alpha = nn.Parameter(torch.zeros(num_features)) + self.alpha.data.normal_(1, 0.02) + + def forward(self, x): + vars = torch.var(x, dim=(2, 3), keepdim=True) + h = x / torch.sqrt(vars + 1e-5) + + out = self.alpha.view(-1, self.num_features, 1, 1) * h + return out + + +class ConditionalNoneNorm2d(nn.Module): + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + if bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[:, :num_features].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + 
if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=-1) + out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * x + return out + + +class NoneNorm2d(nn.Module): + def __init__(self, num_features, bias=True): + super().__init__() + + def forward(self, x): + return x + + +class InstanceNorm2dPlus(nn.Module): + def __init__(self, num_features, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) + self.alpha = nn.Parameter(torch.zeros(num_features)) + self.gamma = nn.Parameter(torch.zeros(num_features)) + self.alpha.data.normal_(1, 0.02) + self.gamma.data.normal_(1, 0.02) + if bias: + self.beta = nn.Parameter(torch.zeros(num_features)) + + def forward(self, x): + means = torch.mean(x, dim=(2, 3)) + m = torch.mean(means, dim=-1, keepdim=True) + v = torch.var(means, dim=-1, keepdim=True) + means = (means - m) / (torch.sqrt(v + 1e-5)) + h = self.instance_norm(x) + + if self.bias: + h = h + means[..., None, None] * self.alpha[..., None, None] + out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1) + else: + h = h + means[..., None, None] * self.alpha[..., None, None] + out = self.gamma.view(-1, self.num_features, 1, 1) * h + return out + + +class ConditionalInstanceNorm2dPlus(nn.Module): + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) + if bias: + self.embed = nn.Embedding(num_classes, num_features * 3) + self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0 + else: + self.embed = 
nn.Embedding(num_classes, 2 * num_features) + self.embed.weight.data.normal_(1, 0.02) + + def forward(self, x, y): + means = torch.mean(x, dim=(2, 3)) + m = torch.mean(means, dim=-1, keepdim=True) + v = torch.var(means, dim=-1, keepdim=True) + means = (means - m) / (torch.sqrt(v + 1e-5)) + h = self.instance_norm(x) + + if self.bias: + gamma, alpha, beta = self.embed(y).chunk(3, dim=-1) + h = h + means[..., None, None] * alpha[..., None, None] + out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1) + else: + gamma, alpha = self.embed(y).chunk(2, dim=-1) + h = h + means[..., None, None] * alpha[..., None, None] + out = gamma.view(-1, self.num_features, 1, 1) * h + return out diff --git a/modules/sgmse/ncsnpp_utils/op/__init__.py b/modules/sgmse/ncsnpp_utils/op/__init__.py new file mode 100644 index 00000000..d0918d92 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/op/__init__.py @@ -0,0 +1,2 @@ +from .fused_act import FusedLeakyReLU, fused_leaky_relu +from .upfirdn2d import upfirdn2d diff --git a/modules/sgmse/ncsnpp_utils/op/fused_act.py b/modules/sgmse/ncsnpp_utils/op/fused_act.py new file mode 100644 index 00000000..e734e2cf --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/op/fused_act.py @@ -0,0 +1,97 @@ +import os + +import torch +from torch import nn +from torch.nn import functional as F +from torch.autograd import Function +from torch.utils.cpp_extension import load + + +module_path = os.path.dirname(__file__) +fused = load( + "fused", + sources=[ + os.path.join(module_path, "fused_bias_act.cpp"), + os.path.join(module_path, "fused_bias_act_kernel.cu"), + ], +) + + +class FusedLeakyReLUFunctionBackward(Function): + @staticmethod + def forward(ctx, grad_output, out, negative_slope, scale): + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + empty = grad_output.new_empty(0) + + grad_input = fused.fused_bias_act( + grad_output, empty, out, 3, 1, negative_slope, scale + ) + + dim = [0] + + if 
grad_input.ndim > 2: + dim += list(range(2, grad_input.ndim)) + + grad_bias = grad_input.sum(dim).detach() + + return grad_input, grad_bias + + @staticmethod + def backward(ctx, gradgrad_input, gradgrad_bias): + out, = ctx.saved_tensors + gradgrad_out = fused.fused_bias_act( + gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale + ) + + return gradgrad_out, None, None, None + + +class FusedLeakyReLUFunction(Function): + @staticmethod + def forward(ctx, input, bias, negative_slope, scale): + empty = input.new_empty(0) + out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale) + ctx.save_for_backward(out) + ctx.negative_slope = negative_slope + ctx.scale = scale + + return out + + @staticmethod + def backward(ctx, grad_output): + out, = ctx.saved_tensors + + grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply( + grad_output, out, ctx.negative_slope, ctx.scale + ) + + return grad_input, grad_bias, None, None + + +class FusedLeakyReLU(nn.Module): + def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): + super().__init__() + + self.bias = nn.Parameter(torch.zeros(channel)) + self.negative_slope = negative_slope + self.scale = scale + + def forward(self, input): + return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale) + + +def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): + if input.device.type == "cpu": + rest_dim = [1] * (input.ndim - bias.ndim - 1) + return ( + F.leaky_relu( + input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2 + ) + * scale + ) + + else: + return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale) diff --git a/modules/sgmse/ncsnpp_utils/op/fused_bias_act.cpp b/modules/sgmse/ncsnpp_utils/op/fused_bias_act.cpp new file mode 100644 index 00000000..a0543187 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/op/fused_bias_act.cpp @@ -0,0 +1,21 @@ +#include + + +torch::Tensor fused_bias_act_op(const torch::Tensor& input, const 
torch::Tensor& bias, const torch::Tensor& refer, + int act, int grad, float alpha, float scale); + +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x) + +torch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, + int act, int grad, float alpha, float scale) { + CHECK_CUDA(input); + CHECK_CUDA(bias); + + return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fused_bias_act", &fused_bias_act, "fused bias act (CUDA)"); +} \ No newline at end of file diff --git a/modules/sgmse/ncsnpp_utils/op/fused_bias_act_kernel.cu b/modules/sgmse/ncsnpp_utils/op/fused_bias_act_kernel.cu new file mode 100644 index 00000000..8d2f03c7 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/op/fused_bias_act_kernel.cu @@ -0,0 +1,99 @@ +// Copyright (c) 2019, NVIDIA Corporation. All rights reserved. +// +// This work is made available under the Nvidia Source Code License-NC. +// To view a copy of this license, visit +// https://nvlabs.github.io/stylegan2/license.html + +#include + +#include +#include +#include +#include + +#include +#include + + +template +static __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref, + int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) { + int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x; + + scalar_t zero = 0.0; + + for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) { + scalar_t x = p_x[xi]; + + if (use_bias) { + x += p_b[(xi / step_b) % size_b]; + } + + scalar_t ref = use_ref ? 
p_ref[xi] : zero; + + scalar_t y; + + switch (act * 10 + grad) { + default: + case 10: y = x; break; + case 11: y = x; break; + case 12: y = 0.0; break; + + case 30: y = (x > 0.0) ? x : x * alpha; break; + case 31: y = (ref > 0.0) ? x : x * alpha; break; + case 32: y = 0.0; break; + } + + out[xi] = y * scale; + } +} + + +torch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer, + int act, int grad, float alpha, float scale) { + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); + + auto x = input.contiguous(); + auto b = bias.contiguous(); + auto ref = refer.contiguous(); + + int use_bias = b.numel() ? 1 : 0; + int use_ref = ref.numel() ? 1 : 0; + + int size_x = x.numel(); + int size_b = b.numel(); + int step_b = 1; + + for (int i = 1 + 1; i < x.dim(); i++) { + step_b *= x.size(i); + } + + int loop_x = 4; + int block_size = 4 * 32; + int grid_size = (size_x - 1) / (loop_x * block_size) + 1; + + auto y = torch::empty_like(x); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "fused_bias_act_kernel", [&] { + fused_bias_act_kernel<<>>( + y.data_ptr(), + x.data_ptr(), + b.data_ptr(), + ref.data_ptr(), + act, + grad, + alpha, + scale, + loop_x, + size_x, + step_b, + size_b, + use_bias, + use_ref + ); + }); + + return y; +} \ No newline at end of file diff --git a/modules/sgmse/ncsnpp_utils/op/upfirdn2d.cpp b/modules/sgmse/ncsnpp_utils/op/upfirdn2d.cpp new file mode 100644 index 00000000..b07aa205 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/op/upfirdn2d.cpp @@ -0,0 +1,23 @@ +#include + + +torch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel, + int up_x, int up_y, int down_x, int down_y, + int pad_x0, int pad_x1, int pad_y0, int pad_y1); + +#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x " must be contiguous") 
import os
import warnings

import torch
from torch.nn import functional as F
from torch.autograd import Function
from torch.utils.cpp_extension import load


module_path = os.path.dirname(__file__)

# JIT-compile the CUDA extension at import time.  If no CUDA toolchain is
# available (CPU-only machines, CI, docs builds) fall back to the pure-PyTorch
# `upfirdn2d_native` path below instead of making this module unimportable.
try:
    upfirdn2d_op = load(
        "upfirdn2d",
        sources=[
            os.path.join(module_path, "upfirdn2d.cpp"),
            os.path.join(module_path, "upfirdn2d_kernel.cu"),
        ],
    )
except Exception as err:
    warnings.warn(
        "Could not build the upfirdn2d CUDA extension (%r); "
        "falling back to the native PyTorch implementation." % (err,)
    )
    upfirdn2d_op = None


class UpFirDn2dBackward(Function):
    """Backward pass of `UpFirDn2d`.

    Implemented as its own autograd Function so that double-backward works:
    the gradient of upfirdn2d is another upfirdn2d with up/down swapped and a
    spatially flipped kernel.
    """

    @staticmethod
    def forward(
        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size
    ):
        up_x, up_y = up
        down_x, down_y = down
        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad

        # CUDA op expects (major, H, W, minor) layout with minor == 1.
        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)

        grad_input = upfirdn2d_op.upfirdn2d(
            grad_output,
            grad_kernel,
            down_x,
            down_y,
            up_x,
            up_y,
            g_pad_x0,
            g_pad_x1,
            g_pad_y0,
            g_pad_y1,
        )
        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])

        ctx.save_for_backward(kernel)

        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        # Stash the geometry needed to re-run the forward op in backward().
        ctx.up_x = up_x
        ctx.up_y = up_y
        ctx.down_x = down_x
        ctx.down_y = down_y
        ctx.pad_x0 = pad_x0
        ctx.pad_x1 = pad_x1
        ctx.pad_y0 = pad_y0
        ctx.pad_y1 = pad_y1
        ctx.in_size = in_size
        ctx.out_size = out_size

        return grad_input

    @staticmethod
    def backward(ctx, gradgrad_input):
        (kernel,) = ctx.saved_tensors

        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)

        gradgrad_out = upfirdn2d_op.upfirdn2d(
            gradgrad_input,
            kernel,
            ctx.up_x,
            ctx.up_y,
            ctx.down_x,
            ctx.down_y,
            ctx.pad_x0,
            ctx.pad_x1,
            ctx.pad_y0,
            ctx.pad_y1,
        )
        gradgrad_out = gradgrad_out.view(
            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]
        )

        # Only grad_output receives a gradient; the other 8 forward args don't.
        return gradgrad_out, None, None, None, None, None, None, None, None


class UpFirDn2d(Function):
    """Autograd wrapper around the CUDA upfirdn2d op (GPU path only)."""

    @staticmethod
    def forward(ctx, input, kernel, up, down, pad):
        up_x, up_y = up
        down_x, down_y = down
        pad_x0, pad_x1, pad_y0, pad_y1 = pad

        kernel_h, kernel_w = kernel.shape
        batch, channel, in_h, in_w = input.shape
        ctx.in_size = input.shape

        # Fold (N, C) into the major dim; minor dim is 1.
        input = input.reshape(-1, in_h, in_w, 1)

        # Save both the kernel and its flipped version for the backward pass.
        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))

        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
        ctx.out_size = (out_h, out_w)

        ctx.up = (up_x, up_y)
        ctx.down = (down_x, down_y)
        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)

        # Padding of the adjoint (gradient) op.
        g_pad_x0 = kernel_w - pad_x0 - 1
        g_pad_y0 = kernel_h - pad_y0 - 1
        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1

        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)

        out = upfirdn2d_op.upfirdn2d(
            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
        )
        out = out.view(-1, channel, out_h, out_w)

        return out

    @staticmethod
    def backward(ctx, grad_output):
        kernel, grad_kernel = ctx.saved_tensors

        grad_input = UpFirDn2dBackward.apply(
            grad_output,
            kernel,
            grad_kernel,
            ctx.up,
            ctx.down,
            ctx.pad,
            ctx.g_pad,
            ctx.in_size,
            ctx.out_size,
        )

        return grad_input, None, None, None, None


def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
    """Upsample, FIR-filter, then downsample a batch of NCHW images.

    Args:
        input: Tensor of shape `(N, C, H, W)`.
        kernel: 2-D FIR filter tensor.
        up: Integer upsampling factor (applied to both axes).
        down: Integer downsampling factor (applied to both axes).
        pad: `(pad0, pad1)` padding applied to both axes.

    Returns:
        Filtered tensor of shape `(N, C, H', W')`.
    """
    if input.device.type == "cpu" or upfirdn2d_op is None:
        # Pure-PyTorch path; also the fallback when the CUDA extension could
        # not be built (see the guarded `load` at the top of this module).
        out = upfirdn2d_native(
            input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]
        )
    else:
        out = UpFirDn2d.apply(
            input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])
        )

    return out


def upfirdn2d_native(
    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1
):
    """Reference implementation of upfirdn2d using only PyTorch ops.

    Semantics: zero-stuff upsample by (up_y, up_x) -> zero-pad -> correlate
    with the flipped kernel -> stride-downsample by (down_y, down_x).
    """
    _, channel, in_h, in_w = input.shape
    input = input.reshape(-1, in_h, in_w, 1)

    _, in_h, in_w, minor = input.shape
    kernel_h, kernel_w = kernel.shape

    # Zero-stuffing: insert (up - 1) zeros after every sample on each axis.
    out = input.view(-1, in_h, 1, in_w, 1, minor)
    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
    out = out.view(-1, in_h * up_y, in_w * up_x, minor)

    # Positive pads are applied; negative pads become crops below.
    out = F.pad(
        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]
    )
    out = out[
        :,
        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),
        :,
    ]

    # Correlate with the flipped kernel (i.e. convolve with the kernel).
    out = out.permute(0, 3, 1, 2)
    out = out.reshape(
        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]
    )
    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
    out = F.conv2d(out, w)
    out = out.reshape(
        -1,
        minor,
        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,
        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,
    )
    out = out.permute(0, 2, 3, 1)
    out = out[:, ::down_y, ::down_x, :]

    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1

    return out.view(-1, channel, out_h, out_w)
+// +// This work is made available under the Nvidia Source Code License-NC. +// To view a copy of this license, visit +// https://nvlabs.github.io/stylegan2/license.html + +#include + +#include +#include +#include +#include + +#include +#include + +static __host__ __device__ __forceinline__ int floor_div(int a, int b) { + int c = a / b; + + if (c * b > a) { + c--; + } + + return c; +} + +struct UpFirDn2DKernelParams { + int up_x; + int up_y; + int down_x; + int down_y; + int pad_x0; + int pad_x1; + int pad_y0; + int pad_y1; + + int major_dim; + int in_h; + int in_w; + int minor_dim; + int kernel_h; + int kernel_w; + int out_h; + int out_w; + int loop_major; + int loop_x; +}; + +template +__global__ void upfirdn2d_kernel_large(scalar_t *out, const scalar_t *input, + const scalar_t *kernel, + const UpFirDn2DKernelParams p) { + int minor_idx = blockIdx.x * blockDim.x + threadIdx.x; + int out_y = minor_idx / p.minor_dim; + minor_idx -= out_y * p.minor_dim; + int out_x_base = blockIdx.y * p.loop_x * blockDim.y + threadIdx.y; + int major_idx_base = blockIdx.z * p.loop_major; + + if (out_x_base >= p.out_w || out_y >= p.out_h || + major_idx_base >= p.major_dim) { + return; + } + + int mid_y = out_y * p.down_y + p.up_y - 1 - p.pad_y0; + int in_y = min(max(floor_div(mid_y, p.up_y), 0), p.in_h); + int h = min(max(floor_div(mid_y + p.kernel_h, p.up_y), 0), p.in_h) - in_y; + int kernel_y = mid_y + p.kernel_h - (in_y + 1) * p.up_y; + + for (int loop_major = 0, major_idx = major_idx_base; + loop_major < p.loop_major && major_idx < p.major_dim; + loop_major++, major_idx++) { + for (int loop_x = 0, out_x = out_x_base; + loop_x < p.loop_x && out_x < p.out_w; loop_x++, out_x += blockDim.y) { + int mid_x = out_x * p.down_x + p.up_x - 1 - p.pad_x0; + int in_x = min(max(floor_div(mid_x, p.up_x), 0), p.in_w); + int w = min(max(floor_div(mid_x + p.kernel_w, p.up_x), 0), p.in_w) - in_x; + int kernel_x = mid_x + p.kernel_w - (in_x + 1) * p.up_x; + + const scalar_t *x_p = + 
&input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + + minor_idx]; + const scalar_t *k_p = &kernel[kernel_y * p.kernel_w + kernel_x]; + int x_px = p.minor_dim; + int k_px = -p.up_x; + int x_py = p.in_w * p.minor_dim; + int k_py = -p.up_y * p.kernel_w; + + scalar_t v = 0.0f; + + for (int y = 0; y < h; y++) { + for (int x = 0; x < w; x++) { + v += static_cast(*x_p) * static_cast(*k_p); + x_p += x_px; + k_p += k_px; + } + + x_p += x_py - w * x_px; + k_p += k_py - w * k_px; + } + + out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + + minor_idx] = v; + } + } +} + +template +__global__ void upfirdn2d_kernel(scalar_t *out, const scalar_t *input, + const scalar_t *kernel, + const UpFirDn2DKernelParams p) { + const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1; + const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1; + + __shared__ volatile float sk[kernel_h][kernel_w]; + __shared__ volatile float sx[tile_in_h][tile_in_w]; + + int minor_idx = blockIdx.x; + int tile_out_y = minor_idx / p.minor_dim; + minor_idx -= tile_out_y * p.minor_dim; + tile_out_y *= tile_out_h; + int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w; + int major_idx_base = blockIdx.z * p.loop_major; + + if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | + major_idx_base >= p.major_dim) { + return; + } + + for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; + tap_idx += blockDim.x) { + int ky = tap_idx / kernel_w; + int kx = tap_idx - ky * kernel_w; + scalar_t v = 0.0; + + if (kx < p.kernel_w & ky < p.kernel_h) { + v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)]; + } + + sk[ky][kx] = v; + } + + for (int loop_major = 0, major_idx = major_idx_base; + loop_major < p.loop_major & major_idx < p.major_dim; + loop_major++, major_idx++) { + for (int loop_x = 0, tile_out_x = tile_out_x_base; + loop_x < p.loop_x & tile_out_x < p.out_w; + loop_x++, tile_out_x += tile_out_w) { + int tile_mid_x 
= tile_out_x * down_x + up_x - 1 - p.pad_x0; + int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0; + int tile_in_x = floor_div(tile_mid_x, up_x); + int tile_in_y = floor_div(tile_mid_y, up_y); + + __syncthreads(); + + for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; + in_idx += blockDim.x) { + int rel_in_y = in_idx / tile_in_w; + int rel_in_x = in_idx - rel_in_y * tile_in_w; + int in_x = rel_in_x + tile_in_x; + int in_y = rel_in_y + tile_in_y; + + scalar_t v = 0.0; + + if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) { + v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * + p.minor_dim + + minor_idx]; + } + + sx[rel_in_y][rel_in_x] = v; + } + + __syncthreads(); + for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; + out_idx += blockDim.x) { + int rel_out_y = out_idx / tile_out_w; + int rel_out_x = out_idx - rel_out_y * tile_out_w; + int out_x = rel_out_x + tile_out_x; + int out_y = rel_out_y + tile_out_y; + + int mid_x = tile_mid_x + rel_out_x * down_x; + int mid_y = tile_mid_y + rel_out_y * down_y; + int in_x = floor_div(mid_x, up_x); + int in_y = floor_div(mid_y, up_y); + int rel_in_x = in_x - tile_in_x; + int rel_in_y = in_y - tile_in_y; + int kernel_x = (in_x + 1) * up_x - mid_x - 1; + int kernel_y = (in_y + 1) * up_y - mid_y - 1; + + scalar_t v = 0.0; + +#pragma unroll + for (int y = 0; y < kernel_h / up_y; y++) +#pragma unroll + for (int x = 0; x < kernel_w / up_x; x++) + v += sx[rel_in_y + y][rel_in_x + x] * + sk[kernel_y + y * up_y][kernel_x + x * up_x]; + + if (out_x < p.out_w & out_y < p.out_h) { + out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + + minor_idx] = v; + } + } + } + } +} + +torch::Tensor upfirdn2d_op(const torch::Tensor &input, + const torch::Tensor &kernel, int up_x, int up_y, + int down_x, int down_y, int pad_x0, int pad_x1, + int pad_y0, int pad_y1) { + int curDevice = -1; + cudaGetDevice(&curDevice); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice); 
+ + UpFirDn2DKernelParams p; + + auto x = input.contiguous(); + auto k = kernel.contiguous(); + + p.major_dim = x.size(0); + p.in_h = x.size(1); + p.in_w = x.size(2); + p.minor_dim = x.size(3); + p.kernel_h = k.size(0); + p.kernel_w = k.size(1); + p.up_x = up_x; + p.up_y = up_y; + p.down_x = down_x; + p.down_y = down_y; + p.pad_x0 = pad_x0; + p.pad_x1 = pad_x1; + p.pad_y0 = pad_y0; + p.pad_y1 = pad_y1; + + p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / + p.down_y; + p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / + p.down_x; + + auto out = + at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options()); + + int mode = -1; + + int tile_out_h = -1; + int tile_out_w = -1; + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 1; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 3 && p.kernel_w <= 3) { + mode = 2; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 3; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && + p.kernel_h <= 2 && p.kernel_w <= 2) { + mode = 4; + tile_out_h = 16; + tile_out_w = 64; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && + p.kernel_h <= 4 && p.kernel_w <= 4) { + mode = 5; + tile_out_h = 8; + tile_out_w = 32; + } + + if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && + p.kernel_h <= 2 && p.kernel_w <= 2) { + mode = 6; + tile_out_h = 8; + tile_out_w = 32; + } + + dim3 block_size; + dim3 grid_size; + + if (tile_out_h > 0 && tile_out_w > 0) { + p.loop_major = (p.major_dim - 1) / 16384 + 1; + p.loop_x = 1; + block_size = dim3(32 * 8, 1, 1); + grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim, + (p.out_w - 
1) / (p.loop_x * tile_out_w) + 1, + (p.major_dim - 1) / p.loop_major + 1); + } else { + p.loop_major = (p.major_dim - 1) / 16384 + 1; + p.loop_x = 4; + block_size = dim3(4, 32, 1); + grid_size = dim3((p.out_h * p.minor_dim - 1) / block_size.x + 1, + (p.out_w - 1) / (p.loop_x * block_size.y) + 1, + (p.major_dim - 1) / p.loop_major + 1); + } + + AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] { + switch (mode) { + case 1: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 2: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 3: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 4: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 5: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + case 6: + upfirdn2d_kernel + <<>>(out.data_ptr(), + x.data_ptr(), + k.data_ptr(), p); + + break; + + default: + upfirdn2d_kernel_large<<>>( + out.data_ptr(), x.data_ptr(), + k.data_ptr(), p); + } + }); + + return out; +} \ No newline at end of file diff --git a/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py b/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py new file mode 100644 index 00000000..cf7cd443 --- /dev/null +++ b/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py @@ -0,0 +1,257 @@ +"""Layers used for up-sampling or down-sampling images. + +Many functions are ported from https://github.com/NVlabs/stylegan2. 
+""" + +import torch.nn as nn +import torch +import torch.nn.functional as F +import numpy as np +from .op import upfirdn2d + + +# Function ported from StyleGAN2 +def get_weight(module, + shape, + weight_var='weight', + kernel_init=None): + """Get/create weight tensor for a convolution or fully-connected layer.""" + + return module.param(weight_var, kernel_init, shape) + + +class Conv2d(nn.Module): + """Conv2d layer with optimal upsampling and downsampling (StyleGAN2).""" + + def __init__(self, in_ch, out_ch, kernel, up=False, down=False, + resample_kernel=(1, 3, 3, 1), + use_bias=True, + kernel_init=None): + super().__init__() + assert not (up and down) + assert kernel >= 1 and kernel % 2 == 1 + self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel)) + if kernel_init is not None: + self.weight.data = kernel_init(self.weight.data.shape) + if use_bias: + self.bias = nn.Parameter(torch.zeros(out_ch)) + + self.up = up + self.down = down + self.resample_kernel = resample_kernel + self.kernel = kernel + self.use_bias = use_bias + + def forward(self, x): + if self.up: + x = upsample_conv_2d(x, self.weight, k=self.resample_kernel) + elif self.down: + x = conv_downsample_2d(x, self.weight, k=self.resample_kernel) + else: + x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2) + + if self.use_bias: + x = x + self.bias.reshape(1, -1, 1, 1) + + return x + + +def naive_upsample_2d(x, factor=2): + _N, C, H, W = x.shape + x = torch.reshape(x, (-1, C, H, 1, W, 1)) + x = x.repeat(1, 1, 1, factor, 1, factor) + return torch.reshape(x, (-1, C, H * factor, W * factor)) + + +def naive_downsample_2d(x, factor=2): + _N, C, H, W = x.shape + x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor)) + return torch.mean(x, dim=(3, 5)) + + +def upsample_conv_2d(x, w, k=None, factor=2, gain=1): + """Fused `upsample_2d()` followed by `tf.nn.conv2d()`. + + Padding is performed only once at the beginning, not between the + operations. 
+ The fused op is considerably more efficient than performing the same + calculation + using standard TensorFlow ops. It supports gradients of arbitrary order. + Args: + x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, + C]`. + w: Weight tensor of the shape `[filterH, filterW, inChannels, + outChannels]`. Grouped convolution can be performed by `inChannels = + x.shape[0] // numGroups`. + k: FIR filter of the shape `[firH, firW]` or `[firN]` + (separable). The default is `[1] * factor`, which corresponds to + nearest-neighbor upsampling. + factor: Integer upsampling factor (default: 2). + gain: Scaling factor for signal magnitude (default: 1.0). + + Returns: + Tensor of the shape `[N, C, H * factor, W * factor]` or + `[N, H * factor, W * factor, C]`, and same datatype as `x`. + """ + + assert isinstance(factor, int) and factor >= 1 + + # Check weight shape. + assert len(w.shape) == 4 + convH = w.shape[2] + convW = w.shape[3] + inC = w.shape[1] + outC = w.shape[0] + + assert convW == convH + + # Setup filter kernel. + if k is None: + k = [1] * factor + k = _setup_kernel(k) * (gain * (factor ** 2)) + p = (k.shape[0] - factor) - (convW - 1) + + stride = (factor, factor) + + # Determine data dimensions. + stride = [1, 1, factor, factor] + output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) + output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, + output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW) + assert output_padding[0] >= 0 and output_padding[1] >= 0 + num_groups = _shape(x, 1) // inC + + # Transpose weights. + w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) + w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) + w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) + + x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) + ## Original TF code. 
+ # x = tf.nn.conv2d_transpose( + # x, + # w, + # output_shape=output_shape, + # strides=stride, + # padding='VALID', + # data_format=data_format) + ## JAX equivalent + + return upfirdn2d(x, torch.tensor(k, device=x.device), + pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) + + +def conv_downsample_2d(x, w, k=None, factor=2, gain=1): + """Fused `tf.nn.conv2d()` followed by `downsample_2d()`. + + Padding is performed only once at the beginning, not between the operations. + The fused op is considerably more efficient than performing the same + calculation + using standard TensorFlow ops. It supports gradients of arbitrary order. + Args: + x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, + C]`. + w: Weight tensor of the shape `[filterH, filterW, inChannels, + outChannels]`. Grouped convolution can be performed by `inChannels = + x.shape[0] // numGroups`. + k: FIR filter of the shape `[firH, firW]` or `[firN]` + (separable). The default is `[1] * factor`, which corresponds to + average pooling. + factor: Integer downsampling factor (default: 2). + gain: Scaling factor for signal magnitude (default: 1.0). + + Returns: + Tensor of the shape `[N, C, H // factor, W // factor]` or + `[N, H // factor, W // factor, C]`, and same datatype as `x`. + """ + + assert isinstance(factor, int) and factor >= 1 + _outC, _inC, convH, convW = w.shape + assert convW == convH + if k is None: + k = [1] * factor + k = _setup_kernel(k) * gain + p = (k.shape[0] - factor) + (convW - 1) + s = [factor, factor] + x = upfirdn2d(x, torch.tensor(k, device=x.device), + pad=((p + 1) // 2, p // 2)) + return F.conv2d(x, w, stride=s, padding=0) + + +def _setup_kernel(k): + k = np.asarray(k, dtype=np.float32) + if k.ndim == 1: + k = np.outer(k, k) + k /= np.sum(k) + assert k.ndim == 2 + assert k.shape[0] == k.shape[1] + return k + + +def _shape(x, dim): + return x.shape[dim] + + +def upsample_2d(x, k=None, factor=2, gain=1): + r"""Upsample a batch of 2D images with the given filter. 
def upsample_2d(x, k=None, factor=2, gain=1):
    r"""Upsample a batch of NCHW images with the given FIR filter.

    Each image is zero-stuffed by `factor`, then filtered with `k`.  The
    filter is normalized so that constant inputs are scaled by `gain`; pixels
    outside the image are treated as zero.

    Args:
        x: Input tensor of shape `[N, C, H, W]`.
        k: FIR filter of shape `[firH, firW]` or `[firN]` (separable).
            Defaults to `[1] * factor` (nearest-neighbor upsampling).
        factor: Integer upsampling factor (default: 2).
        gain: Scaling factor for signal magnitude (default: 1.0).

    Returns:
        Tensor of shape `[N, C, H * factor, W * factor]`.
    """
    assert isinstance(factor, int) and factor >= 1
    fir = [1] * factor if k is None else k
    # The factor**2 gain compensates for the energy lost to zero-stuffing.
    fir = _setup_kernel(fir) * (gain * (factor ** 2))
    pad_total = fir.shape[0] - factor
    return upfirdn2d(
        x,
        torch.tensor(fir, device=x.device),
        up=factor,
        pad=((pad_total + 1) // 2 + factor - 1, pad_total // 2),
    )


def downsample_2d(x, k=None, factor=2, gain=1):
    r"""Downsample a batch of NCHW images with the given FIR filter.

    Each image is filtered with `k`, then subsampled by `factor`.  The filter
    is normalized so that constant inputs are scaled by `gain`; pixels outside
    the image are treated as zero.

    Args:
        x: Input tensor of shape `[N, C, H, W]`.
        k: FIR filter of shape `[firH, firW]` or `[firN]` (separable).
            Defaults to `[1] * factor` (average pooling).
        factor: Integer downsampling factor (default: 2).
        gain: Scaling factor for signal magnitude (default: 1.0).

    Returns:
        Tensor of shape `[N, C, H // factor, W // factor]`.
    """
    assert isinstance(factor, int) and factor >= 1
    fir = [1] * factor if k is None else k
    fir = _setup_kernel(fir) * gain
    pad_total = fir.shape[0] - factor
    return upfirdn2d(
        x,
        torch.tensor(fir, device=x.device),
        down=factor,
        pad=((pad_total + 1) // 2, pad_total // 2),
    )
def get_sigmas(sigma_min, sigma_max, num_scales):
    """Geometric noise-level schedule for SMLD.

    Args:
        sigma_min: Smallest noise level.
        sigma_max: Largest noise level.
        num_scales: Number of noise levels.

    Returns:
        A numpy array of `num_scales` noise levels, decreasing geometrically
        from `sigma_max` to `sigma_min`.
    """
    log_levels = np.linspace(np.log(sigma_max), np.log(sigma_min), num_scales)
    return np.exp(log_levels)


def get_ddpm_params(config):
    """Compute the beta/alpha schedules from the original DDPM paper.

    Args:
        config: Config object with `model.beta_min`, `model.beta_max`, and
            `model.num_scales` attributes.

    Returns:
        A dict with the beta schedule, the (cumulative) alpha products and
        their square roots, and the schedule endpoints.
    """
    # The schedule is defined for 1000 steps; parameters need to be adapted
    # if the number of time steps differs from 1000.
    num_diffusion_timesteps = 1000
    beta_start = config.model.beta_min / config.model.num_scales
    beta_end = config.model.beta_max / config.model.num_scales
    betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)

    alphas = 1.0 - betas
    alphas_cumprod = np.cumprod(alphas, axis=0)

    return {
        'betas': betas,
        'alphas': alphas,
        'alphas_cumprod': alphas_cumprod,
        'sqrt_alphas_cumprod': np.sqrt(alphas_cumprod),
        'sqrt_1m_alphas_cumprod': np.sqrt(1.0 - alphas_cumprod),
        'beta_min': beta_start * (num_diffusion_timesteps - 1),
        'beta_max': beta_end * (num_diffusion_timesteps - 1),
        'num_diffusion_timesteps': num_diffusion_timesteps,
    }


def create_model(config):
    """Instantiate the registered score model on the configured device,
    wrapped in DataParallel."""
    model_cls = get_model(config.model.name)
    score_model = model_cls(config).to(config.device)
    return torch.nn.DataParallel(score_model)
def get_model_fn(model, train=False):
    """Create a function to give the output of the score-based model.

    Args:
        model: The score model.
        train: `True` for training and `False` for evaluation.

    Returns:
        A function `(x, labels) -> model output` that first toggles the
        model's train/eval mode.
    """

    def model_fn(x, labels):
        """Run the score network on `x` conditioned on time-step `labels`.

        `labels` should be interpreted differently for different models.
        """
        if train:
            model.train()
        else:
            model.eval()
        return model(x, labels)

    return model_fn


def get_score_fn(sde, model, train=False, continuous=False):
    """Wrap the model so its output is a true time-dependent score function.

    NOTE(review): this module imports OUVESDE/OUVPSDE via `from ...sdes
    import ...`, which resolves to `modules.sdes`; the SDE classes appear to
    live in `modules/sgmse/sdes.py`, so the relative depth looks off (likely
    should be `..sdes`) — verify against the package layout.

    Args:
        sde: An `sde_lib.SDE` object representing the forward SDE.
        model: A score model.
        train: `True` for training and `False` for evaluation.
        continuous: If `True`, the model directly takes continuous time steps.

    Returns:
        A function `(x, t) -> score`.
    """
    model_fn = get_model_fn(model, train=train)

    if isinstance(sde, OUVPSDE):

        def score_fn(x, t):
            # VP-style models predict scaled noise: divide by the marginal
            # std and flip the sign to obtain the score.
            if continuous:
                # Continuously-trained models assume the maximum value of the
                # time embedding is 999; t=0 is the lowest noise level.
                time_labels = t * 999
                net_out = model_fn(x, time_labels)
                std = sde.marginal_prob(torch.zeros_like(x), t)[1]
            else:
                # Discrete models index into the precomputed schedule.
                time_labels = t * (sde.N - 1)
                net_out = model_fn(x, time_labels)
                std = sde.sqrt_1m_alphas_cumprod.to(time_labels.device)[
                    time_labels.long()
                ]
            return -net_out / std[:, None, None, None]

    elif isinstance(sde, OUVESDE):

        def score_fn(x, t):
            if continuous:
                time_labels = sde.marginal_prob(torch.zeros_like(x), t)[1]
            else:
                # For VE-trained models t=0 corresponds to the highest noise
                # level; map to an integer index in [0, N-1].
                time_labels = torch.round((sde.T - t) * (sde.N - 1)).long()
            return model_fn(x, time_labels)

    else:
        raise NotImplementedError(
            f"SDE class {sde.__class__.__name__} not yet supported."
        )

    return score_fn


def to_flattened_numpy(x):
    """Flatten a torch tensor `x` and convert it to numpy."""
    flat = x.detach().cpu().numpy()
    return flat.reshape(-1)


def from_flattened_numpy(x, shape):
    """Form a torch tensor with the given `shape` from a flattened numpy array `x`."""
    arr = x.reshape(shape)
    return torch.from_numpy(arr)
tensor with the given `shape` from a flattened numpy array `x`.""" + return torch.from_numpy(x.reshape(shape)) + + +def get_pc_sampler( + predictor_name, corrector_name, sde, score_fn, y, + denoise=True, eps=3e-2, snr=0.1, corrector_steps=1, probability_flow: bool = False, + intermediate=False, **kwargs +): + """Create a Predictor-Corrector (PC) sampler. + + Args: + predictor_name: The name of a registered `sampling.Predictor`. + corrector_name: The name of a registered `sampling.Corrector`. + sde: An `sdes.SDE` object representing the forward SDE. + score_fn: A function (typically learned model) that predicts the score. + y: A `torch.Tensor`, representing the (non-white-)noisy starting point(s) to condition the prior on. + denoise: If `True`, add one-step denoising to the final samples. + eps: A `float` number. The reverse-time SDE and ODE are integrated to `epsilon` to avoid numerical issues. + snr: The SNR to use for the corrector. 0.1 by default, and ignored for `NoneCorrector`. + N: The number of reverse sampling steps. If `None`, uses the SDE's `N` property by default. + + Returns: + A sampling function that returns samples and the number of function evaluations during sampling. 
+ """ + predictor_cls = PredictorRegistry.get_by_name(predictor_name) + corrector_cls = CorrectorRegistry.get_by_name(corrector_name) + predictor = predictor_cls(sde, score_fn, probability_flow=probability_flow) + corrector = corrector_cls(sde, score_fn, snr=snr, n_steps=corrector_steps) + + def pc_sampler(): + """The PC sampler function.""" + with torch.no_grad(): + xt = sde.prior_sampling(y.shape, y).to(y.device) + timesteps = torch.linspace(sde.T, eps, sde.N, device=y.device) + for i in range(sde.N): + t = timesteps[i] + vec_t = torch.ones(y.shape[0], device=y.device) * t + xt, xt_mean = corrector.update_fn(xt, vec_t, y) + xt, xt_mean = predictor.update_fn(xt, vec_t, y) + x_result = xt_mean if denoise else xt + ns = sde.N * (corrector.n_steps + 1) + return x_result, ns + + return pc_sampler + + +def get_ode_sampler( + sde, score_fn, y, inverse_scaler=None, + denoise=True, rtol=1e-5, atol=1e-5, + method='RK45', eps=3e-2, device='cuda', **kwargs +): + """Probability flow ODE sampler with the black-box ODE solver. + + Args: + sde: An `sdes.SDE` object representing the forward SDE. + score_fn: A function (typically learned model) that predicts the score. + y: A `torch.Tensor`, representing the (non-white-)noisy starting point(s) to condition the prior on. + inverse_scaler: The inverse data normalizer. + denoise: If `True`, add one-step denoising to final samples. + rtol: A `float` number. The relative tolerance level of the ODE solver. + atol: A `float` number. The absolute tolerance level of the ODE solver. + method: A `str`. The algorithm used for the black-box ODE solver. + See the documentation of `scipy.integrate.solve_ivp`. + eps: A `float` number. The reverse-time SDE/ODE will be integrated to `eps` for numerical stability. + device: PyTorch device. + + Returns: + A sampling function that returns samples and the number of function evaluations during sampling. 
+    """
+    predictor = ReverseDiffusionPredictor(sde, score_fn, probability_flow=False)
+    rsde = sde.reverse(score_fn, probability_flow=True)
+
+    def denoise_update_fn(x):
+        vec_eps = torch.ones(x.shape[0], device=x.device) * eps
+        _, x = predictor.update_fn(x, vec_eps, y)
+        return x
+
+    def drift_fn(x, t, y):
+        """Get the drift function of the reverse-time SDE."""
+        return rsde.sde(x, t, y)[0]
+
+    def ode_sampler(z=None, **kwargs):
+        """The probability flow ODE sampler with black-box ODE solver.
+
+        Args:
+            model: A score model.
+            z: If present, generate samples from latent code `z`.
+        Returns:
+            samples, number of function evaluations.
+        """
+        with torch.no_grad():
+            # If not present, sample the latent code from the prior distribution of the SDE.
+            x = sde.prior_sampling(y.shape, y).to(device)
+
+            def ode_func(t, x):
+                x = from_flattened_numpy(x, y.shape).to(device).type(torch.complex64)
+                vec_t = torch.ones(y.shape[0], device=x.device) * t
+                drift = drift_fn(x, vec_t, y)
+                return to_flattened_numpy(drift)
+
+            # Black-box ODE solver for the probability flow ODE
+            solution = integrate.solve_ivp(
+                ode_func, (sde.T, eps), to_flattened_numpy(x),
+                rtol=rtol, atol=atol, method=method, **kwargs
+            )
+            nfe = solution.nfev
+            x = torch.tensor(solution.y[:, -1]).reshape(y.shape).to(device).type(torch.complex64)
+
+            # Denoising is equivalent to running one predictor step without adding noise
+            if denoise:
+                x = denoise_update_fn(x)
+
+            if inverse_scaler is not None:
+                x = inverse_scaler(x)
+            return x, nfe
+
+    return ode_sampler
diff --git a/modules/sgmse/sampling/correctors.py b/modules/sgmse/sampling/correctors.py
new file mode 100644
index 00000000..e1057475
--- /dev/null
+++ b/modules/sgmse/sampling/correctors.py
@@ -0,0 +1,96 @@
+import abc
+import torch
+
+from modules.sgmse import sdes
+from utils.sgmse_util.registry import Registry
+
+
+CorrectorRegistry = Registry("Corrector")
+
+
+class Corrector(abc.ABC):
+    """The abstract class for a corrector algorithm."""
+
+    def 
__init__(self, sde, score_fn, snr, n_steps): + super().__init__() + self.rsde = sde.reverse(score_fn) + self.score_fn = score_fn + self.snr = snr + self.n_steps = n_steps + + @abc.abstractmethod + def update_fn(self, x, t, *args): + """One update of the corrector. + + Args: + x: A PyTorch tensor representing the current state + t: A PyTorch tensor representing the current time step. + *args: Possibly additional arguments, in particular `y` for OU processes + + Returns: + x: A PyTorch tensor of the next state. + x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising. + """ + pass + + +@CorrectorRegistry.register(name='langevin') +class LangevinCorrector(Corrector): + def __init__(self, sde, score_fn, snr, n_steps): + super().__init__(sde, score_fn, snr, n_steps) + self.score_fn = score_fn + self.n_steps = n_steps + self.snr = snr + + def update_fn(self, x, t, *args): + target_snr = self.snr + for _ in range(self.n_steps): + grad = self.score_fn(x, t, *args) + noise = torch.randn_like(x) + grad_norm = torch.norm(grad.reshape(grad.shape[0], -1), dim=-1).mean() + noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean() + step_size = ((target_snr * noise_norm / grad_norm) ** 2 * 2).unsqueeze(0) + x_mean = x + step_size[:, None, None, None] * grad + x = x_mean + noise * torch.sqrt(step_size * 2)[:, None, None, None] + + return x, x_mean + + +@CorrectorRegistry.register(name='ald') +class AnnealedLangevinDynamics(Corrector): + """The original annealed Langevin dynamics predictor in NCSN/NCSNv2.""" + def __init__(self, sde, score_fn, snr, n_steps): + super().__init__(sde, score_fn, snr, n_steps) + if not isinstance(sde, (sdes.OUVESDE,)): + raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.") + self.sde = sde + self.score_fn = score_fn + self.snr = snr + self.n_steps = n_steps + + def update_fn(self, x, t, *args): + n_steps = self.n_steps + target_snr = self.snr + std = self.sde.marginal_prob(x, t, 
*args)[1] + + for _ in range(n_steps): + grad = self.score_fn(x, t, *args) + noise = torch.randn_like(x) + step_size = (target_snr * std) ** 2 * 2 + x_mean = x + step_size[:, None, None, None] * grad + x = x_mean + noise * torch.sqrt(step_size * 2)[:, None, None, None] + + return x, x_mean + + +@CorrectorRegistry.register(name='none') +class NoneCorrector(Corrector): + """An empty corrector that does nothing.""" + + def __init__(self, *args, **kwargs): + self.snr = 0 + self.n_steps = 0 + pass + + def update_fn(self, x, t, *args): + return x, x diff --git a/modules/sgmse/sampling/predictors.py b/modules/sgmse/sampling/predictors.py new file mode 100644 index 00000000..963fd525 --- /dev/null +++ b/modules/sgmse/sampling/predictors.py @@ -0,0 +1,76 @@ +import abc + +import torch +import numpy as np + +from utils.sgmse_util.registry import Registry + + +PredictorRegistry = Registry("Predictor") + + +class Predictor(abc.ABC): + """The abstract class for a predictor algorithm.""" + + def __init__(self, sde, score_fn, probability_flow=False): + super().__init__() + self.sde = sde + self.rsde = sde.reverse(score_fn) + self.score_fn = score_fn + self.probability_flow = probability_flow + + @abc.abstractmethod + def update_fn(self, x, t, *args): + """One update of the predictor. + + Args: + x: A PyTorch tensor representing the current state + t: A Pytorch tensor representing the current time step. + *args: Possibly additional arguments, in particular `y` for OU processes + + Returns: + x: A PyTorch tensor of the next state. + x_mean: A PyTorch tensor. The next state without random noise. Useful for denoising. 
+ """ + pass + + def debug_update_fn(self, x, t, *args): + raise NotImplementedError(f"Debug update function not implemented for predictor {self}.") + + +@PredictorRegistry.register('euler_maruyama') +class EulerMaruyamaPredictor(Predictor): + def __init__(self, sde, score_fn, probability_flow=False): + super().__init__(sde, score_fn, probability_flow=probability_flow) + + def update_fn(self, x, t, *args): + dt = -1. / self.rsde.N + z = torch.randn_like(x) + f, g = self.rsde.sde(x, t, *args) + x_mean = x + f * dt + x = x_mean + g[:, None, None, None] * np.sqrt(-dt) * z + return x, x_mean + + +@PredictorRegistry.register('reverse_diffusion') +class ReverseDiffusionPredictor(Predictor): + def __init__(self, sde, score_fn, probability_flow=False): + super().__init__(sde, score_fn, probability_flow=probability_flow) + + def update_fn(self, x, t, *args): + f, g = self.rsde.discretize(x, t, *args) + z = torch.randn_like(x) + x_mean = x - f + x = x_mean + g[:, None, None, None] * z + return x, x_mean + + +@PredictorRegistry.register('none') +class NonePredictor(Predictor): + """An empty predictor that does nothing.""" + + def __init__(self, *args, **kwargs): + pass + + def update_fn(self, x, t, *args): + return x, x diff --git a/modules/sgmse/sdes.py b/modules/sgmse/sdes.py new file mode 100644 index 00000000..2fd86a69 --- /dev/null +++ b/modules/sgmse/sdes.py @@ -0,0 +1,307 @@ +""" +Abstract SDE classes, Reverse SDE, and VE/VP SDEs. + +Taken and adapted from https://github.com/yang-song/score_sde_pytorch/blob/1618ddea340f3e4a2ed7852a0694a809775cf8d0/sde_lib.py +""" +import abc +import warnings + +import numpy as np +from utils.sgmse_util.tensors import batch_broadcast +import torch + +from utils.sgmse_util.registry import Registry + + +SDERegistry = Registry("SDE") + + +class SDE(abc.ABC): + """SDE abstract class. Functions are designed for a mini-batch of inputs.""" + + def __init__(self, N): + """Construct an SDE. + + Args: + N: number of discretization time steps. 
+        """
+        super().__init__()
+        self.N = N
+
+    @property
+    @abc.abstractmethod
+    def T(self):
+        """End time of the SDE."""
+        pass
+
+    @abc.abstractmethod
+    def sde(self, x, t, *args):
+        pass
+
+    @abc.abstractmethod
+    def marginal_prob(self, x, t, *args):
+        """Parameters to determine the marginal distribution of the SDE, $p_t(x|args)$."""
+        pass
+
+    @abc.abstractmethod
+    def prior_sampling(self, shape, *args):
+        """Generate one sample from the prior distribution, $p_T(x|args)$ with shape `shape`."""
+        pass
+
+    @abc.abstractmethod
+    def prior_logp(self, z):
+        """Compute log-density of the prior distribution.
+
+        Useful for computing the log-likelihood via probability flow ODE.
+
+        Args:
+            z: latent code
+        Returns:
+            log probability density
+        """
+        pass
+
+    @staticmethod
+    @abc.abstractmethod
+    def add_argparse_args(parent_parser):
+        """
+        Add the necessary arguments for instantiation of this SDE class to an argparse ArgumentParser.
+        """
+        pass
+
+    def discretize(self, x, t, *args):
+        """Discretize the SDE in the form: x_{i+1} = x_i + f_i(x_i) + G_i z_i.
+
+        Useful for reverse diffusion sampling and probability flow sampling.
+        Defaults to Euler-Maruyama discretization.
+
+        Args:
+            x: a torch tensor
+            t: a torch float representing the time step (from 0 to `self.T`)
+
+        Returns:
+            f, G
+        """
+        dt = 1 / self.N
+        drift, diffusion = self.sde(x, t, *args)
+        f = drift * dt
+        G = diffusion * torch.sqrt(torch.tensor(dt, device=t.device))
+        return f, G
+
+    def reverse(oself, score_model, probability_flow=False):
+        """Create the reverse-time SDE/ODE.
+
+        Args:
+            score_model: A function that takes x, t and y and returns the score.
+            probability_flow: If `True`, create the reverse-time ODE used for probability flow sampling.
+        """
+        N = oself.N
+        T = oself.T
+        sde_fn = oself.sde
+        discretize_fn = oself.discretize
+
+        # Build the class for reverse-time SDE. 
+        class RSDE(oself.__class__):
+            def __init__(self):
+                self.N = N
+                self.probability_flow = probability_flow
+
+            @property
+            def T(self):
+                return T
+
+            def sde(self, x, t, *args):
+                """Create the drift and diffusion functions for the reverse SDE/ODE."""
+                rsde_parts = self.rsde_parts(x, t, *args)
+                total_drift, diffusion = rsde_parts["total_drift"], rsde_parts["diffusion"]
+                return total_drift, diffusion
+
+            def rsde_parts(self, x, t, *args):
+                sde_drift, sde_diffusion = sde_fn(x, t, *args)
+                score = score_model(x, t, *args)
+                score_drift = -sde_diffusion[:, None, None, None]**2 * score * (0.5 if self.probability_flow else 1.)
+                diffusion = torch.zeros_like(sde_diffusion) if self.probability_flow else sde_diffusion
+                total_drift = sde_drift + score_drift
+                return {
+                    'total_drift': total_drift, 'diffusion': diffusion, 'sde_drift': sde_drift,
+                    'sde_diffusion': sde_diffusion, 'score_drift': score_drift, 'score': score,
+                }
+
+            def discretize(self, x, t, *args):
+                """Create discretized iteration rules for the reverse diffusion sampler."""
+                f, G = discretize_fn(x, t, *args)
+                rev_f = f - G[:, None, None, None] ** 2 * score_model(x, t, *args) * (0.5 if self.probability_flow else 1.)
+                rev_G = torch.zeros_like(G) if self.probability_flow else G
+                return rev_f, rev_G
+
+        return RSDE()
+
+    @abc.abstractmethod
+    def copy(self):
+        pass
+
+
+@SDERegistry.register("ouve")
+class OUVESDE(SDE):
+    @staticmethod
+    def add_argparse_args(parser):
+        parser.add_argument("--sde-n", type=int, default=1000, help="The number of timesteps in the SDE discretization. 1000 by default")
+        parser.add_argument("--theta", type=float, default=1.5, help="The constant stiffness of the Ornstein-Uhlenbeck process. 1.5 by default.")
+        parser.add_argument("--sigma-min", type=float, default=0.05, help="The minimum sigma to use. 0.05 by default.")
+        parser.add_argument("--sigma-max", type=float, default=0.5, help="The maximum sigma to use. 
0.5 by default.") + return parser + + def __init__(self, theta, sigma_min, sigma_max, N=1000, **ignored_kwargs): + """Construct an Ornstein-Uhlenbeck Variance Exploding SDE. + + Note that the "steady-state mean" `y` is not provided at construction, but must rather be given as an argument + to the methods which require it (e.g., `sde` or `marginal_prob`). + + dx = -theta (y-x) dt + sigma(t) dw + + with + + sigma(t) = sigma_min (sigma_max/sigma_min)^t * sqrt(2 log(sigma_max/sigma_min)) + + Args: + theta: stiffness parameter. + sigma_min: smallest sigma. + sigma_max: largest sigma. + N: number of discretization steps + """ + super().__init__(N) + self.theta = theta + self.sigma_min = sigma_min + self.sigma_max = sigma_max + self.logsig = np.log(self.sigma_max / self.sigma_min) + self.N = N + + def copy(self): + return OUVESDE(self.theta, self.sigma_min, self.sigma_max, N=self.N) + + @property + def T(self): + return 1 + + def sde(self, x, t, y): + drift = self.theta * (y - x) + # the sqrt(2*logsig) factor is required here so that logsig does not in the end affect the perturbation kernel + # standard deviation. this can be understood from solving the integral of [exp(2s) * g(s)^2] from s=0 to t + # with g(t) = sigma(t) as defined here, and seeing that `logsig` remains in the integral solution + # unless this sqrt(2*logsig) factor is included. + sigma = self.sigma_min * (self.sigma_max / self.sigma_min) ** t + diffusion = sigma * np.sqrt(2 * self.logsig) + return drift, diffusion + + def _mean(self, x0, t, y): + theta = self.theta + exp_interp = torch.exp(-theta * t)[:, None, None, None] + return exp_interp * x0 + (1 - exp_interp) * y + + def _std(self, t): + # This is a full solution to the ODE for P(t) in our derivations, after choosing g(s) as in self.sde() + sigma_min, theta, logsig = self.sigma_min, self.theta, self.logsig + # could maybe replace the two torch.exp(... 
* t) terms here by cached values **t + return torch.sqrt( + ( + sigma_min**2 + * torch.exp(-2 * theta * t) + * (torch.exp(2 * (theta + logsig) * t) - 1) + * logsig + ) + / + (theta + logsig) + ) + + def marginal_prob(self, x0, t, y): + return self._mean(x0, t, y), self._std(t) + + def prior_sampling(self, shape, y): + if shape != y.shape: + warnings.warn(f"Target shape {shape} does not match shape of y {y.shape}! Ignoring target shape.") + std = self._std(torch.ones((y.shape[0],), device=y.device)) + x_T = y + torch.randn_like(y) * std[:, None, None, None] + return x_T + + def prior_logp(self, z): + raise NotImplementedError("prior_logp for OU SDE not yet implemented!") + + +@SDERegistry.register("ouvp") +class OUVPSDE(SDE): + # !!! We do not utilize this SDE in our works due to observed instabilities around t=0.2. !!! + @staticmethod + def add_argparse_args(parser): + parser.add_argument("--sde-n", type=int, default=1000, + help="The number of timesteps in the SDE discretization. 1000 by default") + parser.add_argument("--beta-min", type=float, required=True, + help="The minimum beta to use.") + parser.add_argument("--beta-max", type=float, required=True, + help="The maximum beta to use.") + parser.add_argument("--stiffness", type=float, default=1, + help="The stiffness factor for the drift, to be multiplied by 0.5*beta(t). 1 by default.") + return parser + + def __init__(self, beta_min, beta_max, stiffness=1, N=1000, **ignored_kwargs): + """ + !!! We do not utilize this SDE in our works due to observed instabilities around t=0.2. !!! + + Construct an Ornstein-Uhlenbeck Variance Preserving SDE: + + dx = -1/2 * beta(t) * stiffness * (y-x) dt + sqrt(beta(t)) * dw + + with + + beta(t) = beta_min + t(beta_max - beta_min) + + Note that the "steady-state mean" `y` is not provided at construction, but must rather be given as an argument + to the methods which require it (e.g., `sde` or `marginal_prob`). + + Args: + beta_min: smallest sigma. + beta_max: largest sigma. 
+ stiffness: stiffness factor of the drift. 1 by default. + N: number of discretization steps + """ + super().__init__(N) + self.beta_min = beta_min + self.beta_max = beta_max + self.stiffness = stiffness + self.N = N + + def copy(self): + return OUVPSDE(self.beta_min, self.beta_max, self.stiffness, N=self.N) + + @property + def T(self): + return 1 + + def _beta(self, t): + return self.beta_min + t * (self.beta_max - self.beta_min) + + def sde(self, x, t, y): + drift = 0.5 * self.stiffness * batch_broadcast(self._beta(t), y) * (y - x) + diffusion = torch.sqrt(self._beta(t)) + return drift, diffusion + + def _mean(self, x0, t, y): + b0, b1, s = self.beta_min, self.beta_max, self.stiffness + x0y_fac = torch.exp(-0.25 * s * t * (t * (b1-b0) + 2 * b0))[:, None, None, None] + return y + x0y_fac * (x0 - y) + + def _std(self, t): + b0, b1, s = self.beta_min, self.beta_max, self.stiffness + return (1 - torch.exp(-0.5 * s * t * (t * (b1-b0) + 2 * b0))) / s + + def marginal_prob(self, x0, t, y): + return self._mean(x0, t, y), self._std(t) + + def prior_sampling(self, shape, y): + if shape != y.shape: + warnings.warn(f"Target shape {shape} does not match shape of y {y.shape}! 
Ignoring target shape.") + std = self._std(torch.ones((y.shape[0],), device=y.device)) + x_T = y + torch.randn_like(y) * std[:, None, None, None] + return x_T + + def prior_logp(self, z): + raise NotImplementedError("prior_logp for OU SDE not yet implemented!") diff --git a/modules/sgmse/shared.py b/modules/sgmse/shared.py new file mode 100644 index 00000000..458be781 --- /dev/null +++ b/modules/sgmse/shared.py @@ -0,0 +1,123 @@ +import functools +import numpy as np + +import torch +import torch.nn as nn + +from utils.sgmse_util.registry import Registry + + +BackboneRegistry = Registry("Backbone") + + +class GaussianFourierProjection(nn.Module): + """Gaussian random features for encoding time steps.""" + + def __init__(self, embed_dim, scale=16, complex_valued=False): + super().__init__() + self.complex_valued = complex_valued + if not complex_valued: + # If the output is real-valued, we concatenate sin+cos of the features to avoid ambiguities. + # Therefore, in this case the effective embed_dim is cut in half. For the complex-valued case, + # we use complex numbers which each represent sin+cos directly, so the ambiguity is avoided directly, + # and this halving is not necessary. + embed_dim = embed_dim // 2 + # Randomly sample weights during initialization. These weights are fixed + # during optimization and are not trainable. + self.W = nn.Parameter(torch.randn(embed_dim) * scale, requires_grad=False) + + def forward(self, t): + t_proj = t[:, None] * self.W[None, :] * 2*np.pi + if self.complex_valued: + return torch.exp(1j * t_proj) + else: + return torch.cat([torch.sin(t_proj), torch.cos(t_proj)], dim=-1) + + +class DiffusionStepEmbedding(nn.Module): + """Diffusion-Step embedding as in DiffWave / Vaswani et al. 2017.""" + + def __init__(self, embed_dim, complex_valued=False): + super().__init__() + self.complex_valued = complex_valued + if not complex_valued: + # If the output is real-valued, we concatenate sin+cos of the features to avoid ambiguities. 
+ # Therefore, in this case the effective embed_dim is cut in half. For the complex-valued case, + # we use complex numbers which each represent sin+cos directly, so the ambiguity is avoided directly, + # and this halving is not necessary. + embed_dim = embed_dim // 2 + self.embed_dim = embed_dim + + def forward(self, t): + fac = 10**(4*torch.arange(self.embed_dim, device=t.device) / (self.embed_dim-1)) + inner = t[:, None] * fac[None, :] + if self.complex_valued: + return torch.exp(1j * inner) + else: + return torch.cat([torch.sin(inner), torch.cos(inner)], dim=-1) + + +class ComplexLinear(nn.Module): + """A potentially complex-valued linear layer. Reduces to a regular linear layer if `complex_valued=False`.""" + def __init__(self, input_dim, output_dim, complex_valued): + super().__init__() + self.complex_valued = complex_valued + if self.complex_valued: + self.re = nn.Linear(input_dim, output_dim) + self.im = nn.Linear(input_dim, output_dim) + else: + self.lin = nn.Linear(input_dim, output_dim) + + def forward(self, x): + if self.complex_valued: + return (self.re(x.real) - self.im(x.imag)) + 1j*(self.re(x.imag) + self.im(x.real)) + else: + return self.lin(x) + + +class FeatureMapDense(nn.Module): + """A fully connected layer that reshapes outputs to feature maps.""" + + def __init__(self, input_dim, output_dim, complex_valued=False): + super().__init__() + self.complex_valued = complex_valued + self.dense = ComplexLinear(input_dim, output_dim, complex_valued=complex_valued) + + def forward(self, x): + return self.dense(x)[..., None, None] + + +def torch_complex_from_reim(re, im): + return torch.view_as_complex(torch.stack([re, im], dim=-1)) + + +class ArgsComplexMultiplicationWrapper(nn.Module): + """Adapted from `asteroid`'s `complex_nn.py`, allowing args/kwargs to be passed through forward(). 
+ + Make a complex-valued module `F` from a real-valued module `f` by applying + complex multiplication rules: + + F(a + i b) = f1(a) - f1(b) + i (f2(b) + f2(a)) + + where `f1`, `f2` are instances of `f` that do *not* share weights. + + Args: + module_cls (callable): A class or function that returns a Torch module/functional. + Constructor of `f` in the formula above. Called 2x with `*args`, `**kwargs`, + to construct the real and imaginary component modules. + """ + + def __init__(self, module_cls, *args, **kwargs): + super().__init__() + self.re_module = module_cls(*args, **kwargs) + self.im_module = module_cls(*args, **kwargs) + + def forward(self, x, *args, **kwargs): + return torch_complex_from_reim( + self.re_module(x.real, *args, **kwargs) - self.im_module(x.imag, *args, **kwargs), + self.re_module(x.imag, *args, **kwargs) + self.im_module(x.real, *args, **kwargs), + ) + + +ComplexConv2d = functools.partial(ArgsComplexMultiplicationWrapper, nn.Conv2d) +ComplexConvTranspose2d = functools.partial(ArgsComplexMultiplicationWrapper, nn.ConvTranspose2d) diff --git a/preprocessors/wsj0reverb.py b/preprocessors/wsj0reverb.py new file mode 100644 index 00000000..36b90f75 --- /dev/null +++ b/preprocessors/wsj0reverb.py @@ -0,0 +1,130 @@ +import json +from tqdm import tqdm +import os +import torchaudio +from utils import audio +import csv +import random +from text import _clean_text +import librosa +import soundfile as sf +import pyroomacoustics as pra +from scipy.io import wavfile +from glob import glob +from pathlib import Path +import numpy as np + +SEED = 100 +np.random.seed(SEED) + +T60_RANGE = [0.4, 1.0] +SNR_RANGE = [0, 20] +DIM_RANGE = [5, 15, 5, 15, 2, 6] +MIN_DISTANCE_TO_WALL = 1 +MIC_ARRAY_RADIUS = 0.16 +TARGET_T60_SHAPE = {"CI": 0.08, "HA": 0.2} +TARGET_T60_SHAPE = {"CI": 0.10, "HA": 0.2} +TARGETS_CROP = {"CI": 16e-3, "HA": 40e-3} +NB_SAMPLES_PER_ROOM = 1 +CHANNELS = 1 + +def obtain_clean_file(speech_list, i_sample, sample_rate=16000): + speech, speech_sr = 
sf.read(speech_list[i_sample]) + speech_basename = os.path.basename(speech_list[i_sample]) + assert speech_sr == sample_rate, f"wrong speech sampling rate here: expected {sample_rate} got {speech_sr}" + return speech.squeeze(), speech_sr, speech_basename[: -4] + + +def main(output_path, dataset_path): + print("-" * 10) + print("Dataset splits for {}...\n".format("wsj0reverb")) + dataset = "wsj0reverb" + sample_rate = 16000 + save_dir = os.path.join(output_path, dataset) + os.makedirs(save_dir, exist_ok=True) + wsj0reverb_path = dataset_path + splits = ['valid', 'train', 'test'] + dic_split = {"valid": "si_dt_05", "train": "si_tr_s", "test": "si_et_05"} + speech_lists = {split: sorted(glob(f"{os.path.join(wsj0reverb_path, dic_split[split])}/**/*.wav")) for split in splits} + + for i_split, split in enumerate(splits): + print("Processing split n° {}: {}...".format(i_split + 1, split)) + + reverberant_output_dir = os.path.join(save_dir, "audio", split, "reverb") + dry_output_dir = os.path.join(save_dir, "audio", split, "anechoic") + noisy_reverberant_output_dir = os.path.join(save_dir, "audio", split, "noisy_reverb") + if split == "test": + unauralized_output_dir = os.path.join(save_dir, "audio", split, "unauralized") + + os.makedirs(reverberant_output_dir, exist_ok=True) + os.makedirs(dry_output_dir, exist_ok=True) + if split == "test": + os.makedirs(unauralized_output_dir, exist_ok=True) + + speech_list = speech_lists[split] + real_nb_samples = len(speech_list) + for i_sample in tqdm(range(real_nb_samples)): + if not i_sample % NB_SAMPLES_PER_ROOM: # Generate new room + t60 = np.random.uniform(T60_RANGE[0], T60_RANGE[1]) # Draw T60 + room_dim = np.array( + [np.random.uniform(DIM_RANGE[2 * n], DIM_RANGE[2 * n + 1]) for n in range(3)]) # Draw Dimensions + center_mic_position = np.array( + [np.random.uniform(MIN_DISTANCE_TO_WALL, room_dim[n] - MIN_DISTANCE_TO_WALL) for n in + range(3)]) # draw source position + source_position = np.array( + 
[np.random.uniform(MIN_DISTANCE_TO_WALL, room_dim[n] - MIN_DISTANCE_TO_WALL) for n in + range(3)]) # draw source position + mic_array_2d = pra.beamforming.circular_2D_array(center_mic_position[: -1], CHANNELS, phi0=0, + radius=MIC_ARRAY_RADIUS) # Compute microphone array + mic_array = np.pad(mic_array_2d, ((0, 1), (0, 0)), mode="constant", + constant_values=center_mic_position[-1]) + + ### Reverberant Room + e_absorption, max_order = pra.inverse_sabine(t60, room_dim) # Compute absorption coeff + reverberant_room = pra.ShoeBox( + room_dim, fs=16000, materials=pra.Material(e_absorption), max_order=min(3, max_order) + ) # Create room + reverberant_room.set_ray_tracing() + reverberant_room.add_microphone_array(mic_array) # Add microphone array + + # Pick unauralized files + speech, speech_sr, speech_basename = obtain_clean_file(speech_list, i_sample, sample_rate=sample_rate) + + # Generate reverberant room + reverberant_room.add_source(source_position, signal=speech) + reverberant_room.compute_rir() + reverberant_room.simulate() + t60_real = np.mean(reverberant_room.measure_rt60()).squeeze() + reverberant = np.stack(reverberant_room.mic_array.signals).swapaxes(0, 1) + + e_absorption_dry = 0.99 # For Neural Networks OK but clearly not for WPE + dry_room = pra.ShoeBox( + room_dim, fs=16000, materials=pra.Material(e_absorption_dry), max_order=0 + ) # Create room + dry_room.add_microphone_array(mic_array) # Add microphone array + + # Generate dry room + dry_room.add_source(source_position, signal=speech) + dry_room.compute_rir() + dry_room.simulate() + t60_real_dry = np.mean(dry_room.measure_rt60()).squeeze() + rir_dry = dry_room.rir + dry = np.stack(dry_room.mic_array.signals).swapaxes(0, 1) + dry = np.pad(dry, ((0, int(.5 * sample_rate)), (0, 0)), mode="constant", + constant_values=0) # Add 1 second of silence after dry (because very dry) so that the reverb is not cut, and all samples have same length + + min_len_sample = min(reverberant.shape[0], dry.shape[0]) + dry = 
dry[: min_len_sample] + reverberant = reverberant[: min_len_sample] + output_scaling = np.max(reverberant) / .9 + + drr = 10 * np.log10(np.mean(dry ** 2) / (np.mean(reverberant ** 2) + 1e-8) + 1e-8) + output_filename = f"{speech_basename}_{i_sample // NB_SAMPLES_PER_ROOM}_{t60_real:.2f}_{drr:.1f}.wav" + + sf.write(os.path.join(dry_output_dir, output_filename), 1 / output_scaling * dry, samplerate=sample_rate) + sf.write(os.path.join(reverberant_output_dir, output_filename), 1 / output_scaling * reverberant, + samplerate=sample_rate) + + if split == "test": + sf.write(os.path.join(unauralized_output_dir, output_filename), speech, samplerate=sample_rate) + From 20e13042ae5f0ac13642e1169476b8b813a1d5de Mon Sep 17 00:00:00 2001 From: lithr1 <1102340779@qq.com> Date: Sun, 7 Apr 2024 14:54:54 +0800 Subject: [PATCH 2/3] sgmse --- bins/sgmse/inference.py | 23 +- .../sgmse/dereverberation/dereverberation.py | 1 + .../dereverberation_Trainer.py | 64 +- .../dereverberation_dataset.py | 31 +- .../dereverberation_inference.py | 20 +- modules/sgmse/__init__.py | 2 +- modules/sgmse/dcunet.py | 366 +++-- modules/sgmse/ncsnpp.py | 312 +++-- modules/sgmse/ncsnpp_utils/layers.py | 1222 +++++++++-------- modules/sgmse/ncsnpp_utils/layerspp.py | 489 ++++--- modules/sgmse/ncsnpp_utils/normalization.py | 354 ++--- modules/sgmse/ncsnpp_utils/op/fused_act.py | 8 +- modules/sgmse/ncsnpp_utils/op/upfirdn2d.py | 2 +- .../sgmse/ncsnpp_utils/up_or_down_sampling.py | 326 ++--- modules/sgmse/ncsnpp_utils/utils.py | 259 ++-- modules/sgmse/sampling/__init__.py | 54 +- modules/sgmse/sampling/correctors.py | 11 +- modules/sgmse/sampling/predictors.py | 12 +- modules/sgmse/sdes.py | 101 +- modules/sgmse/shared.py | 21 +- preprocessors/wsj0reverb.py | 117 +- 21 files changed, 2225 insertions(+), 1570 deletions(-) diff --git a/bins/sgmse/inference.py b/bins/sgmse/inference.py index 9e5e2a60..d3bdf2ad 100644 --- a/bins/sgmse/inference.py +++ b/bins/sgmse/inference.py @@ -7,7 +7,9 @@ from argparse import 
ArgumentParser
 import os
 
-from models.sgmse.dereverberation.dereverberation_inference import DereverberationInference
+from models.sgmse.dereverberation.dereverberation_inference import (
+    DereverberationInference,
+)
 from utils.util import save_config, load_model_config, load_config
 import numpy as np
 import torch
@@ -36,16 +38,27 @@ def build_parser():
         "--checkpoint_path",
         type=str,
     )
-    parser.add_argument("--test_dir", type=str, required=True,
-                        help='Directory containing the test data (must have subdirectory noisy/)')
-    parser.add_argument("--corrector_steps", type=int, default=1, help="Number of corrector steps")
+    parser.add_argument(
+        "--test_dir",
+        type=str,
+        required=True,
+        help="Directory containing the test data (must have subdirectory noisy/)",
+    )
+    parser.add_argument(
+        "--corrector_steps", type=int, default=1, help="Number of corrector steps"
+    )
     parser.add_argument(
         "--output_dir",
         type=str,
         default=None,
         help="Output dir for saving generated results",
     )
-    parser.add_argument("--snr", type=float, default=0.33, help="SNR value for (annealed) Langevin dynmaics.")
+    parser.add_argument(
+        "--snr",
+        type=float,
+        default=0.33,
+        help="SNR value for (annealed) Langevin dynamics.",
+    )
     parser.add_argument("--N", type=int, default=50, help="Number of reverse steps")
     parser.add_argument("--local_rank", default=0, type=int)
     return parser
diff --git a/models/sgmse/dereverberation/dereverberation.py b/models/sgmse/dereverberation/dereverberation.py
index 4965d652..4d607096 100644
--- a/models/sgmse/dereverberation/dereverberation.py
+++ b/models/sgmse/dereverberation/dereverberation.py
@@ -18,6 +18,7 @@ def __init__(self, cfg):
         sde_cls = SDERegistry.get_by_name(cfg.sde)
         sde_cfg = cfg[cfg.sde]
         self.sde = sde_cls(**sde_cfg)
+
     def forward(self, x, t, y):
         # Concatenate y as an extra channel
         dnn_input = torch.cat([x, y], dim=1)
diff --git a/models/sgmse/dereverberation/dereverberation_Trainer.py b/models/sgmse/dereverberation/dereverberation_Trainer.py
index 
564395eb..ccf97263 100644 --- a/models/sgmse/dereverberation/dereverberation_Trainer.py +++ b/models/sgmse/dereverberation/dereverberation_Trainer.py @@ -17,7 +17,9 @@ def __init__(self, args, cfg): BaseTrainer.__init__(self, args, cfg) self.cfg = cfg self.save_config_file() - self.ema = ExponentialMovingAverage(self.model.parameters(), decay=self.cfg.train.ema_decay) + self.ema = ExponentialMovingAverage( + self.model.parameters(), decay=self.cfg.train.ema_decay + ) self._error_loading_ema = False self.t_eps = self.cfg.train.t_eps self.num_eval_files = self.cfg.train.num_eval_files @@ -49,19 +51,22 @@ def load_checkpoint(self): def build_data_loader(self): Dataset = self.build_dataset() - train_set = Dataset(self.cfg, subset='train', shuffle_spec=True) + train_set = Dataset(self.cfg, subset="train", shuffle_spec=True) train_loader = DataLoader( train_set, batch_size=self.cfg.train.batch_size, num_workers=self.args.num_workers, - pin_memory=False, shuffle=True + pin_memory=False, + shuffle=True, ) - self.valid_set = Dataset(self.cfg, subset='valid', shuffle_spec=False) + self.valid_set = Dataset(self.cfg, subset="valid", shuffle_spec=False) valid_loader = DataLoader( self.valid_set, batch_size=self.cfg.train.batch_size, num_workers=self.args.num_workers, - pin_memory=False, shuffle=False) + pin_memory=False, + shuffle=False, + ) data_loader = {"train": train_loader, "valid": valid_loader} return data_loader @@ -99,7 +104,7 @@ def get_state_dict(self): "step": self.step, "epoch": self.epoch, "batch_size": self.cfg.train.batch_size, - "ema": self.ema.state_dict() + "ema": self.ema.state_dict(), } if self.scheduler is not None: state_dict["scheduler"] = self.scheduler.state_dict() @@ -120,24 +125,38 @@ def build_model(self): self.model = ScoreModel(self.cfg.model.sgmse) return self.model - def get_pc_sampler(self, predictor_name, corrector_name, y, N=None, minibatch=None, **kwargs): + def get_pc_sampler( + self, predictor_name, corrector_name, y, N=None, 
minibatch=None, **kwargs + ): N = self.model.sde.N if N is None else N sde = self.model.sde.copy() sde.N = N kwargs = {"eps": self.t_eps, **kwargs} if minibatch is None: - return sampling.get_pc_sampler(predictor_name, corrector_name, sde=sde, score_fn=self.model, y=y, **kwargs) + return sampling.get_pc_sampler( + predictor_name, + corrector_name, + sde=sde, + score_fn=self.model, + y=y, + **kwargs, + ) else: M = y.shape[0] def batched_sampling_fn(): samples, ns = [], [] for i in range(int(ceil(M / minibatch))): - y_mini = y[i * minibatch:(i + 1) * minibatch] - sampler = sampling.get_pc_sampler(predictor_name, corrector_name, sde=sde, score_fn=self.model, - y=y_mini, - **kwargs) + y_mini = y[i * minibatch : (i + 1) * minibatch] + sampler = sampling.get_pc_sampler( + predictor_name, + corrector_name, + sde=sde, + score_fn=self.model, + y=y_mini, + **kwargs, + ) sample, n = sampler() samples.append(sample) ns.append(n) @@ -147,10 +166,13 @@ def batched_sampling_fn(): return batched_sampling_fn def _step(self, batch): - x = batch['X'] - y = batch['Y'] + x = batch["X"] + y = batch["Y"] - t = torch.rand(x.shape[0], device=x.device) * (self.model.sde.T - self.t_eps) + self.t_eps + t = ( + torch.rand(x.shape[0], device=x.device) * (self.model.sde.T - self.t_eps) + + self.t_eps + ) mean, std = self.model.sde.marginal_prob(x, t, y) z = torch.randn_like(x) @@ -173,18 +195,20 @@ def train_step(self, batch): # Update the EMA of the model parameters self.ema.update(self.model.parameters()) - self.write_summary({'train_loss': loss.item()}, {}) - return {'train_loss': loss.item()}, {}, loss.item() + self.write_summary({"train_loss": loss.item()}, {}) + return {"train_loss": loss.item()}, {}, loss.item() def eval_step(self, batch, batch_idx): self.ema.store(self.model.parameters()) self.ema.copy_to(self.model.parameters()) loss = self._step(batch) - self.write_valid_summary({'valid_loss': loss.item()}, {}) + self.write_valid_summary({"valid_loss": loss.item()}, {}) if batch_idx == 
0 and self.num_eval_files != 0: pesq, si_sdr, estoi = evaluate_model(self, self.num_eval_files) - self.write_valid_summary({'pesq': pesq, 'si_sdr': si_sdr, 'estoi': estoi}, {}) + self.write_valid_summary( + {"pesq": pesq, "si_sdr": si_sdr, "estoi": estoi}, {} + ) print(f" pesq={pesq}, si_sdr={si_sdr}, estoi={estoi}") if self.ema.collected_params is not None: self.ema.restore(self.model.parameters()) - return {'valid_loss': loss.item()}, {}, loss.item() + return {"valid_loss": loss.item()}, {}, loss.item() diff --git a/models/sgmse/dereverberation/dereverberation_dataset.py b/models/sgmse/dereverberation/dereverberation_dataset.py index 852240cf..00b39262 100644 --- a/models/sgmse/dereverberation/dereverberation_dataset.py +++ b/models/sgmse/dereverberation/dereverberation_dataset.py @@ -7,12 +7,14 @@ from os.path import join -class Specs(): +class Specs: def __init__(self, cfg, subset, shuffle_spec): self.cfg = cfg - self.data_dir = os.path.join(cfg.preprocess.processed_dir, cfg.dataset[0], "audio") - self.clean_files = sorted(glob(join(self.data_dir, subset) + '/anechoic/*.wav')) - self.noisy_files = sorted(glob(join(self.data_dir, subset) + '/reverb/*.wav')) + self.data_dir = os.path.join( + cfg.preprocess.processed_dir, cfg.dataset[0], "audio" + ) + self.clean_files = sorted(glob(join(self.data_dir, subset) + "/anechoic/*.wav")) + self.noisy_files = sorted(glob(join(self.data_dir, subset) + "/reverb/*.wav")) self.dummy = cfg.preprocess.dummy self.num_frames = cfg.preprocess.num_frames self.shuffle_spec = shuffle_spec @@ -38,12 +40,12 @@ def __getitem__(self, i): start = int(np.random.uniform(0, current_len - target_len)) else: start = int((current_len - target_len) / 2) - x = x[..., start:start + target_len] - y = y[..., start:start + target_len] + x = x[..., start : start + target_len] + y = y[..., start : start + target_len] else: # pad audio if the length T is smaller than num_frames - x = F.pad(x, (pad // 2, pad // 2 + (pad % 2)), mode='constant') - y = 
F.pad(y, (pad // 2, pad // 2 + (pad % 2)), mode='constant') + x = F.pad(x, (pad // 2, pad // 2 + (pad % 2)), mode="constant") + y = F.pad(y, (pad // 2, pad // 2 + (pad % 2)), mode="constant") # normalize w.r.t to the noisy or the clean signal or not at all # to ensure same clean signal power in x and y. @@ -59,7 +61,7 @@ def __getitem__(self, i): X = torch.stft(x, **self.stft_kwargs()) Y = torch.stft(y, **self.stft_kwargs()) X, Y = self.spec_transform(X), self.spec_transform(Y) - return {'X': X, 'Y': Y} + return {"X": X, "Y": Y} def __len__(self): if self.dummy: @@ -80,8 +82,11 @@ def stft_kwargs(self): def istft_kwargs(self): return dict( - n_fft=self.n_fft, hop_length=self.hop_length, - window=self.window, center=True) + n_fft=self.n_fft, + hop_length=self.hop_length, + window=self.window, + center=True, + ) def stft(self, sig): window = self._get_window(sig) @@ -89,7 +94,9 @@ def stft(self, sig): def istft(self, spec, length=None): window = self._get_window(spec) - return torch.istft(spec, **{**self.istft_kwargs(), "window": window, "length": length}) + return torch.istft( + spec, **{**self.istft_kwargs(), "window": window, "length": length} + ) @staticmethod def get_window(window_length): diff --git a/models/sgmse/dereverberation/dereverberation_inference.py b/models/sgmse/dereverberation/dereverberation_inference.py index d0233193..c3c847c1 100644 --- a/models/sgmse/dereverberation/dereverberation_inference.py +++ b/models/sgmse/dereverberation/dereverberation_inference.py @@ -41,10 +41,10 @@ def inference(self): N = self.args.N corrector_steps = self.args.corrector_steps self.model.eval() - noisy_dir = join(self.test_dir, 'noisy/') - noisy_files = sorted(glob.glob('{}/*.wav'.format(noisy_dir))) + noisy_dir = join(self.test_dir, "noisy/") + noisy_files = sorted(glob.glob("{}/*.wav".format(noisy_dir))) for noisy_file in tqdm(noisy_files): - filename = noisy_file.split('/')[-1] + filename = noisy_file.split("/")[-1] # Load wav y, _ = load(noisy_file) @@ -55,14 
+55,20 @@ def inference(self): y = y / norm_factor # Prepare DNN input - spec = Specs(self.cfg, subset='', shuffle_spec=False) + spec = Specs(self.cfg, subset="", shuffle_spec=False) Y = torch.unsqueeze(spec.spec_transform(spec.stft(sig=y.cuda())), 0) Y = pad_spec(Y) # Reverse sampling - sampler = DereverberationTrainer.get_pc_sampler(self, - 'reverse_diffusion', 'ald', Y.cuda(), N=N, - corrector_steps=corrector_steps, snr=snr) + sampler = DereverberationTrainer.get_pc_sampler( + self, + "reverse_diffusion", + "ald", + Y.cuda(), + N=N, + corrector_steps=corrector_steps, + snr=snr, + ) sample, _ = sampler() # Backward transform in time domain diff --git a/modules/sgmse/__init__.py b/modules/sgmse/__init__.py index 386d0a02..ff6c52ef 100644 --- a/modules/sgmse/__init__.py +++ b/modules/sgmse/__init__.py @@ -2,4 +2,4 @@ from .ncsnpp import NCSNpp from .dcunet import DCUNet -__all__ = ['BackboneRegistry', 'NCSNpp', 'DCUNet'] +__all__ = ["BackboneRegistry", "NCSNpp", "DCUNet"] diff --git a/modules/sgmse/dcunet.py b/modules/sgmse/dcunet.py index 6fa34c02..9815a76e 100644 --- a/modules/sgmse/dcunet.py +++ b/modules/sgmse/dcunet.py @@ -5,8 +5,16 @@ from torch import nn, Tensor from torch.nn.modules.batchnorm import _BatchNorm -from .shared import BackboneRegistry, ComplexConv2d, ComplexConvTranspose2d, ComplexLinear, \ - DiffusionStepEmbedding, GaussianFourierProjection, FeatureMapDense, torch_complex_from_reim +from .shared import ( + BackboneRegistry, + ComplexConv2d, + ComplexConvTranspose2d, + ComplexLinear, + DiffusionStepEmbedding, + GaussianFourierProjection, + FeatureMapDense, + torch_complex_from_reim, +) def get_activation(name): @@ -23,7 +31,9 @@ def get_activation(name): class BatchNorm(_BatchNorm): def _check_input_dim(self, input): if input.dim() < 2 or input.dim() > 4: - raise ValueError("expected 4D or 3D input (got {}D input)".format(input.dim())) + raise ValueError( + "expected 4D or 3D input (got {}D input)".format(input.dim()) + ) class 
OnReIm(nn.Module): @@ -38,6 +48,7 @@ def forward(self, x): # Code for DCUNet largely copied from Danilo's `informedenh` repo, cheers! + def unet_decoder_args(encoders, *, skip_connections): """Get list of decoder arguments for upsampling (right) side of a symmetric u-net, given the arguments used to construct the encoder. @@ -51,13 +62,27 @@ def unet_decoder_args(encoders, *, skip_connections): Arguments to be used to construct decoders """ decoder_args = [] - for enc_in_chan, enc_out_chan, enc_kernel_size, enc_stride, enc_padding, enc_dilation in reversed(encoders): + for ( + enc_in_chan, + enc_out_chan, + enc_kernel_size, + enc_stride, + enc_padding, + enc_dilation, + ) in reversed(encoders): if skip_connections and decoder_args: skip_in_chan = enc_out_chan else: skip_in_chan = 0 decoder_args.append( - (enc_out_chan + skip_in_chan, enc_in_chan, enc_kernel_size, enc_stride, enc_padding, enc_dilation) + ( + enc_out_chan + skip_in_chan, + enc_in_chan, + enc_kernel_size, + enc_stride, + enc_padding, + enc_dilation, + ) ) return tuple(decoder_args) @@ -69,8 +94,12 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): out_chan, tuple(kernel_size), tuple(stride), - tuple([n // 2 for n in kernel_size]) if padding == "auto" else tuple(padding), - tuple(dilation) + ( + tuple([n // 2 for n in kernel_size]) + if padding == "auto" + else tuple(padding) + ), + tuple(dilation), ) for in_chan, out_chan, kernel_size, stride, padding, dilation in encoder_args ) @@ -102,11 +131,11 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): # Encoders: # (in_chan, out_chan, kernel_size, stride, padding, dilation) ( - (1, 32, (7, 5), (2, 2), "auto", (1,1)), - (32, 64, (7, 5), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (1, 32, (7, 5), (2, 2), "auto", (1, 1)), + (32, 64, (7, 5), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 
64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), ), # Decoders: automatic inverse "auto", @@ -115,14 +144,14 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): # Encoders: # (in_chan, out_chan, kernel_size, stride, padding, dilation) ( - (1, 32, (7, 5), (2, 2), "auto", (1,1)), - (32, 32, (7, 5), (2, 1), "auto", (1,1)), - (32, 64, (7, 5), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), + (1, 32, (7, 5), (2, 2), "auto", (1, 1)), + (32, 32, (7, 5), (2, 1), "auto", (1, 1)), + (32, 64, (7, 5), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), ), # Decoders: automatic inverse "auto", @@ -131,16 +160,16 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): # Encoders: # (in_chan, out_chan, kernel_size, stride, padding, dilation) ( - (1, 32, (7, 1), (1, 1), "auto", (1,1)), - (32, 32, (1, 7), (1, 1), "auto", (1,1)), - (32, 64, (7, 5), (2, 2), "auto", (1,1)), - (64, 64, (7, 5), (2, 1), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 64, (5, 3), (2, 1), "auto", (1,1)), - (64, 64, (5, 3), (2, 2), "auto", (1,1)), - (64, 90, (5, 3), (2, 1), "auto", (1,1)), + (1, 32, (7, 1), (1, 1), "auto", (1, 1)), + (32, 32, (1, 7), (1, 1), "auto", (1, 1)), + (32, 64, (7, 5), (2, 2), "auto", (1, 1)), + (64, 64, (7, 5), (2, 1), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 1)), + (64, 64, (5, 3), (2, 1), "auto", (1, 1)), + (64, 64, (5, 3), (2, 2), "auto", (1, 
1)), + (64, 90, (5, 3), (2, 1), "auto", (1, 1)), ), # Decoders: automatic inverse "auto", @@ -149,11 +178,11 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): # Encoders: # (in_chan, out_chan, kernel_size, stride, padding, dilation) ( - (1, 32, (4, 4), (1, 1), "auto", (1, 1)), - (32, 32, (4, 4), (1, 1), "auto", (1, 1)), - (32, 32, (4, 4), (1, 1), "auto", (1, 1)), - (32, 64, (4, 4), (2, 1), "auto", (2, 1)), - (64, 128, (4, 4), (2, 2), "auto", (4, 1)), + (1, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 32, (4, 4), (1, 1), "auto", (1, 1)), + (32, 64, (4, 4), (2, 1), "auto", (2, 1)), + (64, 128, (4, 4), (2, 2), "auto", (4, 1)), (128, 256, (4, 4), (2, 2), "auto", (8, 1)), ), # Decoders: automatic inverse @@ -166,16 +195,71 @@ def make_unet_encoder_decoder_args(encoder_args, decoder_args): class DCUNet(nn.Module): @staticmethod def add_argparse_args(parser): - parser.add_argument("--dcunet-architecture", type=str, default="DilDCUNet-v2", choices=DCUNET_ARCHITECTURES.keys(), help="The concrete DCUNet architecture. 'DilDCUNet-v2' by default.") - parser.add_argument("--dcunet-time-embedding", type=str, choices=("gfp", "ds", "none"), default="gfp", help="Timestep embedding style. 'gfp' (Gaussian Fourier Projections) by default.") - parser.add_argument("--dcunet-temb-layers-global", type=int, default=1, help="Number of global linear+activation layers for the time embedding. 1 by default.") - parser.add_argument("--dcunet-temb-layers-local", type=int, default=1, help="Number of local (per-encoder/per-decoder) linear+activation layers for the time embedding. 1 by default.") - parser.add_argument("--dcunet-temb-activation", type=str, default="silu", help="The (complex) activation to use between all (global&local) time embedding layers.") - parser.add_argument("--dcunet-time-embedding-complex", action="store_true", help="Use complex-valued timestep embedding. 
Compatible with 'gfp' and 'ds' embeddings.") - parser.add_argument("--dcunet-fix-length", type=str, default="pad", choices=("pad", "trim", "none"), help="DCUNet strategy to 'fix' mismatched input timespan. 'pad' by default.") - parser.add_argument("--dcunet-mask-bound", type=str, choices=("tanh", "sigmoid", "none"), default="none", help="DCUNet output bounding strategy. 'none' by default.") - parser.add_argument("--dcunet-norm-type", type=str, choices=("bN", "CbN"), default="bN", help="The type of norm to use within each encoder and decoder layer. 'bN' (real/imaginary separate batch norm) by default.") - parser.add_argument("--dcunet-activation", type=str, choices=("leaky_relu", "relu", "silu"), default="leaky_relu", help="The activation to use within each encoder and decoder layer. 'leaky_relu' by default.") + parser.add_argument( + "--dcunet-architecture", + type=str, + default="DilDCUNet-v2", + choices=DCUNET_ARCHITECTURES.keys(), + help="The concrete DCUNet architecture. 'DilDCUNet-v2' by default.", + ) + parser.add_argument( + "--dcunet-time-embedding", + type=str, + choices=("gfp", "ds", "none"), + default="gfp", + help="Timestep embedding style. 'gfp' (Gaussian Fourier Projections) by default.", + ) + parser.add_argument( + "--dcunet-temb-layers-global", + type=int, + default=1, + help="Number of global linear+activation layers for the time embedding. 1 by default.", + ) + parser.add_argument( + "--dcunet-temb-layers-local", + type=int, + default=1, + help="Number of local (per-encoder/per-decoder) linear+activation layers for the time embedding. 1 by default.", + ) + parser.add_argument( + "--dcunet-temb-activation", + type=str, + default="silu", + help="The (complex) activation to use between all (global&local) time embedding layers.", + ) + parser.add_argument( + "--dcunet-time-embedding-complex", + action="store_true", + help="Use complex-valued timestep embedding. 
Compatible with 'gfp' and 'ds' embeddings.", + ) + parser.add_argument( + "--dcunet-fix-length", + type=str, + default="pad", + choices=("pad", "trim", "none"), + help="DCUNet strategy to 'fix' mismatched input timespan. 'pad' by default.", + ) + parser.add_argument( + "--dcunet-mask-bound", + type=str, + choices=("tanh", "sigmoid", "none"), + default="none", + help="DCUNet output bounding strategy. 'none' by default.", + ) + parser.add_argument( + "--dcunet-norm-type", + type=str, + choices=("bN", "CbN"), + default="bN", + help="The type of norm to use within each encoder and decoder layer. 'bN' (real/imaginary separate batch norm) by default.", + ) + parser.add_argument( + "--dcunet-activation", + type=str, + choices=("leaky_relu", "relu", "silu"), + default="leaky_relu", + help="The activation to use within each encoder and decoder layer. 'leaky_relu' by default.", + ) return parser def __init__( @@ -191,16 +275,20 @@ def __init__( dcunet_norm_type: str = "bN", dcunet_activation: str = "relu", embed_dim: int = 128, - **kwargs + **kwargs, ): super().__init__() self.architecture = dcunet_architecture - self.fix_length_mode = (dcunet_fix_length if dcunet_fix_length != "none" else None) + self.fix_length_mode = ( + dcunet_fix_length if dcunet_fix_length != "none" else None + ) self.norm_type = dcunet_norm_type self.activation = dcunet_activation self.input_channels = 2 # for x_t and y -- note that this is 2 rather than 4, because we directly treat complex channels in this DNN - self.time_embedding = (dcunet_time_embedding if dcunet_time_embedding != "none" else None) + self.time_embedding = ( + dcunet_time_embedding if dcunet_time_embedding != "none" else None + ) self.time_embedding_complex = dcunet_time_embedding_complex self.temb_layers_global = dcunet_temb_layers_global self.temb_layers_local = dcunet_temb_layers_local @@ -217,41 +305,63 @@ def __init__( # Prepare kwargs for encoder and decoder (to potentially be modified before layer instantiation) 
encoder_decoder_kwargs = dict( - norm_type=self.norm_type, activation=self.activation, - temb_layers=self.temb_layers_local, temb_activation=self.temb_activation) + norm_type=self.norm_type, + activation=self.activation, + temb_layers=self.temb_layers_local, + temb_activation=self.temb_activation, + ) # Instantiate (global) time embedding layer embed_ops = [] if self.time_embedding is not None: complex_valued = self.time_embedding_complex if self.time_embedding == "gfp": - embed_ops += [GaussianFourierProjection(embed_dim=embed_dim, complex_valued=complex_valued)] + embed_ops += [ + GaussianFourierProjection( + embed_dim=embed_dim, complex_valued=complex_valued + ) + ] encoder_decoder_kwargs["embed_dim"] = embed_dim elif self.time_embedding == "ds": - embed_ops += [DiffusionStepEmbedding(embed_dim=embed_dim, complex_valued=complex_valued)] + embed_ops += [ + DiffusionStepEmbedding( + embed_dim=embed_dim, complex_valued=complex_valued + ) + ] encoder_decoder_kwargs["embed_dim"] = embed_dim if self.time_embedding_complex: - assert self.time_embedding in ("gfp", "ds"), "Complex timestep embedding only available for gfp and ds" + assert self.time_embedding in ( + "gfp", + "ds", + ), "Complex timestep embedding only available for gfp and ds" encoder_decoder_kwargs["complex_time_embedding"] = True for _ in range(self.temb_layers_global): embed_ops += [ ComplexLinear(embed_dim, embed_dim, complex_valued=True), - OnReIm(get_activation(dcunet_temb_activation)) + OnReIm(get_activation(dcunet_temb_activation)), ] self.embed = nn.Sequential(*embed_ops) ### Instantiate DCUNet layers ### output_layer = ComplexConvTranspose2d(*decoders[-1]) - encoders = [DCUNetComplexEncoderBlock(*args, **encoder_decoder_kwargs) for args in encoders] - decoders = [DCUNetComplexDecoderBlock(*args, **encoder_decoder_kwargs) for args in decoders[:-1]] - - self.mask_bound = (dcunet_mask_bound if dcunet_mask_bound != "none" else None) + encoders = [ + DCUNetComplexEncoderBlock(*args, 
**encoder_decoder_kwargs) + for args in encoders + ] + decoders = [ + DCUNetComplexDecoderBlock(*args, **encoder_decoder_kwargs) + for args in decoders[:-1] + ] + + self.mask_bound = dcunet_mask_bound if dcunet_mask_bound != "none" else None if self.mask_bound is not None: - raise NotImplementedError("sorry, mask bounding not implemented at the moment") + raise NotImplementedError( + "sorry, mask bounding not implemented at the moment" + ) # TODO we can't use nn.Sequential since the ComplexConvTranspose2d needs a second `output_size` argument - #operations = (output_layer, complex_nn.BoundComplexMask(self.mask_bound)) - #output_layer = nn.Sequential(*[x for x in operations if x is not None]) + # operations = (output_layer, complex_nn.BoundComplexMask(self.mask_bound)) + # output_layer = nn.Sequential(*[x for x in operations if x is not None]) assert len(encoders) == len(decoders) + 1 self.encoders = nn.ModuleList(encoders) @@ -273,14 +383,14 @@ def forward(self, spec, t) -> Tensor: # Estimate mask from time-frequency representation. 
x_in = self.fix_input_dims(spec) x = x_in - t_embed = self.embed(t+0j) if self.time_embedding is not None else None + t_embed = self.embed(t + 0j) if self.time_embedding is not None else None enc_outs = [] for idx, enc in enumerate(self.encoders): x = enc(x, t_embed) # UNet skip connection enc_outs.append(x) - for (enc_out, dec) in zip(reversed(enc_outs[:-1]), self.decoders): + for enc_out, dec in zip(reversed(enc_outs[:-1]), self.decoders): x = dec(x, t_embed, output_size=enc_out.shape) x = torch.cat([x, enc_out], dim=1) @@ -356,7 +466,7 @@ def __init__( embed_dim=None, complex_time_embedding=False, temb_layers=1, - temb_activation="silu" + temb_activation="silu", ): super().__init__() @@ -371,7 +481,13 @@ def __init__( self.complex_time_embedding = complex_time_embedding self.conv = ComplexConv2d( - in_chan, out_chan, kernel_size, stride, padding, bias=norm_type is None, dilation=dilation + in_chan, + out_chan, + kernel_size, + stride, + padding, + bias=norm_type is None, + dilation=dilation, ) self.norm = _get_norm(norm_type)(out_chan) self.activation = OnReIm(get_activation(activation)) @@ -381,11 +497,11 @@ def __init__( for _ in range(max(0, self.temb_layers - 1)): ops += [ ComplexLinear(self.embed_dim, self.embed_dim, complex_valued=True), - OnReIm(get_activation(self.temb_activation)) + OnReIm(get_activation(self.temb_activation)), ] ops += [ FeatureMapDense(self.embed_dim, self.out_chan, complex_valued=True), - OnReIm(get_activation(self.temb_activation)) + OnReIm(get_activation(self.temb_activation)), ] self.embed_layer = nn.Sequential(*ops) @@ -410,7 +526,7 @@ def __init__( activation="leaky_relu", embed_dim=None, temb_layers=1, - temb_activation='swish', + temb_activation="swish", complex_time_embedding=False, ): super().__init__() @@ -427,7 +543,14 @@ def __init__( self.temb_activation = temb_activation self.deconv = ComplexConvTranspose2d( - in_chan, out_chan, kernel_size, stride, padding, output_padding, dilation=dilation, bias=norm_type is None + 
in_chan, + out_chan, + kernel_size, + stride, + padding, + output_padding, + dilation=dilation, + bias=norm_type is None, ) self.norm = _get_norm(norm_type)(out_chan) self.activation = OnReIm(get_activation(activation)) @@ -437,11 +560,11 @@ def __init__( for _ in range(max(0, self.temb_layers - 1)): ops += [ ComplexLinear(self.embed_dim, self.embed_dim, complex_valued=True), - OnReIm(get_activation(self.temb_activation)) + OnReIm(get_activation(self.temb_activation)), ] ops += [ FeatureMapDense(self.embed_dim, self.out_chan, complex_valued=True), - OnReIm(get_activation(self.temb_activation)) + OnReIm(get_activation(self.temb_activation)), ] self.embed_layer = nn.Sequential(*ops) @@ -454,39 +577,48 @@ def forward(self, x, t_embed, output_size=None): # From https://github.com/chanil1218/DCUnet.pytorch/blob/2dcdd30804be47a866fde6435cbb7e2f81585213/models/layers/complexnn.py class ComplexBatchNorm(torch.nn.Module): - def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=False): + def __init__( + self, + num_features, + eps=1e-5, + momentum=0.1, + affine=True, + track_running_stats=False, + ): super(ComplexBatchNorm, self).__init__() - self.num_features = num_features - self.eps = eps - self.momentum = momentum - self.affine = affine + self.num_features = num_features + self.eps = eps + self.momentum = momentum + self.affine = affine self.track_running_stats = track_running_stats if self.affine: self.Wrr = torch.nn.Parameter(torch.Tensor(num_features)) self.Wri = torch.nn.Parameter(torch.Tensor(num_features)) self.Wii = torch.nn.Parameter(torch.Tensor(num_features)) - self.Br = torch.nn.Parameter(torch.Tensor(num_features)) - self.Bi = torch.nn.Parameter(torch.Tensor(num_features)) + self.Br = torch.nn.Parameter(torch.Tensor(num_features)) + self.Bi = torch.nn.Parameter(torch.Tensor(num_features)) else: - self.register_parameter('Wrr', None) - self.register_parameter('Wri', None) - self.register_parameter('Wii', None) - 
self.register_parameter('Br', None) - self.register_parameter('Bi', None) + self.register_parameter("Wrr", None) + self.register_parameter("Wri", None) + self.register_parameter("Wii", None) + self.register_parameter("Br", None) + self.register_parameter("Bi", None) if self.track_running_stats: - self.register_buffer('RMr', torch.zeros(num_features)) - self.register_buffer('RMi', torch.zeros(num_features)) - self.register_buffer('RVrr', torch.ones (num_features)) - self.register_buffer('RVri', torch.zeros(num_features)) - self.register_buffer('RVii', torch.ones (num_features)) - self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long)) + self.register_buffer("RMr", torch.zeros(num_features)) + self.register_buffer("RMi", torch.zeros(num_features)) + self.register_buffer("RVrr", torch.ones(num_features)) + self.register_buffer("RVri", torch.zeros(num_features)) + self.register_buffer("RVii", torch.ones(num_features)) + self.register_buffer( + "num_batches_tracked", torch.tensor(0, dtype=torch.long) + ) else: - self.register_parameter('RMr', None) - self.register_parameter('RMi', None) - self.register_parameter('RVrr', None) - self.register_parameter('RVri', None) - self.register_parameter('RVii', None) - self.register_parameter('num_batches_tracked', None) + self.register_parameter("RMr", None) + self.register_parameter("RMi", None) + self.register_parameter("RVrr", None) + self.register_parameter("RVri", None) + self.register_parameter("RVii", None) + self.register_parameter("num_batches_tracked", None) self.reset_parameters() def reset_running_stats(self): @@ -504,12 +636,12 @@ def reset_parameters(self): self.Br.data.zero_() self.Bi.data.zero_() self.Wrr.data.fill_(1) - self.Wri.data.uniform_(-.9, +.9) # W will be positive-definite + self.Wri.data.uniform_(-0.9, +0.9) # W will be positive-definite self.Wii.data.fill_(1) def _check_input_dim(self, xr, xi): - assert(xr.shape == xi.shape) - assert(xr.size(1) == self.num_features) + assert 
xr.shape == xi.shape + assert xr.size(1) == self.num_features def forward(self, x): xr, xi = x.real, x.imag @@ -531,8 +663,8 @@ def forward(self, x): # False: Normalize using running statistics, ignore batch statistics. # training = self.training or not self.track_running_stats - redux = [i for i in reversed(range(xr.dim())) if i!=1] - vdim = [1] * xr.dim() + redux = [i for i in reversed(range(xr.dim())) if i != 1] + vdim = [1] * xr.dim() vdim[1] = xr.size(1) # @@ -551,7 +683,7 @@ def forward(self, x): else: Mr = self.RMr.view(vdim) Mi = self.RMi.view(vdim) - xr, xi = xr-Mr, xi-Mi + xr, xi = xr - Mr, xi - Mi # # Variance Matrix V Computation @@ -575,25 +707,25 @@ def forward(self, x): Vrr = self.RVrr.view(vdim) Vri = self.RVri.view(vdim) Vii = self.RVii.view(vdim) - Vrr = Vrr + self.eps - Vri = Vri - Vii = Vii + self.eps + Vrr = Vrr + self.eps + Vri = Vri + Vii = Vii + self.eps # # Matrix Inverse Square Root U = V^-0.5 # # sqrt of a 2x2 matrix, # - https://en.wikipedia.org/wiki/Square_root_of_a_2_by_2_matrix - tau = Vrr + Vii + tau = Vrr + Vii delta = torch.addcmul(Vrr * Vii, Vri, Vri, value=-1) - s = delta.sqrt() - t = (tau + 2*s).sqrt() + s = delta.sqrt() + t = (tau + 2 * s).sqrt() # matrix inverse, http://mathworld.wolfram.com/MatrixInverse.html - rst = (s * t).reciprocal() - Urr = (s + Vii) * rst - Uii = (s + Vrr) * rst - Uri = ( - Vri) * rst + rst = (s * t).reciprocal() + Urr = (s + Vii) * rst + Uii = (s + Vrr) * rst + Uri = (-Vri) * rst # # Optionally left-multiply U by affine weights W to produce combined @@ -605,7 +737,11 @@ def forward(self, x): # [Wir Wii][Uir Uii] [xi] [Bi] # if self.affine: - Wrr, Wri, Wii = self.Wrr.view(vdim), self.Wri.view(vdim), self.Wii.view(vdim) + Wrr, Wri, Wii = ( + self.Wrr.view(vdim), + self.Wri.view(vdim), + self.Wii.view(vdim), + ) Zrr = (Wrr * Urr) + (Wri * Uri) Zri = (Wrr * Uri) + (Wri * Uii) Zir = (Wri * Urr) + (Wii * Uri) @@ -623,5 +759,7 @@ def forward(self, x): return torch.view_as_complex(torch.stack([yr, yi], 
dim=-1)) def extra_repr(self): - return '{num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \ - 'track_running_stats={track_running_stats}'.format(**self.__dict__) + return ( + "{num_features}, eps={eps}, momentum={momentum}, affine={affine}, " + "track_running_stats={track_running_stats}".format(**self.__dict__) + ) diff --git a/modules/sgmse/ncsnpp.py b/modules/sgmse/ncsnpp.py index f5c810e7..4302f47a 100644 --- a/modules/sgmse/ncsnpp.py +++ b/modules/sgmse/ncsnpp.py @@ -39,37 +39,50 @@ class NCSNpp(nn.Module): @staticmethod def add_argparse_args(parser): - parser.add_argument("--ch_mult",type=int, nargs='+', default=[1,1,2,2,2,2,2]) + parser.add_argument( + "--ch_mult", type=int, nargs="+", default=[1, 1, 2, 2, 2, 2, 2] + ) parser.add_argument("--num_res_blocks", type=int, default=2) - parser.add_argument("--attn_resolutions", type=int, nargs='+', default=[16]) - parser.add_argument("--no-centered", dest="centered", action="store_false", help="The data is not centered [-1, 1]") - parser.add_argument("--centered", dest="centered", action="store_true", help="The data is centered [-1, 1]") + parser.add_argument("--attn_resolutions", type=int, nargs="+", default=[16]) + parser.add_argument( + "--no-centered", + dest="centered", + action="store_false", + help="The data is not centered [-1, 1]", + ) + parser.add_argument( + "--centered", + dest="centered", + action="store_true", + help="The data is centered [-1, 1]", + ) parser.set_defaults(centered=True) return parser - def __init__(self, - scale_by_sigma = True, - nonlinearity = 'swish', - nf = 128, - ch_mult = (1, 1, 2, 2, 2, 2, 2), - num_res_blocks = 2, - attn_resolutions = (16,), - resamp_with_conv = True, - conditional = True, - fir = True, - fir_kernel = [1, 3, 3, 1], - skip_rescale = True, - resblock_type = 'biggan', - progressive = 'output_skip', - progressive_input = 'input_skip', - progressive_combine = 'sum', - init_scale = 0., - fourier_scale = 16, - image_size = 256, - embedding_type = 
'fourier', - dropout = .0, - centered = True, - **unused_kwargs + def __init__( + self, + scale_by_sigma=True, + nonlinearity="swish", + nf=128, + ch_mult=(1, 1, 2, 2, 2, 2, 2), + num_res_blocks=2, + attn_resolutions=(16,), + resamp_with_conv=True, + conditional=True, + fir=True, + fir_kernel=[1, 3, 3, 1], + skip_rescale=True, + resblock_type="biggan", + progressive="output_skip", + progressive_input="input_skip", + progressive_combine="sum", + init_scale=0.0, + fourier_scale=16, + image_size=256, + embedding_type="fourier", + dropout=0.0, + centered=True, + **unused_kwargs, ): super().__init__() self.act = act = get_act(nonlinearity) @@ -81,7 +94,9 @@ def __init__(self, dropout = dropout resamp_with_conv = resamp_with_conv self.num_resolutions = num_resolutions = len(ch_mult) - self.all_resolutions = all_resolutions = [image_size // (2 ** i) for i in range(num_resolutions)] + self.all_resolutions = all_resolutions = [ + image_size // (2**i) for i in range(num_resolutions) + ] self.conditional = conditional = conditional # noise-conditional self.centered = centered @@ -95,9 +110,9 @@ def __init__(self, self.progressive_input = progressive_input = progressive_input.lower() self.embedding_type = embedding_type = embedding_type.lower() init_scale = init_scale - assert progressive in ['none', 'output_skip', 'residual'] - assert progressive_input in ['none', 'input_skip', 'residual'] - assert embedding_type in ['fourier', 'positional'] + assert progressive in ["none", "output_skip", "residual"] + assert progressive_input in ["none", "input_skip", "residual"] + assert embedding_type in ["fourier", "positional"] combine_method = progressive_combine.lower() combiner = functools.partial(Combine, method=combine_method) @@ -106,16 +121,18 @@ def __init__(self, modules = [] # timestep/noise_level embedding - if embedding_type == 'fourier': + if embedding_type == "fourier": # Gaussian Fourier features embeddings. 
- modules.append(layerspp.GaussianFourierProjection( - embedding_size=nf, scale=fourier_scale - )) + modules.append( + layerspp.GaussianFourierProjection( + embedding_size=nf, scale=fourier_scale + ) + ) embed_dim = 2 * nf - elif embedding_type == 'positional': + elif embedding_type == "positional": embed_dim = nf else: - raise ValueError(f'embedding type {embedding_type} unknown.') + raise ValueError(f"embedding type {embedding_type} unknown.") if conditional: modules.append(nn.Linear(embed_dim, nf * 4)) @@ -125,43 +142,71 @@ def __init__(self, modules[-1].weight.data = default_initializer()(modules[-1].weight.shape) nn.init.zeros_(modules[-1].bias) - AttnBlock = functools.partial(layerspp.AttnBlockpp, - init_scale=init_scale, skip_rescale=skip_rescale) - - Upsample = functools.partial(layerspp.Upsample, - with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel) - - if progressive == 'output_skip': - self.pyramid_upsample = layerspp.Upsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) - elif progressive == 'residual': - pyramid_upsample = functools.partial(layerspp.Upsample, fir=fir, - fir_kernel=fir_kernel, with_conv=True) - - Downsample = functools.partial(layerspp.Downsample, with_conv=resamp_with_conv, fir=fir, fir_kernel=fir_kernel) - - if progressive_input == 'input_skip': - self.pyramid_downsample = layerspp.Downsample(fir=fir, fir_kernel=fir_kernel, with_conv=False) - elif progressive_input == 'residual': - pyramid_downsample = functools.partial(layerspp.Downsample, - fir=fir, fir_kernel=fir_kernel, with_conv=True) - - if resblock_type == 'ddpm': - ResnetBlock = functools.partial(ResnetBlockDDPM, act=act, - dropout=dropout, init_scale=init_scale, - skip_rescale=skip_rescale, temb_dim=nf * 4) - - elif resblock_type == 'biggan': - ResnetBlock = functools.partial(ResnetBlockBigGAN, act=act, - dropout=dropout, fir=fir, fir_kernel=fir_kernel, - init_scale=init_scale, skip_rescale=skip_rescale, temb_dim=nf * 4) + AttnBlock = functools.partial( + 
layerspp.AttnBlockpp, init_scale=init_scale, skip_rescale=skip_rescale + ) + + Upsample = functools.partial( + layerspp.Upsample, + with_conv=resamp_with_conv, + fir=fir, + fir_kernel=fir_kernel, + ) + + if progressive == "output_skip": + self.pyramid_upsample = layerspp.Upsample( + fir=fir, fir_kernel=fir_kernel, with_conv=False + ) + elif progressive == "residual": + pyramid_upsample = functools.partial( + layerspp.Upsample, fir=fir, fir_kernel=fir_kernel, with_conv=True + ) + + Downsample = functools.partial( + layerspp.Downsample, + with_conv=resamp_with_conv, + fir=fir, + fir_kernel=fir_kernel, + ) + + if progressive_input == "input_skip": + self.pyramid_downsample = layerspp.Downsample( + fir=fir, fir_kernel=fir_kernel, with_conv=False + ) + elif progressive_input == "residual": + pyramid_downsample = functools.partial( + layerspp.Downsample, fir=fir, fir_kernel=fir_kernel, with_conv=True + ) + + if resblock_type == "ddpm": + ResnetBlock = functools.partial( + ResnetBlockDDPM, + act=act, + dropout=dropout, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + ) + + elif resblock_type == "biggan": + ResnetBlock = functools.partial( + ResnetBlockBigGAN, + act=act, + dropout=dropout, + fir=fir, + fir_kernel=fir_kernel, + init_scale=init_scale, + skip_rescale=skip_rescale, + temb_dim=nf * 4, + ) else: - raise ValueError(f'resblock type {resblock_type} unrecognized.') + raise ValueError(f"resblock type {resblock_type} unrecognized.") # Downsampling block channels = num_channels - if progressive_input != 'none': + if progressive_input != "none": input_pyramid_ch = channels modules.append(conv3x3(channels, nf)) @@ -180,18 +225,20 @@ def __init__(self, hs_c.append(in_ch) if i_level != num_resolutions - 1: - if resblock_type == 'ddpm': + if resblock_type == "ddpm": modules.append(Downsample(in_ch=in_ch)) else: modules.append(ResnetBlock(down=True, in_ch=in_ch)) - if progressive_input == 'input_skip': + if progressive_input == "input_skip": 
modules.append(combiner(dim1=input_pyramid_ch, dim2=in_ch)) - if combine_method == 'cat': + if combine_method == "cat": in_ch *= 2 - elif progressive_input == 'residual': - modules.append(pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch)) + elif progressive_input == "residual": + modules.append( + pyramid_downsample(in_ch=input_pyramid_ch, out_ch=in_ch) + ) input_pyramid_ch = in_ch hs_c.append(in_ch) @@ -204,7 +251,9 @@ def __init__(self, pyramid_ch = 0 # Upsampling block for i_level in reversed(range(num_resolutions)): - for i_block in range(num_res_blocks + 1): # +1 blocks in upsampling because of skip connection from combiner (after downsampling) + for i_block in range( + num_res_blocks + 1 + ): # +1 blocks in upsampling because of skip connection from combiner (after downsampling) out_ch = nf * ch_mult[i_level] modules.append(ResnetBlock(in_ch=in_ch + hs_c.pop(), out_ch=out_ch)) in_ch = out_ch @@ -212,46 +261,66 @@ def __init__(self, if all_resolutions[i_level] in attn_resolutions: modules.append(AttnBlock(channels=in_ch)) - if progressive != 'none': + if progressive != "none": if i_level == num_resolutions - 1: - if progressive == 'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), - num_channels=in_ch, eps=1e-6)) + if progressive == "output_skip": + modules.append( + nn.GroupNorm( + num_groups=min(in_ch // 4, 32), + num_channels=in_ch, + eps=1e-6, + ) + ) modules.append(conv3x3(in_ch, channels, init_scale=init_scale)) pyramid_ch = channels - elif progressive == 'residual': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6)) + elif progressive == "residual": + modules.append( + nn.GroupNorm( + num_groups=min(in_ch // 4, 32), + num_channels=in_ch, + eps=1e-6, + ) + ) modules.append(conv3x3(in_ch, in_ch, bias=True)) pyramid_ch = in_ch else: - raise ValueError(f'{progressive} is not a valid name.') + raise ValueError(f"{progressive} is not a valid name.") else: - if progressive == 
'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), - num_channels=in_ch, eps=1e-6)) - modules.append(conv3x3(in_ch, channels, bias=True, init_scale=init_scale)) + if progressive == "output_skip": + modules.append( + nn.GroupNorm( + num_groups=min(in_ch // 4, 32), + num_channels=in_ch, + eps=1e-6, + ) + ) + modules.append( + conv3x3(in_ch, channels, bias=True, init_scale=init_scale) + ) pyramid_ch = channels - elif progressive == 'residual': + elif progressive == "residual": modules.append(pyramid_upsample(in_ch=pyramid_ch, out_ch=in_ch)) pyramid_ch = in_ch else: - raise ValueError(f'{progressive} is not a valid name') + raise ValueError(f"{progressive} is not a valid name") if i_level != 0: - if resblock_type == 'ddpm': + if resblock_type == "ddpm": modules.append(Upsample(in_ch=in_ch)) else: modules.append(ResnetBlock(in_ch=in_ch, up=True)) assert not hs_c - if progressive != 'output_skip': - modules.append(nn.GroupNorm(num_groups=min(in_ch // 4, 32), - num_channels=in_ch, eps=1e-6)) + if progressive != "output_skip": + modules.append( + nn.GroupNorm( + num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6 + ) + ) modules.append(conv3x3(in_ch, channels, init_scale=init_scale)) self.all_modules = nn.ModuleList(modules) - def forward(self, x, time_cond): # timestep/noise_level embedding; only for continuous training @@ -259,23 +328,30 @@ def forward(self, x, time_cond): m_idx = 0 # Convert real and imaginary parts of (x,y) into four channel dimensions - x = torch.cat((x[:,[0],:,:].real, x[:,[0],:,:].imag, - x[:,[1],:,:].real, x[:,[1],:,:].imag), dim=1) - - if self.embedding_type == 'fourier': + x = torch.cat( + ( + x[:, [0], :, :].real, + x[:, [0], :, :].imag, + x[:, [1], :, :].real, + x[:, [1], :, :].imag, + ), + dim=1, + ) + + if self.embedding_type == "fourier": # Gaussian Fourier features embeddings. 
used_sigmas = time_cond temb = modules[m_idx](torch.log(used_sigmas)) m_idx += 1 - elif self.embedding_type == 'positional': + elif self.embedding_type == "positional": # Sinusoidal positional embeddings. timesteps = time_cond used_sigmas = self.sigmas[time_cond.long()] temb = layers.get_timestep_embedding(timesteps, self.nf) else: - raise ValueError(f'embedding type {self.embedding_type} unknown.') + raise ValueError(f"embedding type {self.embedding_type} unknown.") if self.conditional: temb = modules[m_idx](temb) @@ -287,11 +363,11 @@ def forward(self, x, time_cond): if not self.centered: # If input data is in [0, 1] - x = 2 * x - 1. + x = 2 * x - 1.0 # Downsampling block input_pyramid = None - if self.progressive_input != 'none': + if self.progressive_input != "none": input_pyramid = x # Input layer: Conv2d: 4ch -> 128ch @@ -305,36 +381,38 @@ def forward(self, x, time_cond): h = modules[m_idx](hs[-1], temb) m_idx += 1 # Attention layer (optional) - if h.shape[-2] in self.attn_resolutions: # edit: check H dim (-2) not W dim (-1) + if ( + h.shape[-2] in self.attn_resolutions + ): # edit: check H dim (-2) not W dim (-1) h = modules[m_idx](h) m_idx += 1 hs.append(h) # Downsampling if i_level != self.num_resolutions - 1: - if self.resblock_type == 'ddpm': + if self.resblock_type == "ddpm": h = modules[m_idx](hs[-1]) m_idx += 1 else: h = modules[m_idx](hs[-1], temb) m_idx += 1 - if self.progressive_input == 'input_skip': # Combine h with x + if self.progressive_input == "input_skip": # Combine h with x input_pyramid = self.pyramid_downsample(input_pyramid) h = modules[m_idx](input_pyramid, h) m_idx += 1 - elif self.progressive_input == 'residual': + elif self.progressive_input == "residual": input_pyramid = modules[m_idx](input_pyramid) m_idx += 1 if self.skip_rescale: - input_pyramid = (input_pyramid + h) / np.sqrt(2.) 
+ input_pyramid = (input_pyramid + h) / np.sqrt(2.0) else: input_pyramid = input_pyramid + h h = input_pyramid hs.append(h) - h = hs[-1] # actualy equal to: h = h + h = hs[-1] # actualy equal to: h = h h = modules[m_idx](h, temb) # ResNet block m_idx += 1 h = modules[m_idx](h) # Attention block @@ -355,42 +433,42 @@ def forward(self, x, time_cond): h = modules[m_idx](h) m_idx += 1 - if self.progressive != 'none': + if self.progressive != "none": if i_level == self.num_resolutions - 1: - if self.progressive == 'output_skip': + if self.progressive == "output_skip": pyramid = self.act(modules[m_idx](h)) # GroupNorm m_idx += 1 pyramid = modules[m_idx](pyramid) # Conv2D: 256 -> 4 m_idx += 1 - elif self.progressive == 'residual': + elif self.progressive == "residual": pyramid = self.act(modules[m_idx](h)) m_idx += 1 pyramid = modules[m_idx](pyramid) m_idx += 1 else: - raise ValueError(f'{self.progressive} is not a valid name.') + raise ValueError(f"{self.progressive} is not a valid name.") else: - if self.progressive == 'output_skip': + if self.progressive == "output_skip": pyramid = self.pyramid_upsample(pyramid) # Upsample pyramid_h = self.act(modules[m_idx](h)) # GroupNorm m_idx += 1 pyramid_h = modules[m_idx](pyramid_h) m_idx += 1 pyramid = pyramid + pyramid_h - elif self.progressive == 'residual': + elif self.progressive == "residual": pyramid = modules[m_idx](pyramid) m_idx += 1 if self.skip_rescale: - pyramid = (pyramid + h) / np.sqrt(2.) 
+ pyramid = (pyramid + h) / np.sqrt(2.0) else: pyramid = pyramid + h h = pyramid else: - raise ValueError(f'{self.progressive} is not a valid name') + raise ValueError(f"{self.progressive} is not a valid name") # Upsampling Layer if i_level != 0: - if self.resblock_type == 'ddpm': + if self.resblock_type == "ddpm": h = modules[m_idx](h) m_idx += 1 else: @@ -399,7 +477,7 @@ def forward(self, x, time_cond): assert not hs - if self.progressive == 'output_skip': + if self.progressive == "output_skip": h = pyramid else: h = self.act(modules[m_idx](h)) @@ -415,5 +493,5 @@ def forward(self, x, time_cond): # Convert back to complex number h = self.output_layer(h) h = torch.permute(h, (0, 2, 3, 1)).contiguous() - h = torch.view_as_complex(h)[:,None, :, :] + h = torch.view_as_complex(h)[:, None, :, :] return h diff --git a/modules/sgmse/ncsnpp_utils/layers.py b/modules/sgmse/ncsnpp_utils/layers.py index b0d6e87f..76bf8ac3 100644 --- a/modules/sgmse/ncsnpp_utils/layers.py +++ b/modules/sgmse/ncsnpp_utils/layers.py @@ -27,484 +27,613 @@ def get_act(config): - """Get activation functions from the config file.""" - - if config == 'elu': - return nn.ELU() - elif config == 'relu': - return nn.ReLU() - elif config == 'lrelu': - return nn.LeakyReLU(negative_slope=0.2) - elif config == 'swish': - return nn.SiLU() - else: - raise NotImplementedError('activation function does not exist!') - - -def ncsn_conv1x1(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=0): - """1x1 convolution. Same as NCSNv1/v2.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias, dilation=dilation, - padding=padding) - init_scale = 1e-10 if init_scale == 0 else init_scale - conv.weight.data *= init_scale - conv.bias.data *= init_scale - return conv - - -def variance_scaling(scale, mode, distribution, - in_axis=1, out_axis=0, - dtype=torch.float32, - device='cpu'): - """Ported from JAX. 
""" - - def _compute_fans(shape, in_axis=1, out_axis=0): - receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis] - fan_in = shape[in_axis] * receptive_field_size - fan_out = shape[out_axis] * receptive_field_size - return fan_in, fan_out - - def init(shape, dtype=dtype, device=device): - fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) - if mode == "fan_in": - denominator = fan_in - elif mode == "fan_out": - denominator = fan_out - elif mode == "fan_avg": - denominator = (fan_in + fan_out) / 2 + """Get activation functions from the config file.""" + + if config == "elu": + return nn.ELU() + elif config == "relu": + return nn.ReLU() + elif config == "lrelu": + return nn.LeakyReLU(negative_slope=0.2) + elif config == "swish": + return nn.SiLU() else: - raise ValueError( - "invalid mode for variance scaling initializer: {}".format(mode)) - variance = scale / denominator - if distribution == "normal": - return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance) - elif distribution == "uniform": - return (torch.rand(*shape, dtype=dtype, device=device) * 2. - 1.) * np.sqrt(3 * variance) - else: - raise ValueError("invalid distribution for variance scaling initializer") - - return init - - -def default_init(scale=1.): - """The same initialization used in DDPM.""" - scale = 1e-10 if scale == 0 else scale - return variance_scaling(scale, 'fan_avg', 'uniform') + raise NotImplementedError("activation function does not exist!") + + +def ncsn_conv1x1( + in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0, padding=0 +): + """1x1 convolution. 
Same as NCSNv1/v2.""" + conv = nn.Conv2d( + in_planes, + out_planes, + kernel_size=1, + stride=stride, + bias=bias, + dilation=dilation, + padding=padding, + ) + init_scale = 1e-10 if init_scale == 0 else init_scale + conv.weight.data *= init_scale + conv.bias.data *= init_scale + return conv + + +def variance_scaling( + scale, mode, distribution, in_axis=1, out_axis=0, dtype=torch.float32, device="cpu" +): + """Ported from JAX.""" + + def _compute_fans(shape, in_axis=1, out_axis=0): + receptive_field_size = np.prod(shape) / shape[in_axis] / shape[out_axis] + fan_in = shape[in_axis] * receptive_field_size + fan_out = shape[out_axis] * receptive_field_size + return fan_in, fan_out + + def init(shape, dtype=dtype, device=device): + fan_in, fan_out = _compute_fans(shape, in_axis, out_axis) + if mode == "fan_in": + denominator = fan_in + elif mode == "fan_out": + denominator = fan_out + elif mode == "fan_avg": + denominator = (fan_in + fan_out) / 2 + else: + raise ValueError( + "invalid mode for variance scaling initializer: {}".format(mode) + ) + variance = scale / denominator + if distribution == "normal": + return torch.randn(*shape, dtype=dtype, device=device) * np.sqrt(variance) + elif distribution == "uniform": + return ( + torch.rand(*shape, dtype=dtype, device=device) * 2.0 - 1.0 + ) * np.sqrt(3 * variance) + else: + raise ValueError("invalid distribution for variance scaling initializer") + + return init + + +def default_init(scale=1.0): + """The same initialization used in DDPM.""" + scale = 1e-10 if scale == 0 else scale + return variance_scaling(scale, "fan_avg", "uniform") class Dense(nn.Module): - """Linear layer with `default_init`.""" - def __init__(self): - super().__init__() - - -def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1., padding=0): - """1x1 convolution with DDPM initialization.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias) - conv.weight.data = 
default_init(init_scale)(conv.weight.data.shape) - nn.init.zeros_(conv.bias) - return conv - + """Linear layer with `default_init`.""" + + def __init__(self): + super().__init__() + + +def ddpm_conv1x1(in_planes, out_planes, stride=1, bias=True, init_scale=1.0, padding=0): + """1x1 convolution with DDPM initialization.""" + conv = nn.Conv2d( + in_planes, out_planes, kernel_size=1, stride=stride, padding=padding, bias=bias + ) + conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + nn.init.zeros_(conv.bias) + return conv + + +def ncsn_conv3x3( + in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0, padding=1 +): + """3x3 convolution with PyTorch initialization. Same as NCSNv1/NCSNv2.""" + init_scale = 1e-10 if init_scale == 0 else init_scale + conv = nn.Conv2d( + in_planes, + out_planes, + stride=stride, + bias=bias, + dilation=dilation, + padding=padding, + kernel_size=3, + ) + conv.weight.data *= init_scale + conv.bias.data *= init_scale + return conv + + +def ddpm_conv3x3( + in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1.0, padding=1 +): + """3x3 convolution with DDPM initialization.""" + conv = nn.Conv2d( + in_planes, + out_planes, + kernel_size=3, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias, + ) + conv.weight.data = default_init(init_scale)(conv.weight.data.shape) + nn.init.zeros_(conv.bias) + return conv + + ########################################################################### + # Functions below are ported over from the NCSNv1/NCSNv2 codebase: + # https://github.com/ermongroup/ncsn + # https://github.com/ermongroup/ncsnv2 + ########################################################################### -def ncsn_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1): - """3x3 convolution with PyTorch initialization. 
Same as NCSNv1/NCSNv2.""" - init_scale = 1e-10 if init_scale == 0 else init_scale - conv = nn.Conv2d(in_planes, out_planes, stride=stride, bias=bias, - dilation=dilation, padding=padding, kernel_size=3) - conv.weight.data *= init_scale - conv.bias.data *= init_scale - return conv - -def ddpm_conv3x3(in_planes, out_planes, stride=1, bias=True, dilation=1, init_scale=1., padding=1): - """3x3 convolution with DDPM initialization.""" - conv = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, - dilation=dilation, bias=bias) - conv.weight.data = default_init(init_scale)(conv.weight.data.shape) - nn.init.zeros_(conv.bias) - return conv - - ########################################################################### - # Functions below are ported over from the NCSNv1/NCSNv2 codebase: - # https://github.com/ermongroup/ncsn - # https://github.com/ermongroup/ncsnv2 - ########################################################################### +class CRPBlock(nn.Module): + def __init__(self, features, n_stages, act=nn.ReLU(), maxpool=True): + super().__init__() + self.convs = nn.ModuleList() + for i in range(n_stages): + self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) + self.n_stages = n_stages + if maxpool: + self.pool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2) + else: + self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2) + + self.act = act + + def forward(self, x): + x = self.act(x) + path = x + for i in range(self.n_stages): + path = self.pool(path) + path = self.convs[i](path) + x = path + x + return x -class CRPBlock(nn.Module): - def __init__(self, features, n_stages, act=nn.ReLU(), maxpool=True): - super().__init__() - self.convs = nn.ModuleList() - for i in range(n_stages): - self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) - self.n_stages = n_stages - if maxpool: - self.pool = nn.MaxPool2d(kernel_size=5, stride=1, padding=2) - else: - self.pool = nn.AvgPool2d(kernel_size=5, 
stride=1, padding=2) +class CondCRPBlock(nn.Module): + def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()): + super().__init__() + self.convs = nn.ModuleList() + self.norms = nn.ModuleList() + self.normalizer = normalizer + for i in range(n_stages): + self.norms.append(normalizer(features, num_classes, bias=True)) + self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) + + self.n_stages = n_stages + self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2) + self.act = act + + def forward(self, x, y): + x = self.act(x) + path = x + for i in range(self.n_stages): + path = self.norms[i](path, y) + path = self.pool(path) + path = self.convs[i](path) - self.act = act + x = path + x + return x - def forward(self, x): - x = self.act(x) - path = x - for i in range(self.n_stages): - path = self.pool(path) - path = self.convs[i](path) - x = path + x - return x +class RCUBlock(nn.Module): + def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()): + super().__init__() -class CondCRPBlock(nn.Module): - def __init__(self, features, n_stages, num_classes, normalizer, act=nn.ReLU()): - super().__init__() - self.convs = nn.ModuleList() - self.norms = nn.ModuleList() - self.normalizer = normalizer - for i in range(n_stages): - self.norms.append(normalizer(features, num_classes, bias=True)) - self.convs.append(ncsn_conv3x3(features, features, stride=1, bias=False)) - - self.n_stages = n_stages - self.pool = nn.AvgPool2d(kernel_size=5, stride=1, padding=2) - self.act = act - - def forward(self, x, y): - x = self.act(x) - path = x - for i in range(self.n_stages): - path = self.norms[i](path, y) - path = self.pool(path) - path = self.convs[i](path) - - x = path + x - return x + for i in range(n_blocks): + for j in range(n_stages): + setattr( + self, + "{}_{}_conv".format(i + 1, j + 1), + ncsn_conv3x3(features, features, stride=1, bias=False), + ) + self.stride = 1 + self.n_blocks = n_blocks + self.n_stages = n_stages + self.act 
= act -class RCUBlock(nn.Module): - def __init__(self, features, n_blocks, n_stages, act=nn.ReLU()): - super().__init__() - - for i in range(n_blocks): - for j in range(n_stages): - setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False)) - - self.stride = 1 - self.n_blocks = n_blocks - self.n_stages = n_stages - self.act = act - - def forward(self, x): - for i in range(self.n_blocks): - residual = x - for j in range(self.n_stages): - x = self.act(x) - x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x) + def forward(self, x): + for i in range(self.n_blocks): + residual = x + for j in range(self.n_stages): + x = self.act(x) + x = getattr(self, "{}_{}_conv".format(i + 1, j + 1))(x) - x += residual - return x + x += residual + return x class CondRCUBlock(nn.Module): - def __init__(self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU()): - super().__init__() - - for i in range(n_blocks): - for j in range(n_stages): - setattr(self, '{}_{}_norm'.format(i + 1, j + 1), normalizer(features, num_classes, bias=True)) - setattr(self, '{}_{}_conv'.format(i + 1, j + 1), ncsn_conv3x3(features, features, stride=1, bias=False)) - - self.stride = 1 - self.n_blocks = n_blocks - self.n_stages = n_stages - self.act = act - self.normalizer = normalizer - - def forward(self, x, y): - for i in range(self.n_blocks): - residual = x - for j in range(self.n_stages): - x = getattr(self, '{}_{}_norm'.format(i + 1, j + 1))(x, y) - x = self.act(x) - x = getattr(self, '{}_{}_conv'.format(i + 1, j + 1))(x) - - x += residual - return x + def __init__( + self, features, n_blocks, n_stages, num_classes, normalizer, act=nn.ReLU() + ): + super().__init__() + + for i in range(n_blocks): + for j in range(n_stages): + setattr( + self, + "{}_{}_norm".format(i + 1, j + 1), + normalizer(features, num_classes, bias=True), + ) + setattr( + self, + "{}_{}_conv".format(i + 1, j + 1), + ncsn_conv3x3(features, features, stride=1, bias=False), 
+ ) + + self.stride = 1 + self.n_blocks = n_blocks + self.n_stages = n_stages + self.act = act + self.normalizer = normalizer + + def forward(self, x, y): + for i in range(self.n_blocks): + residual = x + for j in range(self.n_stages): + x = getattr(self, "{}_{}_norm".format(i + 1, j + 1))(x, y) + x = self.act(x) + x = getattr(self, "{}_{}_conv".format(i + 1, j + 1))(x) + + x += residual + return x class MSFBlock(nn.Module): - def __init__(self, in_planes, features): - super().__init__() - assert isinstance(in_planes, list) or isinstance(in_planes, tuple) - self.convs = nn.ModuleList() - self.features = features + def __init__(self, in_planes, features): + super().__init__() + assert isinstance(in_planes, list) or isinstance(in_planes, tuple) + self.convs = nn.ModuleList() + self.features = features - for i in range(len(in_planes)): - self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) + for i in range(len(in_planes)): + self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) - def forward(self, xs, shape): - sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) - for i in range(len(self.convs)): - h = self.convs[i](xs[i]) - h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True) - sums += h - return sums + def forward(self, xs, shape): + sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) + for i in range(len(self.convs)): + h = self.convs[i](xs[i]) + h = F.interpolate(h, size=shape, mode="bilinear", align_corners=True) + sums += h + return sums class CondMSFBlock(nn.Module): - def __init__(self, in_planes, features, num_classes, normalizer): - super().__init__() - assert isinstance(in_planes, list) or isinstance(in_planes, tuple) + def __init__(self, in_planes, features, num_classes, normalizer): + super().__init__() + assert isinstance(in_planes, list) or isinstance(in_planes, tuple) - self.convs = nn.ModuleList() - self.norms = nn.ModuleList() - 
self.features = features - self.normalizer = normalizer + self.convs = nn.ModuleList() + self.norms = nn.ModuleList() + self.features = features + self.normalizer = normalizer - for i in range(len(in_planes)): - self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) - self.norms.append(normalizer(in_planes[i], num_classes, bias=True)) + for i in range(len(in_planes)): + self.convs.append(ncsn_conv3x3(in_planes[i], features, stride=1, bias=True)) + self.norms.append(normalizer(in_planes[i], num_classes, bias=True)) - def forward(self, xs, y, shape): - sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) - for i in range(len(self.convs)): - h = self.norms[i](xs[i], y) - h = self.convs[i](h) - h = F.interpolate(h, size=shape, mode='bilinear', align_corners=True) - sums += h - return sums + def forward(self, xs, y, shape): + sums = torch.zeros(xs[0].shape[0], self.features, *shape, device=xs[0].device) + for i in range(len(self.convs)): + h = self.norms[i](xs[i], y) + h = self.convs[i](h) + h = F.interpolate(h, size=shape, mode="bilinear", align_corners=True) + sums += h + return sums class RefineBlock(nn.Module): - def __init__(self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True): - super().__init__() + def __init__( + self, in_planes, features, act=nn.ReLU(), start=False, end=False, maxpool=True + ): + super().__init__() - assert isinstance(in_planes, tuple) or isinstance(in_planes, list) - self.n_blocks = n_blocks = len(in_planes) + assert isinstance(in_planes, tuple) or isinstance(in_planes, list) + self.n_blocks = n_blocks = len(in_planes) - self.adapt_convs = nn.ModuleList() - for i in range(n_blocks): - self.adapt_convs.append(RCUBlock(in_planes[i], 2, 2, act)) + self.adapt_convs = nn.ModuleList() + for i in range(n_blocks): + self.adapt_convs.append(RCUBlock(in_planes[i], 2, 2, act)) - self.output_convs = RCUBlock(features, 3 if end else 1, 2, act) + self.output_convs = 
RCUBlock(features, 3 if end else 1, 2, act) - if not start: - self.msf = MSFBlock(in_planes, features) + if not start: + self.msf = MSFBlock(in_planes, features) - self.crp = CRPBlock(features, 2, act, maxpool=maxpool) + self.crp = CRPBlock(features, 2, act, maxpool=maxpool) - def forward(self, xs, output_shape): - assert isinstance(xs, tuple) or isinstance(xs, list) - hs = [] - for i in range(len(xs)): - h = self.adapt_convs[i](xs[i]) - hs.append(h) + def forward(self, xs, output_shape): + assert isinstance(xs, tuple) or isinstance(xs, list) + hs = [] + for i in range(len(xs)): + h = self.adapt_convs[i](xs[i]) + hs.append(h) - if self.n_blocks > 1: - h = self.msf(hs, output_shape) - else: - h = hs[0] + if self.n_blocks > 1: + h = self.msf(hs, output_shape) + else: + h = hs[0] - h = self.crp(h) - h = self.output_convs(h) + h = self.crp(h) + h = self.output_convs(h) - return h + return h class CondRefineBlock(nn.Module): - def __init__(self, in_planes, features, num_classes, normalizer, act=nn.ReLU(), start=False, end=False): - super().__init__() - - assert isinstance(in_planes, tuple) or isinstance(in_planes, list) - self.n_blocks = n_blocks = len(in_planes) - - self.adapt_convs = nn.ModuleList() - for i in range(n_blocks): - self.adapt_convs.append( - CondRCUBlock(in_planes[i], 2, 2, num_classes, normalizer, act) - ) - - self.output_convs = CondRCUBlock(features, 3 if end else 1, 2, num_classes, normalizer, act) - - if not start: - self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer) - - self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act) - - def forward(self, xs, y, output_shape): - assert isinstance(xs, tuple) or isinstance(xs, list) - hs = [] - for i in range(len(xs)): - h = self.adapt_convs[i](xs[i], y) - hs.append(h) - - if self.n_blocks > 1: - h = self.msf(hs, y, output_shape) - else: - h = hs[0] - - h = self.crp(h, y) - h = self.output_convs(h, y) - - return h + def __init__( + self, + in_planes, + features, + num_classes, + 
normalizer, + act=nn.ReLU(), + start=False, + end=False, + ): + super().__init__() + + assert isinstance(in_planes, tuple) or isinstance(in_planes, list) + self.n_blocks = n_blocks = len(in_planes) + + self.adapt_convs = nn.ModuleList() + for i in range(n_blocks): + self.adapt_convs.append( + CondRCUBlock(in_planes[i], 2, 2, num_classes, normalizer, act) + ) + + self.output_convs = CondRCUBlock( + features, 3 if end else 1, 2, num_classes, normalizer, act + ) + + if not start: + self.msf = CondMSFBlock(in_planes, features, num_classes, normalizer) + + self.crp = CondCRPBlock(features, 2, num_classes, normalizer, act) + + def forward(self, xs, y, output_shape): + assert isinstance(xs, tuple) or isinstance(xs, list) + hs = [] + for i in range(len(xs)): + h = self.adapt_convs[i](xs[i], y) + hs.append(h) + + if self.n_blocks > 1: + h = self.msf(hs, y, output_shape) + else: + h = hs[0] + + h = self.crp(h, y) + h = self.output_convs(h, y) + + return h class ConvMeanPool(nn.Module): - def __init__(self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False): - super().__init__() - if not adjust_padding: - conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) - self.conv = conv - else: - conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) - - self.conv = nn.Sequential( - nn.ZeroPad2d((1, 0, 1, 0)), - conv - ) - - def forward(self, inputs): - output = self.conv(inputs) - output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2], - output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. 
- return output + def __init__( + self, input_dim, output_dim, kernel_size=3, biases=True, adjust_padding=False + ): + super().__init__() + if not adjust_padding: + conv = nn.Conv2d( + input_dim, + output_dim, + kernel_size, + stride=1, + padding=kernel_size // 2, + bias=biases, + ) + self.conv = conv + else: + conv = nn.Conv2d( + input_dim, + output_dim, + kernel_size, + stride=1, + padding=kernel_size // 2, + bias=biases, + ) + + self.conv = nn.Sequential(nn.ZeroPad2d((1, 0, 1, 0)), conv) + + def forward(self, inputs): + output = self.conv(inputs) + output = ( + sum( + [ + output[:, :, ::2, ::2], + output[:, :, 1::2, ::2], + output[:, :, ::2, 1::2], + output[:, :, 1::2, 1::2], + ] + ) + / 4.0 + ) + return output class MeanPoolConv(nn.Module): - def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): - super().__init__() - self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) - - def forward(self, inputs): - output = inputs - output = sum([output[:, :, ::2, ::2], output[:, :, 1::2, ::2], - output[:, :, ::2, 1::2], output[:, :, 1::2, 1::2]]) / 4. 
- return self.conv(output) + def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): + super().__init__() + self.conv = nn.Conv2d( + input_dim, + output_dim, + kernel_size, + stride=1, + padding=kernel_size // 2, + bias=biases, + ) + + def forward(self, inputs): + output = inputs + output = ( + sum( + [ + output[:, :, ::2, ::2], + output[:, :, 1::2, ::2], + output[:, :, ::2, 1::2], + output[:, :, 1::2, 1::2], + ] + ) + / 4.0 + ) + return self.conv(output) class UpsampleConv(nn.Module): - def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): - super().__init__() - self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride=1, padding=kernel_size // 2, bias=biases) - self.pixelshuffle = nn.PixelShuffle(upscale_factor=2) - - def forward(self, inputs): - output = inputs - output = torch.cat([output, output, output, output], dim=1) - output = self.pixelshuffle(output) - return self.conv(output) + def __init__(self, input_dim, output_dim, kernel_size=3, biases=True): + super().__init__() + self.conv = nn.Conv2d( + input_dim, + output_dim, + kernel_size, + stride=1, + padding=kernel_size // 2, + bias=biases, + ) + self.pixelshuffle = nn.PixelShuffle(upscale_factor=2) + + def forward(self, inputs): + output = inputs + output = torch.cat([output, output, output, output], dim=1) + output = self.pixelshuffle(output) + return self.conv(output) class ConditionalResidualBlock(nn.Module): - def __init__(self, input_dim, output_dim, num_classes, resample=1, act=nn.ELU(), - normalization=ConditionalInstanceNorm2dPlus, adjust_padding=False, dilation=None): - super().__init__() - self.non_linearity = act - self.input_dim = input_dim - self.output_dim = output_dim - self.resample = resample - self.normalization = normalization - if resample == 'down': - if dilation > 1: - self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation) - self.normalize2 = normalization(input_dim, num_classes) - self.conv2 = ncsn_conv3x3(input_dim, output_dim, 
dilation=dilation) - conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) - else: - self.conv1 = ncsn_conv3x3(input_dim, input_dim) - self.normalize2 = normalization(input_dim, num_classes) - self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding) - conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding) - - elif resample is None: - if dilation > 1: - conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) - self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) - self.normalize2 = normalization(output_dim, num_classes) - self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) - else: - conv_shortcut = nn.Conv2d - self.conv1 = ncsn_conv3x3(input_dim, output_dim) - self.normalize2 = normalization(output_dim, num_classes) - self.conv2 = ncsn_conv3x3(output_dim, output_dim) - else: - raise Exception('invalid resample value') - - if output_dim != input_dim or resample is not None: - self.shortcut = conv_shortcut(input_dim, output_dim) - - self.normalize1 = normalization(input_dim, num_classes) - - def forward(self, x, y): - output = self.normalize1(x, y) - output = self.non_linearity(output) - output = self.conv1(output) - output = self.normalize2(output, y) - output = self.non_linearity(output) - output = self.conv2(output) - - if self.output_dim == self.input_dim and self.resample is None: - shortcut = x - else: - shortcut = self.shortcut(x) - - return shortcut + output + def __init__( + self, + input_dim, + output_dim, + num_classes, + resample=1, + act=nn.ELU(), + normalization=ConditionalInstanceNorm2dPlus, + adjust_padding=False, + dilation=None, + ): + super().__init__() + self.non_linearity = act + self.input_dim = input_dim + self.output_dim = output_dim + self.resample = resample + self.normalization = normalization + if resample == "down": + if dilation > 1: + self.conv1 = ncsn_conv3x3(input_dim, input_dim, dilation=dilation) + self.normalize2 = normalization(input_dim, 
num_classes) + self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + else: + self.conv1 = ncsn_conv3x3(input_dim, input_dim) + self.normalize2 = normalization(input_dim, num_classes) + self.conv2 = ConvMeanPool( + input_dim, output_dim, 3, adjust_padding=adjust_padding + ) + conv_shortcut = partial( + ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding + ) + + elif resample is None: + if dilation > 1: + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + self.normalize2 = normalization(output_dim, num_classes) + self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) + else: + conv_shortcut = nn.Conv2d + self.conv1 = ncsn_conv3x3(input_dim, output_dim) + self.normalize2 = normalization(output_dim, num_classes) + self.conv2 = ncsn_conv3x3(output_dim, output_dim) + else: + raise Exception("invalid resample value") + + if output_dim != input_dim or resample is not None: + self.shortcut = conv_shortcut(input_dim, output_dim) + + self.normalize1 = normalization(input_dim, num_classes) + + def forward(self, x, y): + output = self.normalize1(x, y) + output = self.non_linearity(output) + output = self.conv1(output) + output = self.normalize2(output, y) + output = self.non_linearity(output) + output = self.conv2(output) + + if self.output_dim == self.input_dim and self.resample is None: + shortcut = x + else: + shortcut = self.shortcut(x) + + return shortcut + output class ResidualBlock(nn.Module): - def __init__(self, input_dim, output_dim, resample=None, act=nn.ELU(), - normalization=nn.InstanceNorm2d, adjust_padding=False, dilation=1): - super().__init__() - self.non_linearity = act - self.input_dim = input_dim - self.output_dim = output_dim - self.resample = resample - self.normalization = normalization - if resample == 'down': - if dilation > 1: - self.conv1 = ncsn_conv3x3(input_dim, input_dim, 
dilation=dilation) - self.normalize2 = normalization(input_dim) - self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) - conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) - else: - self.conv1 = ncsn_conv3x3(input_dim, input_dim) - self.normalize2 = normalization(input_dim) - self.conv2 = ConvMeanPool(input_dim, output_dim, 3, adjust_padding=adjust_padding) - conv_shortcut = partial(ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding) - - elif resample is None: - if dilation > 1: - conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) - self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) - self.normalize2 = normalization(output_dim) - self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) - else: - # conv_shortcut = nn.Conv2d ### Something wierd here. - conv_shortcut = partial(ncsn_conv1x1) - self.conv1 = ncsn_conv3x3(input_dim, output_dim) - self.normalize2 = normalization(output_dim) - self.conv2 = ncsn_conv3x3(output_dim, output_dim) - else: - raise Exception('invalid resample value') - - if output_dim != input_dim or resample is not None: - self.shortcut = conv_shortcut(input_dim, output_dim) - - self.normalize1 = normalization(input_dim) - - def forward(self, x): - output = self.normalize1(x) - output = self.non_linearity(output) - output = self.conv1(output) - output = self.normalize2(output) - output = self.non_linearity(output) - output = self.conv2(output) - - if self.output_dim == self.input_dim and self.resample is None: - shortcut = x - else: - shortcut = self.shortcut(x) - - return shortcut + output + def __init__( + self, + input_dim, + output_dim, + resample=None, + act=nn.ELU(), + normalization=nn.InstanceNorm2d, + adjust_padding=False, + dilation=1, + ): + super().__init__() + self.non_linearity = act + self.input_dim = input_dim + self.output_dim = output_dim + self.resample = resample + self.normalization = normalization + if resample == "down": + if dilation > 1: + self.conv1 = 
ncsn_conv3x3(input_dim, input_dim, dilation=dilation) + self.normalize2 = normalization(input_dim) + self.conv2 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + else: + self.conv1 = ncsn_conv3x3(input_dim, input_dim) + self.normalize2 = normalization(input_dim) + self.conv2 = ConvMeanPool( + input_dim, output_dim, 3, adjust_padding=adjust_padding + ) + conv_shortcut = partial( + ConvMeanPool, kernel_size=1, adjust_padding=adjust_padding + ) + + elif resample is None: + if dilation > 1: + conv_shortcut = partial(ncsn_conv3x3, dilation=dilation) + self.conv1 = ncsn_conv3x3(input_dim, output_dim, dilation=dilation) + self.normalize2 = normalization(output_dim) + self.conv2 = ncsn_conv3x3(output_dim, output_dim, dilation=dilation) + else: + # conv_shortcut = nn.Conv2d ### Something wierd here. + conv_shortcut = partial(ncsn_conv1x1) + self.conv1 = ncsn_conv3x3(input_dim, output_dim) + self.normalize2 = normalization(output_dim) + self.conv2 = ncsn_conv3x3(output_dim, output_dim) + else: + raise Exception("invalid resample value") + + if output_dim != input_dim or resample is not None: + self.shortcut = conv_shortcut(input_dim, output_dim) + + self.normalize1 = normalization(input_dim) + + def forward(self, x): + output = self.normalize1(x) + output = self.non_linearity(output) + output = self.conv1(output) + output = self.normalize2(output) + output = self.non_linearity(output) + output = self.conv2(output) + + if self.output_dim == self.input_dim and self.resample is None: + shortcut = x + else: + shortcut = self.shortcut(x) + + return shortcut + output ########################################################################### @@ -512,151 +641,160 @@ def forward(self, x): # https://github.com/hojonathanho/diffusion/blob/master/diffusion_tf/nn.py ########################################################################### + def get_timestep_embedding(timesteps, embedding_dim, max_positions=10000): - 
assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32 - half_dim = embedding_dim // 2 - # magic number 10000 is from transformers - emb = math.log(max_positions) / (half_dim - 1) - # emb = math.log(2.) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb) - # emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :] - # emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :] - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = F.pad(emb, (0, 1), mode='constant') - assert emb.shape == (timesteps.shape[0], embedding_dim) - return emb + assert len(timesteps.shape) == 1 # and timesteps.dtype == tf.int32 + half_dim = embedding_dim // 2 + # magic number 10000 is from transformers + emb = math.log(max_positions) / (half_dim - 1) + # emb = math.log(2.) / (half_dim - 1) + emb = torch.exp( + torch.arange(half_dim, dtype=torch.float32, device=timesteps.device) * -emb + ) + # emb = tf.range(num_embeddings, dtype=jnp.float32)[:, None] * emb[None, :] + # emb = tf.cast(timesteps, dtype=jnp.float32)[:, None] * emb[None, :] + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = F.pad(emb, (0, 1), mode="constant") + assert emb.shape == (timesteps.shape[0], embedding_dim) + return emb def _einsum(a, b, c, x, y): - einsum_str = '{},{}->{}'.format(''.join(a), ''.join(b), ''.join(c)) - return torch.einsum(einsum_str, x, y) + einsum_str = "{},{}->{}".format("".join(a), "".join(b), "".join(c)) + return torch.einsum(einsum_str, x, y) def contract_inner(x, y): - """tensordot(x, y, 1).""" - x_chars = list(string.ascii_lowercase[:len(x.shape)]) - y_chars = list(string.ascii_lowercase[len(x.shape):len(y.shape) + len(x.shape)]) - y_chars[0] = x_chars[-1] # first axis of y and last of x get 
summed - out_chars = x_chars[:-1] + y_chars[1:] - return _einsum(x_chars, y_chars, out_chars, x, y) + """tensordot(x, y, 1).""" + x_chars = list(string.ascii_lowercase[: len(x.shape)]) + y_chars = list(string.ascii_lowercase[len(x.shape) : len(y.shape) + len(x.shape)]) + y_chars[0] = x_chars[-1] # first axis of y and last of x get summed + out_chars = x_chars[:-1] + y_chars[1:] + return _einsum(x_chars, y_chars, out_chars, x, y) class NIN(nn.Module): - def __init__(self, in_dim, num_units, init_scale=0.1): - super().__init__() - self.W = nn.Parameter(default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True) - self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) + def __init__(self, in_dim, num_units, init_scale=0.1): + super().__init__() + self.W = nn.Parameter( + default_init(scale=init_scale)((in_dim, num_units)), requires_grad=True + ) + self.b = nn.Parameter(torch.zeros(num_units), requires_grad=True) - def forward(self, x): - x = x.permute(0, 2, 3, 1) - y = contract_inner(x, self.W) + self.b - return y.permute(0, 3, 1, 2) + def forward(self, x): + x = x.permute(0, 2, 3, 1) + y = contract_inner(x, self.W) + self.b + return y.permute(0, 3, 1, 2) class AttnBlock(nn.Module): - """Channel-wise self-attention block.""" - def __init__(self, channels): - super().__init__() - self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6) - self.NIN_0 = NIN(channels, channels) - self.NIN_1 = NIN(channels, channels) - self.NIN_2 = NIN(channels, channels) - self.NIN_3 = NIN(channels, channels, init_scale=0.) 
- - def forward(self, x): - B, C, H, W = x.shape - h = self.GroupNorm_0(x) - q = self.NIN_0(h) - k = self.NIN_1(h) - v = self.NIN_2(h) - - w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5)) - w = torch.reshape(w, (B, H, W, H * W)) - w = F.softmax(w, dim=-1) - w = torch.reshape(w, (B, H, W, H, W)) - h = torch.einsum('bhwij,bcij->bchw', w, v) - h = self.NIN_3(h) - return x + h + """Channel-wise self-attention block.""" + + def __init__(self, channels): + super().__init__() + self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=channels, eps=1e-6) + self.NIN_0 = NIN(channels, channels) + self.NIN_1 = NIN(channels, channels) + self.NIN_2 = NIN(channels, channels) + self.NIN_3 = NIN(channels, channels, init_scale=0.0) + + def forward(self, x): + B, C, H, W = x.shape + h = self.GroupNorm_0(x) + q = self.NIN_0(h) + k = self.NIN_1(h) + v = self.NIN_2(h) + + w = torch.einsum("bchw,bcij->bhwij", q, k) * (int(C) ** (-0.5)) + w = torch.reshape(w, (B, H, W, H * W)) + w = F.softmax(w, dim=-1) + w = torch.reshape(w, (B, H, W, H, W)) + h = torch.einsum("bhwij,bcij->bchw", w, v) + h = self.NIN_3(h) + return x + h class Upsample(nn.Module): - def __init__(self, channels, with_conv=False): - super().__init__() - if with_conv: - self.Conv_0 = ddpm_conv3x3(channels, channels) - self.with_conv = with_conv + def __init__(self, channels, with_conv=False): + super().__init__() + if with_conv: + self.Conv_0 = ddpm_conv3x3(channels, channels) + self.with_conv = with_conv - def forward(self, x): - B, C, H, W = x.shape - h = F.interpolate(x, (H * 2, W * 2), mode='nearest') - if self.with_conv: - h = self.Conv_0(h) - return h + def forward(self, x): + B, C, H, W = x.shape + h = F.interpolate(x, (H * 2, W * 2), mode="nearest") + if self.with_conv: + h = self.Conv_0(h) + return h class Downsample(nn.Module): - def __init__(self, channels, with_conv=False): - super().__init__() - if with_conv: - self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0) - 
self.with_conv = with_conv - - def forward(self, x): - B, C, H, W = x.shape - # Emulate 'SAME' padding - if self.with_conv: - x = F.pad(x, (0, 1, 0, 1)) - x = self.Conv_0(x) - else: - x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0) + def __init__(self, channels, with_conv=False): + super().__init__() + if with_conv: + self.Conv_0 = ddpm_conv3x3(channels, channels, stride=2, padding=0) + self.with_conv = with_conv + + def forward(self, x): + B, C, H, W = x.shape + # Emulate 'SAME' padding + if self.with_conv: + x = F.pad(x, (0, 1, 0, 1)) + x = self.Conv_0(x) + else: + x = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0) - assert x.shape == (B, C, H // 2, W // 2) - return x + assert x.shape == (B, C, H // 2, W // 2) + return x class ResnetBlockDDPM(nn.Module): - """The ResNet Blocks used in DDPM.""" - def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1): - super().__init__() - if out_ch is None: - out_ch = in_ch - self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6) - self.act = act - self.Conv_0 = ddpm_conv3x3(in_ch, out_ch) - if temb_dim is not None: - self.Dense_0 = nn.Linear(temb_dim, out_ch) - self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) - nn.init.zeros_(self.Dense_0.bias) - - self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6) - self.Dropout_0 = nn.Dropout(dropout) - self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.) 
- if in_ch != out_ch: - if conv_shortcut: - self.Conv_2 = ddpm_conv3x3(in_ch, out_ch) - else: - self.NIN_0 = NIN(in_ch, out_ch) - self.out_ch = out_ch - self.in_ch = in_ch - self.conv_shortcut = conv_shortcut - - def forward(self, x, temb=None): - B, C, H, W = x.shape - assert C == self.in_ch - out_ch = self.out_ch if self.out_ch else self.in_ch - h = self.act(self.GroupNorm_0(x)) - h = self.Conv_0(h) - # Add bias to each feature map conditioned on the time embedding - if temb is not None: - h += self.Dense_0(self.act(temb))[:, :, None, None] - h = self.act(self.GroupNorm_1(h)) - h = self.Dropout_0(h) - h = self.Conv_1(h) - if C != out_ch: - if self.conv_shortcut: - x = self.Conv_2(x) - else: - x = self.NIN_0(x) - return x + h \ No newline at end of file + """The ResNet Blocks used in DDPM.""" + + def __init__( + self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, dropout=0.1 + ): + super().__init__() + if out_ch is None: + out_ch = in_ch + self.GroupNorm_0 = nn.GroupNorm(num_groups=32, num_channels=in_ch, eps=1e-6) + self.act = act + self.Conv_0 = ddpm_conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) + nn.init.zeros_(self.Dense_0.bias) + + self.GroupNorm_1 = nn.GroupNorm(num_groups=32, num_channels=out_ch, eps=1e-6) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = ddpm_conv3x3(out_ch, out_ch, init_scale=0.0) + if in_ch != out_ch: + if conv_shortcut: + self.Conv_2 = ddpm_conv3x3(in_ch, out_ch) + else: + self.NIN_0 = NIN(in_ch, out_ch) + self.out_ch = out_ch + self.in_ch = in_ch + self.conv_shortcut = conv_shortcut + + def forward(self, x, temb=None): + B, C, H, W = x.shape + assert C == self.in_ch + out_ch = self.out_ch if self.out_ch else self.in_ch + h = self.act(self.GroupNorm_0(x)) + h = self.Conv_0(h) + # Add bias to each feature map conditioned on the time embedding + if temb is not None: + h += 
self.Dense_0(self.act(temb))[:, :, None, None] + h = self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + if C != out_ch: + if self.conv_shortcut: + x = self.Conv_2(x) + else: + x = self.NIN_0(x) + return x + h diff --git a/modules/sgmse/ncsnpp_utils/layerspp.py b/modules/sgmse/ncsnpp_utils/layerspp.py index 948b0688..793b7e24 100644 --- a/modules/sgmse/ncsnpp_utils/layerspp.py +++ b/modules/sgmse/ncsnpp_utils/layerspp.py @@ -30,245 +30,294 @@ class GaussianFourierProjection(nn.Module): - """Gaussian Fourier embeddings for noise levels.""" + """Gaussian Fourier embeddings for noise levels.""" - def __init__(self, embedding_size=256, scale=1.0): - super().__init__() - self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) + def __init__(self, embedding_size=256, scale=1.0): + super().__init__() + self.W = nn.Parameter(torch.randn(embedding_size) * scale, requires_grad=False) - def forward(self, x): - x_proj = x[:, None] * self.W[None, :] * 2 * np.pi - return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) + def forward(self, x): + x_proj = x[:, None] * self.W[None, :] * 2 * np.pi + return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1) class Combine(nn.Module): - """Combine information from skip connections.""" + """Combine information from skip connections.""" - def __init__(self, dim1, dim2, method='cat'): - super().__init__() - self.Conv_0 = conv1x1(dim1, dim2) - self.method = method + def __init__(self, dim1, dim2, method="cat"): + super().__init__() + self.Conv_0 = conv1x1(dim1, dim2) + self.method = method - def forward(self, x, y): - h = self.Conv_0(x) - if self.method == 'cat': - return torch.cat([h, y], dim=1) - elif self.method == 'sum': - return h + y - else: - raise ValueError(f'Method {self.method} not recognized.') + def forward(self, x, y): + h = self.Conv_0(x) + if self.method == "cat": + return torch.cat([h, y], dim=1) + elif self.method == "sum": + return h + y + else: + raise 
ValueError(f"Method {self.method} not recognized.") class AttnBlockpp(nn.Module): - """Channel-wise self-attention block. Modified from DDPM.""" - - def __init__(self, channels, skip_rescale=False, init_scale=0.): - super().__init__() - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(channels // 4, 32), num_channels=channels, - eps=1e-6) - self.NIN_0 = NIN(channels, channels) - self.NIN_1 = NIN(channels, channels) - self.NIN_2 = NIN(channels, channels) - self.NIN_3 = NIN(channels, channels, init_scale=init_scale) - self.skip_rescale = skip_rescale - - def forward(self, x): - B, C, H, W = x.shape - h = self.GroupNorm_0(x) - q = self.NIN_0(h) - k = self.NIN_1(h) - v = self.NIN_2(h) - - w = torch.einsum('bchw,bcij->bhwij', q, k) * (int(C) ** (-0.5)) - w = torch.reshape(w, (B, H, W, H * W)) - w = F.softmax(w, dim=-1) - w = torch.reshape(w, (B, H, W, H, W)) - h = torch.einsum('bhwij,bcij->bchw', w, v) - h = self.NIN_3(h) - if not self.skip_rescale: - return x + h - else: - return (x + h) / np.sqrt(2.) + """Channel-wise self-attention block. 
Modified from DDPM.""" + + def __init__(self, channels, skip_rescale=False, init_scale=0.0): + super().__init__() + self.GroupNorm_0 = nn.GroupNorm( + num_groups=min(channels // 4, 32), num_channels=channels, eps=1e-6 + ) + self.NIN_0 = NIN(channels, channels) + self.NIN_1 = NIN(channels, channels) + self.NIN_2 = NIN(channels, channels) + self.NIN_3 = NIN(channels, channels, init_scale=init_scale) + self.skip_rescale = skip_rescale + + def forward(self, x): + B, C, H, W = x.shape + h = self.GroupNorm_0(x) + q = self.NIN_0(h) + k = self.NIN_1(h) + v = self.NIN_2(h) + + w = torch.einsum("bchw,bcij->bhwij", q, k) * (int(C) ** (-0.5)) + w = torch.reshape(w, (B, H, W, H * W)) + w = F.softmax(w, dim=-1) + w = torch.reshape(w, (B, H, W, H, W)) + h = torch.einsum("bhwij,bcij->bchw", w, v) + h = self.NIN_3(h) + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.0) class Upsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, - fir_kernel=(1, 3, 3, 1)): - super().__init__() - out_ch = out_ch if out_ch else in_ch - if not fir: - if with_conv: - self.Conv_0 = conv3x3(in_ch, out_ch) - else: - if with_conv: - self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch, - kernel=3, up=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=default_init()) - self.fir = fir - self.with_conv = with_conv - self.fir_kernel = fir_kernel - self.out_ch = out_ch - - def forward(self, x): - B, C, H, W = x.shape - if not self.fir: - h = F.interpolate(x, (H * 2, W * 2), 'nearest') - if self.with_conv: - h = self.Conv_0(h) - else: - if not self.with_conv: - h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) - else: - h = self.Conv2d_0(x) - - return h + def __init__( + self, + in_ch=None, + out_ch=None, + with_conv=False, + fir=False, + fir_kernel=(1, 3, 3, 1), + ): + super().__init__() + out_ch = out_ch if out_ch else in_ch + if not fir: + if with_conv: + self.Conv_0 = conv3x3(in_ch, out_ch) + else: + if 
with_conv: + self.Conv2d_0 = up_or_down_sampling.Conv2d( + in_ch, + out_ch, + kernel=3, + up=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init(), + ) + self.fir = fir + self.with_conv = with_conv + self.fir_kernel = fir_kernel + self.out_ch = out_ch + + def forward(self, x): + B, C, H, W = x.shape + if not self.fir: + h = F.interpolate(x, (H * 2, W * 2), "nearest") + if self.with_conv: + h = self.Conv_0(h) + else: + if not self.with_conv: + h = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) + else: + h = self.Conv2d_0(x) + + return h class Downsample(nn.Module): - def __init__(self, in_ch=None, out_ch=None, with_conv=False, fir=False, - fir_kernel=(1, 3, 3, 1)): - super().__init__() - out_ch = out_ch if out_ch else in_ch - if not fir: - if with_conv: - self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0) - else: - if with_conv: - self.Conv2d_0 = up_or_down_sampling.Conv2d(in_ch, out_ch, - kernel=3, down=True, - resample_kernel=fir_kernel, - use_bias=True, - kernel_init=default_init()) - self.fir = fir - self.fir_kernel = fir_kernel - self.with_conv = with_conv - self.out_ch = out_ch - - def forward(self, x): - B, C, H, W = x.shape - if not self.fir: - if self.with_conv: - x = F.pad(x, (0, 1, 0, 1)) - x = self.Conv_0(x) - else: - x = F.avg_pool2d(x, 2, stride=2) - else: - if not self.with_conv: - x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) - else: - x = self.Conv2d_0(x) - - return x + def __init__( + self, + in_ch=None, + out_ch=None, + with_conv=False, + fir=False, + fir_kernel=(1, 3, 3, 1), + ): + super().__init__() + out_ch = out_ch if out_ch else in_ch + if not fir: + if with_conv: + self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, padding=0) + else: + if with_conv: + self.Conv2d_0 = up_or_down_sampling.Conv2d( + in_ch, + out_ch, + kernel=3, + down=True, + resample_kernel=fir_kernel, + use_bias=True, + kernel_init=default_init(), + ) + self.fir = fir + self.fir_kernel = fir_kernel + 
self.with_conv = with_conv + self.out_ch = out_ch + + def forward(self, x): + B, C, H, W = x.shape + if not self.fir: + if self.with_conv: + x = F.pad(x, (0, 1, 0, 1)) + x = self.Conv_0(x) + else: + x = F.avg_pool2d(x, 2, stride=2) + else: + if not self.with_conv: + x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) + else: + x = self.Conv2d_0(x) + + return x class ResnetBlockDDPMpp(nn.Module): - """ResBlock adapted from DDPM.""" - - def __init__(self, act, in_ch, out_ch=None, temb_dim=None, conv_shortcut=False, - dropout=0.1, skip_rescale=False, init_scale=0.): - super().__init__() - out_ch = out_ch if out_ch else in_ch - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) - self.Conv_0 = conv3x3(in_ch, out_ch) - if temb_dim is not None: - self.Dense_0 = nn.Linear(temb_dim, out_ch) - self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) - nn.init.zeros_(self.Dense_0.bias) - self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) - self.Dropout_0 = nn.Dropout(dropout) - self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) - if in_ch != out_ch: - if conv_shortcut: - self.Conv_2 = conv3x3(in_ch, out_ch) - else: - self.NIN_0 = NIN(in_ch, out_ch) - - self.skip_rescale = skip_rescale - self.act = act - self.out_ch = out_ch - self.conv_shortcut = conv_shortcut - - def forward(self, x, temb=None): - h = self.act(self.GroupNorm_0(x)) - h = self.Conv_0(h) - if temb is not None: - h += self.Dense_0(self.act(temb))[:, :, None, None] - h = self.act(self.GroupNorm_1(h)) - h = self.Dropout_0(h) - h = self.Conv_1(h) - if x.shape[1] != self.out_ch: - if self.conv_shortcut: - x = self.Conv_2(x) - else: - x = self.NIN_0(x) - if not self.skip_rescale: - return x + h - else: - return (x + h) / np.sqrt(2.) 
+ """ResBlock adapted from DDPM.""" + + def __init__( + self, + act, + in_ch, + out_ch=None, + temb_dim=None, + conv_shortcut=False, + dropout=0.1, + skip_rescale=False, + init_scale=0.0, + ): + super().__init__() + out_ch = out_ch if out_ch else in_ch + self.GroupNorm_0 = nn.GroupNorm( + num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6 + ) + self.Conv_0 = conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.data.shape) + nn.init.zeros_(self.Dense_0.bias) + self.GroupNorm_1 = nn.GroupNorm( + num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6 + ) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) + if in_ch != out_ch: + if conv_shortcut: + self.Conv_2 = conv3x3(in_ch, out_ch) + else: + self.NIN_0 = NIN(in_ch, out_ch) + + self.skip_rescale = skip_rescale + self.act = act + self.out_ch = out_ch + self.conv_shortcut = conv_shortcut + + def forward(self, x, temb=None): + h = self.act(self.GroupNorm_0(x)) + h = self.Conv_0(h) + if temb is not None: + h += self.Dense_0(self.act(temb))[:, :, None, None] + h = self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + if x.shape[1] != self.out_ch: + if self.conv_shortcut: + x = self.Conv_2(x) + else: + x = self.NIN_0(x) + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.0) class ResnetBlockBigGANpp(nn.Module): - def __init__(self, act, in_ch, out_ch=None, temb_dim=None, up=False, down=False, - dropout=0.1, fir=False, fir_kernel=(1, 3, 3, 1), - skip_rescale=True, init_scale=0.): - super().__init__() - - out_ch = out_ch if out_ch else in_ch - self.GroupNorm_0 = nn.GroupNorm(num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6) - self.up = up - self.down = down - self.fir = fir - self.fir_kernel = fir_kernel - - self.Conv_0 = conv3x3(in_ch, out_ch) - if temb_dim is not None: - self.Dense_0 = 
nn.Linear(temb_dim, out_ch) - self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape) - nn.init.zeros_(self.Dense_0.bias) - - self.GroupNorm_1 = nn.GroupNorm(num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6) - self.Dropout_0 = nn.Dropout(dropout) - self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) - if in_ch != out_ch or up or down: - self.Conv_2 = conv1x1(in_ch, out_ch) - - self.skip_rescale = skip_rescale - self.act = act - self.in_ch = in_ch - self.out_ch = out_ch - - def forward(self, x, temb=None): - h = self.act(self.GroupNorm_0(x)) - - if self.up: - if self.fir: - h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2) - x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) - else: - h = up_or_down_sampling.naive_upsample_2d(h, factor=2) - x = up_or_down_sampling.naive_upsample_2d(x, factor=2) - elif self.down: - if self.fir: - h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2) - x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) - else: - h = up_or_down_sampling.naive_downsample_2d(h, factor=2) - x = up_or_down_sampling.naive_downsample_2d(x, factor=2) - - h = self.Conv_0(h) - # Add bias to each feature map conditioned on the time embedding - if temb is not None: - h += self.Dense_0(self.act(temb))[:, :, None, None] - h = self.act(self.GroupNorm_1(h)) - h = self.Dropout_0(h) - h = self.Conv_1(h) - - if self.in_ch != self.out_ch or self.up or self.down: - x = self.Conv_2(x) - - if not self.skip_rescale: - return x + h - else: - return (x + h) / np.sqrt(2.) 
+ def __init__( + self, + act, + in_ch, + out_ch=None, + temb_dim=None, + up=False, + down=False, + dropout=0.1, + fir=False, + fir_kernel=(1, 3, 3, 1), + skip_rescale=True, + init_scale=0.0, + ): + super().__init__() + + out_ch = out_ch if out_ch else in_ch + self.GroupNorm_0 = nn.GroupNorm( + num_groups=min(in_ch // 4, 32), num_channels=in_ch, eps=1e-6 + ) + self.up = up + self.down = down + self.fir = fir + self.fir_kernel = fir_kernel + + self.Conv_0 = conv3x3(in_ch, out_ch) + if temb_dim is not None: + self.Dense_0 = nn.Linear(temb_dim, out_ch) + self.Dense_0.weight.data = default_init()(self.Dense_0.weight.shape) + nn.init.zeros_(self.Dense_0.bias) + + self.GroupNorm_1 = nn.GroupNorm( + num_groups=min(out_ch // 4, 32), num_channels=out_ch, eps=1e-6 + ) + self.Dropout_0 = nn.Dropout(dropout) + self.Conv_1 = conv3x3(out_ch, out_ch, init_scale=init_scale) + if in_ch != out_ch or up or down: + self.Conv_2 = conv1x1(in_ch, out_ch) + + self.skip_rescale = skip_rescale + self.act = act + self.in_ch = in_ch + self.out_ch = out_ch + + def forward(self, x, temb=None): + h = self.act(self.GroupNorm_0(x)) + + if self.up: + if self.fir: + h = up_or_down_sampling.upsample_2d(h, self.fir_kernel, factor=2) + x = up_or_down_sampling.upsample_2d(x, self.fir_kernel, factor=2) + else: + h = up_or_down_sampling.naive_upsample_2d(h, factor=2) + x = up_or_down_sampling.naive_upsample_2d(x, factor=2) + elif self.down: + if self.fir: + h = up_or_down_sampling.downsample_2d(h, self.fir_kernel, factor=2) + x = up_or_down_sampling.downsample_2d(x, self.fir_kernel, factor=2) + else: + h = up_or_down_sampling.naive_downsample_2d(h, factor=2) + x = up_or_down_sampling.naive_downsample_2d(x, factor=2) + + h = self.Conv_0(h) + # Add bias to each feature map conditioned on the time embedding + if temb is not None: + h += self.Dense_0(self.act(temb))[:, :, None, None] + h = self.act(self.GroupNorm_1(h)) + h = self.Dropout_0(h) + h = self.Conv_1(h) + + if self.in_ch != self.out_ch or self.up or 
self.down: + x = self.Conv_2(x) + + if not self.skip_rescale: + return x + h + else: + return (x + h) / np.sqrt(2.0) diff --git a/modules/sgmse/ncsnpp_utils/normalization.py b/modules/sgmse/ncsnpp_utils/normalization.py index 9a232043..fcc4707e 100644 --- a/modules/sgmse/ncsnpp_utils/normalization.py +++ b/modules/sgmse/ncsnpp_utils/normalization.py @@ -20,196 +20,224 @@ def get_normalization(config, conditional=False): - """Obtain normalization modules from the config file.""" - norm = config.model.normalization - if conditional: - if norm == 'InstanceNorm++': - return functools.partial(ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes) + """Obtain normalization modules from the config file.""" + norm = config.model.normalization + if conditional: + if norm == "InstanceNorm++": + return functools.partial( + ConditionalInstanceNorm2dPlus, num_classes=config.model.num_classes + ) + else: + raise NotImplementedError(f"{norm} not implemented yet.") else: - raise NotImplementedError(f'{norm} not implemented yet.') - else: - if norm == 'InstanceNorm': - return nn.InstanceNorm2d - elif norm == 'InstanceNorm++': - return InstanceNorm2dPlus - elif norm == 'VarianceNorm': - return VarianceNorm2d - elif norm == 'GroupNorm': - return nn.GroupNorm - else: - raise ValueError('Unknown normalization: %s' % norm) + if norm == "InstanceNorm": + return nn.InstanceNorm2d + elif norm == "InstanceNorm++": + return InstanceNorm2dPlus + elif norm == "VarianceNorm": + return VarianceNorm2d + elif norm == "GroupNorm": + return nn.GroupNorm + else: + raise ValueError("Unknown normalization: %s" % norm) class ConditionalBatchNorm2d(nn.Module): - def __init__(self, num_features, num_classes, bias=True): - super().__init__() - self.num_features = num_features - self.bias = bias - self.bn = nn.BatchNorm2d(num_features, affine=False) - if self.bias: - self.embed = nn.Embedding(num_classes, num_features * 2) - self.embed.weight.data[:, :num_features].uniform_() # Initialise 
scale at N(1, 0.02) - self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 - else: - self.embed = nn.Embedding(num_classes, num_features) - self.embed.weight.data.uniform_() - - def forward(self, x, y): - out = self.bn(x) - if self.bias: - gamma, beta = self.embed(y).chunk(2, dim=1) - out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view(-1, self.num_features, 1, 1) - else: - gamma = self.embed(y) - out = gamma.view(-1, self.num_features, 1, 1) * out - return out + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.bn = nn.BatchNorm2d(num_features, affine=False) + if self.bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[ + :, :num_features + ].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + out = self.bn(x) + if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=1) + out = gamma.view(-1, self.num_features, 1, 1) * out + beta.view( + -1, self.num_features, 1, 1 + ) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * out + return out class ConditionalInstanceNorm2d(nn.Module): - def __init__(self, num_features, num_classes, bias=True): - super().__init__() - self.num_features = num_features - self.bias = bias - self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) - if bias: - self.embed = nn.Embedding(num_classes, num_features * 2) - self.embed.weight.data[:, :num_features].uniform_() # Initialise scale at N(1, 0.02) - self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 - else: - self.embed = nn.Embedding(num_classes, num_features) - self.embed.weight.data.uniform_() - - def forward(self, x, y): - h = self.instance_norm(x) 
- if self.bias: - gamma, beta = self.embed(y).chunk(2, dim=-1) - out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1) - else: - gamma = self.embed(y) - out = gamma.view(-1, self.num_features, 1, 1) * h - return out + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d( + num_features, affine=False, track_running_stats=False + ) + if bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[ + :, :num_features + ].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + h = self.instance_norm(x) + if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=-1) + out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view( + -1, self.num_features, 1, 1 + ) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * h + return out class ConditionalVarianceNorm2d(nn.Module): - def __init__(self, num_features, num_classes, bias=False): - super().__init__() - self.num_features = num_features - self.bias = bias - self.embed = nn.Embedding(num_classes, num_features) - self.embed.weight.data.normal_(1, 0.02) + def __init__(self, num_features, num_classes, bias=False): + super().__init__() + self.num_features = num_features + self.bias = bias + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.normal_(1, 0.02) - def forward(self, x, y): - vars = torch.var(x, dim=(2, 3), keepdim=True) - h = x / torch.sqrt(vars + 1e-5) + def forward(self, x, y): + vars = torch.var(x, dim=(2, 3), keepdim=True) + h = x / torch.sqrt(vars + 1e-5) - gamma = self.embed(y) - out = gamma.view(-1, self.num_features, 1, 1) * h - return out + gamma = self.embed(y) + out = 
gamma.view(-1, self.num_features, 1, 1) * h + return out class VarianceNorm2d(nn.Module): - def __init__(self, num_features, bias=False): - super().__init__() - self.num_features = num_features - self.bias = bias - self.alpha = nn.Parameter(torch.zeros(num_features)) - self.alpha.data.normal_(1, 0.02) + def __init__(self, num_features, bias=False): + super().__init__() + self.num_features = num_features + self.bias = bias + self.alpha = nn.Parameter(torch.zeros(num_features)) + self.alpha.data.normal_(1, 0.02) - def forward(self, x): - vars = torch.var(x, dim=(2, 3), keepdim=True) - h = x / torch.sqrt(vars + 1e-5) + def forward(self, x): + vars = torch.var(x, dim=(2, 3), keepdim=True) + h = x / torch.sqrt(vars + 1e-5) - out = self.alpha.view(-1, self.num_features, 1, 1) * h - return out + out = self.alpha.view(-1, self.num_features, 1, 1) * h + return out class ConditionalNoneNorm2d(nn.Module): - def __init__(self, num_features, num_classes, bias=True): - super().__init__() - self.num_features = num_features - self.bias = bias - if bias: - self.embed = nn.Embedding(num_classes, num_features * 2) - self.embed.weight.data[:, :num_features].uniform_() # Initialise scale at N(1, 0.02) - self.embed.weight.data[:, num_features:].zero_() # Initialise bias at 0 - else: - self.embed = nn.Embedding(num_classes, num_features) - self.embed.weight.data.uniform_() - - def forward(self, x, y): - if self.bias: - gamma, beta = self.embed(y).chunk(2, dim=-1) - out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view(-1, self.num_features, 1, 1) - else: - gamma = self.embed(y) - out = gamma.view(-1, self.num_features, 1, 1) * x - return out + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + if bias: + self.embed = nn.Embedding(num_classes, num_features * 2) + self.embed.weight.data[ + :, :num_features + ].uniform_() # Initialise scale at N(1, 0.02) + self.embed.weight.data[:, 
num_features:].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, num_features) + self.embed.weight.data.uniform_() + + def forward(self, x, y): + if self.bias: + gamma, beta = self.embed(y).chunk(2, dim=-1) + out = gamma.view(-1, self.num_features, 1, 1) * x + beta.view( + -1, self.num_features, 1, 1 + ) + else: + gamma = self.embed(y) + out = gamma.view(-1, self.num_features, 1, 1) * x + return out class NoneNorm2d(nn.Module): - def __init__(self, num_features, bias=True): - super().__init__() + def __init__(self, num_features, bias=True): + super().__init__() - def forward(self, x): - return x + def forward(self, x): + return x class InstanceNorm2dPlus(nn.Module): - def __init__(self, num_features, bias=True): - super().__init__() - self.num_features = num_features - self.bias = bias - self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) - self.alpha = nn.Parameter(torch.zeros(num_features)) - self.gamma = nn.Parameter(torch.zeros(num_features)) - self.alpha.data.normal_(1, 0.02) - self.gamma.data.normal_(1, 0.02) - if bias: - self.beta = nn.Parameter(torch.zeros(num_features)) - - def forward(self, x): - means = torch.mean(x, dim=(2, 3)) - m = torch.mean(means, dim=-1, keepdim=True) - v = torch.var(means, dim=-1, keepdim=True) - means = (means - m) / (torch.sqrt(v + 1e-5)) - h = self.instance_norm(x) - - if self.bias: - h = h + means[..., None, None] * self.alpha[..., None, None] - out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view(-1, self.num_features, 1, 1) - else: - h = h + means[..., None, None] * self.alpha[..., None, None] - out = self.gamma.view(-1, self.num_features, 1, 1) * h - return out + def __init__(self, num_features, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d( + num_features, affine=False, track_running_stats=False + ) + self.alpha = nn.Parameter(torch.zeros(num_features)) 
+ self.gamma = nn.Parameter(torch.zeros(num_features)) + self.alpha.data.normal_(1, 0.02) + self.gamma.data.normal_(1, 0.02) + if bias: + self.beta = nn.Parameter(torch.zeros(num_features)) + + def forward(self, x): + means = torch.mean(x, dim=(2, 3)) + m = torch.mean(means, dim=-1, keepdim=True) + v = torch.var(means, dim=-1, keepdim=True) + means = (means - m) / (torch.sqrt(v + 1e-5)) + h = self.instance_norm(x) + + if self.bias: + h = h + means[..., None, None] * self.alpha[..., None, None] + out = self.gamma.view(-1, self.num_features, 1, 1) * h + self.beta.view( + -1, self.num_features, 1, 1 + ) + else: + h = h + means[..., None, None] * self.alpha[..., None, None] + out = self.gamma.view(-1, self.num_features, 1, 1) * h + return out class ConditionalInstanceNorm2dPlus(nn.Module): - def __init__(self, num_features, num_classes, bias=True): - super().__init__() - self.num_features = num_features - self.bias = bias - self.instance_norm = nn.InstanceNorm2d(num_features, affine=False, track_running_stats=False) - if bias: - self.embed = nn.Embedding(num_classes, num_features * 3) - self.embed.weight.data[:, :2 * num_features].normal_(1, 0.02) # Initialise scale at N(1, 0.02) - self.embed.weight.data[:, 2 * num_features:].zero_() # Initialise bias at 0 - else: - self.embed = nn.Embedding(num_classes, 2 * num_features) - self.embed.weight.data.normal_(1, 0.02) - - def forward(self, x, y): - means = torch.mean(x, dim=(2, 3)) - m = torch.mean(means, dim=-1, keepdim=True) - v = torch.var(means, dim=-1, keepdim=True) - means = (means - m) / (torch.sqrt(v + 1e-5)) - h = self.instance_norm(x) - - if self.bias: - gamma, alpha, beta = self.embed(y).chunk(3, dim=-1) - h = h + means[..., None, None] * alpha[..., None, None] - out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view(-1, self.num_features, 1, 1) - else: - gamma, alpha = self.embed(y).chunk(2, dim=-1) - h = h + means[..., None, None] * alpha[..., None, None] - out = gamma.view(-1, self.num_features, 1, 1) * 
h - return out + def __init__(self, num_features, num_classes, bias=True): + super().__init__() + self.num_features = num_features + self.bias = bias + self.instance_norm = nn.InstanceNorm2d( + num_features, affine=False, track_running_stats=False + ) + if bias: + self.embed = nn.Embedding(num_classes, num_features * 3) + self.embed.weight.data[:, : 2 * num_features].normal_( + 1, 0.02 + ) # Initialise scale at N(1, 0.02) + self.embed.weight.data[ + :, 2 * num_features : + ].zero_() # Initialise bias at 0 + else: + self.embed = nn.Embedding(num_classes, 2 * num_features) + self.embed.weight.data.normal_(1, 0.02) + + def forward(self, x, y): + means = torch.mean(x, dim=(2, 3)) + m = torch.mean(means, dim=-1, keepdim=True) + v = torch.var(means, dim=-1, keepdim=True) + means = (means - m) / (torch.sqrt(v + 1e-5)) + h = self.instance_norm(x) + + if self.bias: + gamma, alpha, beta = self.embed(y).chunk(3, dim=-1) + h = h + means[..., None, None] * alpha[..., None, None] + out = gamma.view(-1, self.num_features, 1, 1) * h + beta.view( + -1, self.num_features, 1, 1 + ) + else: + gamma, alpha = self.embed(y).chunk(2, dim=-1) + h = h + means[..., None, None] * alpha[..., None, None] + out = gamma.view(-1, self.num_features, 1, 1) * h + return out diff --git a/modules/sgmse/ncsnpp_utils/op/fused_act.py b/modules/sgmse/ncsnpp_utils/op/fused_act.py index e734e2cf..9f6cd311 100644 --- a/modules/sgmse/ncsnpp_utils/op/fused_act.py +++ b/modules/sgmse/ncsnpp_utils/op/fused_act.py @@ -41,7 +41,7 @@ def forward(ctx, grad_output, out, negative_slope, scale): @staticmethod def backward(ctx, gradgrad_input, gradgrad_bias): - out, = ctx.saved_tensors + (out,) = ctx.saved_tensors gradgrad_out = fused.fused_bias_act( gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale ) @@ -62,7 +62,7 @@ def forward(ctx, input, bias, negative_slope, scale): @staticmethod def backward(ctx, grad_output): - out, = ctx.saved_tensors + (out,) = ctx.saved_tensors grad_input, grad_bias = 
FusedLeakyReLUFunctionBackward.apply( grad_output, out, ctx.negative_slope, ctx.scale @@ -72,7 +72,7 @@ def backward(ctx, grad_output): class FusedLeakyReLU(nn.Module): - def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5): + def __init__(self, channel, negative_slope=0.2, scale=2**0.5): super().__init__() self.bias = nn.Parameter(torch.zeros(channel)) @@ -83,7 +83,7 @@ def forward(self, input): return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale) -def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5): +def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5): if input.device.type == "cpu": rest_dim = [1] * (input.ndim - bias.ndim - 1) return ( diff --git a/modules/sgmse/ncsnpp_utils/op/upfirdn2d.py b/modules/sgmse/ncsnpp_utils/op/upfirdn2d.py index a4cf05db..e18039c9 100644 --- a/modules/sgmse/ncsnpp_utils/op/upfirdn2d.py +++ b/modules/sgmse/ncsnpp_utils/op/upfirdn2d.py @@ -61,7 +61,7 @@ def forward( @staticmethod def backward(ctx, gradgrad_input): - kernel, = ctx.saved_tensors + (kernel,) = ctx.saved_tensors gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1) diff --git a/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py b/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py index cf7cd443..5d59071f 100644 --- a/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py +++ b/modules/sgmse/ncsnpp_utils/up_or_down_sampling.py @@ -11,138 +11,150 @@ # Function ported from StyleGAN2 -def get_weight(module, - shape, - weight_var='weight', - kernel_init=None): - """Get/create weight tensor for a convolution or fully-connected layer.""" +def get_weight(module, shape, weight_var="weight", kernel_init=None): + """Get/create weight tensor for a convolution or fully-connected layer.""" - return module.param(weight_var, kernel_init, shape) + return module.param(weight_var, kernel_init, shape) class Conv2d(nn.Module): - """Conv2d layer with optimal upsampling and downsampling (StyleGAN2).""" - - def 
__init__(self, in_ch, out_ch, kernel, up=False, down=False, - resample_kernel=(1, 3, 3, 1), - use_bias=True, - kernel_init=None): - super().__init__() - assert not (up and down) - assert kernel >= 1 and kernel % 2 == 1 - self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel)) - if kernel_init is not None: - self.weight.data = kernel_init(self.weight.data.shape) - if use_bias: - self.bias = nn.Parameter(torch.zeros(out_ch)) - - self.up = up - self.down = down - self.resample_kernel = resample_kernel - self.kernel = kernel - self.use_bias = use_bias - - def forward(self, x): - if self.up: - x = upsample_conv_2d(x, self.weight, k=self.resample_kernel) - elif self.down: - x = conv_downsample_2d(x, self.weight, k=self.resample_kernel) - else: - x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2) - - if self.use_bias: - x = x + self.bias.reshape(1, -1, 1, 1) - - return x + """Conv2d layer with optimal upsampling and downsampling (StyleGAN2).""" + + def __init__( + self, + in_ch, + out_ch, + kernel, + up=False, + down=False, + resample_kernel=(1, 3, 3, 1), + use_bias=True, + kernel_init=None, + ): + super().__init__() + assert not (up and down) + assert kernel >= 1 and kernel % 2 == 1 + self.weight = nn.Parameter(torch.zeros(out_ch, in_ch, kernel, kernel)) + if kernel_init is not None: + self.weight.data = kernel_init(self.weight.data.shape) + if use_bias: + self.bias = nn.Parameter(torch.zeros(out_ch)) + + self.up = up + self.down = down + self.resample_kernel = resample_kernel + self.kernel = kernel + self.use_bias = use_bias + + def forward(self, x): + if self.up: + x = upsample_conv_2d(x, self.weight, k=self.resample_kernel) + elif self.down: + x = conv_downsample_2d(x, self.weight, k=self.resample_kernel) + else: + x = F.conv2d(x, self.weight, stride=1, padding=self.kernel // 2) + + if self.use_bias: + x = x + self.bias.reshape(1, -1, 1, 1) + + return x def naive_upsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, 
C, H, 1, W, 1)) - x = x.repeat(1, 1, 1, factor, 1, factor) - return torch.reshape(x, (-1, C, H * factor, W * factor)) + _N, C, H, W = x.shape + x = torch.reshape(x, (-1, C, H, 1, W, 1)) + x = x.repeat(1, 1, 1, factor, 1, factor) + return torch.reshape(x, (-1, C, H * factor, W * factor)) def naive_downsample_2d(x, factor=2): - _N, C, H, W = x.shape - x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor)) - return torch.mean(x, dim=(3, 5)) + _N, C, H, W = x.shape + x = torch.reshape(x, (-1, C, H // factor, factor, W // factor, factor)) + return torch.mean(x, dim=(3, 5)) def upsample_conv_2d(x, w, k=None, factor=2, gain=1): - """Fused `upsample_2d()` followed by `tf.nn.conv2d()`. - - Padding is performed only once at the beginning, not between the - operations. - The fused op is considerably more efficient than performing the same - calculation - using standard TensorFlow ops. It supports gradients of arbitrary order. - Args: - x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, - C]`. - w: Weight tensor of the shape `[filterH, filterW, inChannels, - outChannels]`. Grouped convolution can be performed by `inChannels = - x.shape[0] // numGroups`. - k: FIR filter of the shape `[firH, firW]` or `[firN]` - (separable). The default is `[1] * factor`, which corresponds to - nearest-neighbor upsampling. - factor: Integer upsampling factor (default: 2). - gain: Scaling factor for signal magnitude (default: 1.0). - - Returns: - Tensor of the shape `[N, C, H * factor, W * factor]` or - `[N, H * factor, W * factor, C]`, and same datatype as `x`. - """ - - assert isinstance(factor, int) and factor >= 1 - - # Check weight shape. - assert len(w.shape) == 4 - convH = w.shape[2] - convW = w.shape[3] - inC = w.shape[1] - outC = w.shape[0] - - assert convW == convH - - # Setup filter kernel. 
- if k is None: - k = [1] * factor - k = _setup_kernel(k) * (gain * (factor ** 2)) - p = (k.shape[0] - factor) - (convW - 1) - - stride = (factor, factor) - - # Determine data dimensions. - stride = [1, 1, factor, factor] - output_shape = ((_shape(x, 2) - 1) * factor + convH, (_shape(x, 3) - 1) * factor + convW) - output_padding = (output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, - output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW) - assert output_padding[0] >= 0 and output_padding[1] >= 0 - num_groups = _shape(x, 1) // inC - - # Transpose weights. - w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) - w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) - w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) - - x = F.conv_transpose2d(x, w, stride=stride, output_padding=output_padding, padding=0) - ## Original TF code. - # x = tf.nn.conv2d_transpose( - # x, - # w, - # output_shape=output_shape, - # strides=stride, - # padding='VALID', - # data_format=data_format) - ## JAX equivalent - - return upfirdn2d(x, torch.tensor(k, device=x.device), - pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) + """Fused `upsample_2d()` followed by `tf.nn.conv2d()`. + + Padding is performed only once at the beginning, not between the + operations. + The fused op is considerably more efficient than performing the same + calculation + using standard TensorFlow ops. It supports gradients of arbitrary order. + Args: + x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, + C]`. + w: Weight tensor of the shape `[filterH, filterW, inChannels, + outChannels]`. Grouped convolution can be performed by `inChannels = + x.shape[0] // numGroups`. + k: FIR filter of the shape `[firH, firW]` or `[firN]` + (separable). The default is `[1] * factor`, which corresponds to + nearest-neighbor upsampling. + factor: Integer upsampling factor (default: 2). + gain: Scaling factor for signal magnitude (default: 1.0). 
+ + Returns: + Tensor of the shape `[N, C, H * factor, W * factor]` or + `[N, H * factor, W * factor, C]`, and same datatype as `x`. + """ + + assert isinstance(factor, int) and factor >= 1 + + # Check weight shape. + assert len(w.shape) == 4 + convH = w.shape[2] + convW = w.shape[3] + inC = w.shape[1] + outC = w.shape[0] + + assert convW == convH + + # Setup filter kernel. + if k is None: + k = [1] * factor + k = _setup_kernel(k) * (gain * (factor**2)) + p = (k.shape[0] - factor) - (convW - 1) + + stride = (factor, factor) + + # Determine data dimensions. + stride = [1, 1, factor, factor] + output_shape = ( + (_shape(x, 2) - 1) * factor + convH, + (_shape(x, 3) - 1) * factor + convW, + ) + output_padding = ( + output_shape[0] - (_shape(x, 2) - 1) * stride[0] - convH, + output_shape[1] - (_shape(x, 3) - 1) * stride[1] - convW, + ) + assert output_padding[0] >= 0 and output_padding[1] >= 0 + num_groups = _shape(x, 1) // inC + + # Transpose weights. + w = torch.reshape(w, (num_groups, -1, inC, convH, convW)) + w = w[..., ::-1, ::-1].permute(0, 2, 1, 3, 4) + w = torch.reshape(w, (num_groups * inC, -1, convH, convW)) + + x = F.conv_transpose2d( + x, w, stride=stride, output_padding=output_padding, padding=0 + ) + ## Original TF code. + # x = tf.nn.conv2d_transpose( + # x, + # w, + # output_shape=output_shape, + # strides=stride, + # padding='VALID', + # data_format=data_format) + ## JAX equivalent + + return upfirdn2d( + x, torch.tensor(k, device=x.device), pad=((p + 1) // 2 + factor - 1, p // 2 + 1) + ) def conv_downsample_2d(x, w, k=None, factor=2, gain=1): - """Fused `tf.nn.conv2d()` followed by `downsample_2d()`. + """Fused `tf.nn.conv2d()` followed by `downsample_2d()`. Padding is performed only once at the beginning, not between the operations. 
The fused op is considerably more efficient than performing the same @@ -163,37 +175,36 @@ def conv_downsample_2d(x, w, k=None, factor=2, gain=1): Returns: Tensor of the shape `[N, C, H // factor, W // factor]` or `[N, H // factor, W // factor, C]`, and same datatype as `x`. - """ + """ - assert isinstance(factor, int) and factor >= 1 - _outC, _inC, convH, convW = w.shape - assert convW == convH - if k is None: - k = [1] * factor - k = _setup_kernel(k) * gain - p = (k.shape[0] - factor) + (convW - 1) - s = [factor, factor] - x = upfirdn2d(x, torch.tensor(k, device=x.device), - pad=((p + 1) // 2, p // 2)) - return F.conv2d(x, w, stride=s, padding=0) + assert isinstance(factor, int) and factor >= 1 + _outC, _inC, convH, convW = w.shape + assert convW == convH + if k is None: + k = [1] * factor + k = _setup_kernel(k) * gain + p = (k.shape[0] - factor) + (convW - 1) + s = [factor, factor] + x = upfirdn2d(x, torch.tensor(k, device=x.device), pad=((p + 1) // 2, p // 2)) + return F.conv2d(x, w, stride=s, padding=0) def _setup_kernel(k): - k = np.asarray(k, dtype=np.float32) - if k.ndim == 1: - k = np.outer(k, k) - k /= np.sum(k) - assert k.ndim == 2 - assert k.shape[0] == k.shape[1] - return k + k = np.asarray(k, dtype=np.float32) + if k.ndim == 1: + k = np.outer(k, k) + k /= np.sum(k) + assert k.ndim == 2 + assert k.shape[0] == k.shape[1] + return k def _shape(x, dim): - return x.shape[dim] + return x.shape[dim] def upsample_2d(x, k=None, factor=2, gain=1): - r"""Upsample a batch of 2D images with the given filter. + r"""Upsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and upsamples each image with the given filter. 
The filter is normalized so @@ -214,18 +225,22 @@ def upsample_2d(x, k=None, factor=2, gain=1): Returns: Tensor of the shape `[N, C, H * factor, W * factor]` - """ - assert isinstance(factor, int) and factor >= 1 - if k is None: - k = [1] * factor - k = _setup_kernel(k) * (gain * (factor ** 2)) - p = k.shape[0] - factor - return upfirdn2d(x, torch.tensor(k, device=x.device), - up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) + """ + assert isinstance(factor, int) and factor >= 1 + if k is None: + k = [1] * factor + k = _setup_kernel(k) * (gain * (factor**2)) + p = k.shape[0] - factor + return upfirdn2d( + x, + torch.tensor(k, device=x.device), + up=factor, + pad=((p + 1) // 2 + factor - 1, p // 2), + ) def downsample_2d(x, k=None, factor=2, gain=1): - r"""Downsample a batch of 2D images with the given filter. + r"""Downsample a batch of 2D images with the given filter. Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]` and downsamples each image with the given filter. 
The filter is normalized @@ -246,12 +261,13 @@ def downsample_2d(x, k=None, factor=2, gain=1): Returns: Tensor of the shape `[N, C, H // factor, W // factor]` - """ - - assert isinstance(factor, int) and factor >= 1 - if k is None: - k = [1] * factor - k = _setup_kernel(k) * gain - p = k.shape[0] - factor - return upfirdn2d(x, torch.tensor(k, device=x.device), - down=factor, pad=((p + 1) // 2, p // 2)) + """ + + assert isinstance(factor, int) and factor >= 1 + if k is None: + k = [1] * factor + k = _setup_kernel(k) * gain + p = k.shape[0] - factor + return upfirdn2d( + x, torch.tensor(k, device=x.device), down=factor, pad=((p + 1) // 2, p // 2) + ) diff --git a/modules/sgmse/ncsnpp_utils/utils.py b/modules/sgmse/ncsnpp_utils/utils.py index ed4c05bb..38333da6 100644 --- a/modules/sgmse/ncsnpp_utils/utils.py +++ b/modules/sgmse/ncsnpp_utils/utils.py @@ -26,164 +26,167 @@ def register_model(cls=None, *, name=None): - """A decorator for registering model classes.""" - - def _register(cls): - if name is None: - local_name = cls.__name__ + """A decorator for registering model classes.""" + + def _register(cls): + if name is None: + local_name = cls.__name__ + else: + local_name = name + if local_name in _MODELS: + raise ValueError(f"Already registered model with name: {local_name}") + _MODELS[local_name] = cls + return cls + + if cls is None: + return _register else: - local_name = name - if local_name in _MODELS: - raise ValueError(f'Already registered model with name: {local_name}') - _MODELS[local_name] = cls - return cls - - if cls is None: - return _register - else: - return _register(cls) + return _register(cls) def get_model(name): - return _MODELS[name] + return _MODELS[name] def get_sigmas(sigma_min, sigma_max, num_scales): - """Get sigmas --- the set of noise levels for SMLD from config files. 
- Args: - config: A ConfigDict object parsed from the config file - Returns: - sigmas: a jax numpy arrary of noise levels - """ - sigmas = np.exp( - np.linspace(np.log(sigma_max), np.log(sigma_min), num_scales)) + """Get sigmas --- the set of noise levels for SMLD from config files. + Args: + config: A ConfigDict object parsed from the config file + Returns: + sigmas: a jax numpy arrary of noise levels + """ + sigmas = np.exp(np.linspace(np.log(sigma_max), np.log(sigma_min), num_scales)) - return sigmas + return sigmas def get_ddpm_params(config): - """Get betas and alphas --- parameters used in the original DDPM paper.""" - num_diffusion_timesteps = 1000 - # parameters need to be adapted if number of time steps differs from 1000 - beta_start = config.model.beta_min / config.model.num_scales - beta_end = config.model.beta_max / config.model.num_scales - betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) - - alphas = 1. - betas - alphas_cumprod = np.cumprod(alphas, axis=0) - sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) - sqrt_1m_alphas_cumprod = np.sqrt(1. 
- alphas_cumprod) - - return { - 'betas': betas, - 'alphas': alphas, - 'alphas_cumprod': alphas_cumprod, - 'sqrt_alphas_cumprod': sqrt_alphas_cumprod, - 'sqrt_1m_alphas_cumprod': sqrt_1m_alphas_cumprod, - 'beta_min': beta_start * (num_diffusion_timesteps - 1), - 'beta_max': beta_end * (num_diffusion_timesteps - 1), - 'num_diffusion_timesteps': num_diffusion_timesteps - } + """Get betas and alphas --- parameters used in the original DDPM paper.""" + num_diffusion_timesteps = 1000 + # parameters need to be adapted if number of time steps differs from 1000 + beta_start = config.model.beta_min / config.model.num_scales + beta_end = config.model.beta_max / config.model.num_scales + betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) + + alphas = 1.0 - betas + alphas_cumprod = np.cumprod(alphas, axis=0) + sqrt_alphas_cumprod = np.sqrt(alphas_cumprod) + sqrt_1m_alphas_cumprod = np.sqrt(1.0 - alphas_cumprod) + + return { + "betas": betas, + "alphas": alphas, + "alphas_cumprod": alphas_cumprod, + "sqrt_alphas_cumprod": sqrt_alphas_cumprod, + "sqrt_1m_alphas_cumprod": sqrt_1m_alphas_cumprod, + "beta_min": beta_start * (num_diffusion_timesteps - 1), + "beta_max": beta_end * (num_diffusion_timesteps - 1), + "num_diffusion_timesteps": num_diffusion_timesteps, + } def create_model(config): - """Create the score model.""" - model_name = config.model.name - score_model = get_model(model_name)(config) - score_model = score_model.to(config.device) - score_model = torch.nn.DataParallel(score_model) - return score_model + """Create the score model.""" + model_name = config.model.name + score_model = get_model(model_name)(config) + score_model = score_model.to(config.device) + score_model = torch.nn.DataParallel(score_model) + return score_model def get_model_fn(model, train=False): - """Create a function to give the output of the score-based model. + """Create a function to give the output of the score-based model. + + Args: + model: The score model. 
+ train: `True` for training and `False` for evaluation. + + Returns: + A model function. + """ + + def model_fn(x, labels): + """Compute the output of the score-based model. + + Args: + x: A mini-batch of input data. + labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently + for different models. - Args: - model: The score model. - train: `True` for training and `False` for evaluation. + Returns: + A tuple of (model output, new mutable states) + """ + if not train: + model.eval() + return model(x, labels) + else: + model.train() + return model(x, labels) - Returns: - A model function. - """ + return model_fn - def model_fn(x, labels): - """Compute the output of the score-based model. + +def get_score_fn(sde, model, train=False, continuous=False): + """Wraps `score_fn` so that the model output corresponds to a real time-dependent score function. Args: - x: A mini-batch of input data. - labels: A mini-batch of conditioning variables for time steps. Should be interpreted differently - for different models. + sde: An `sde_lib.SDE` object that represents the forward SDE. + model: A score model. + train: `True` for training and `False` for evaluation. + continuous: If `True`, the score-based model is expected to directly take continuous time steps. Returns: - A tuple of (model output, new mutable states) + A score function. """ - if not train: - model.eval() - return model(x, labels) - else: - model.train() - return model(x, labels) - - return model_fn + model_fn = get_model_fn(model, train=train) + + if isinstance(sde, OUVPSDE): + + def score_fn(x, t): + # Scale neural network output by standard deviation and flip sign + if continuous: + # For VP-trained models, t=0 corresponds to the lowest noise level + # The maximum value of time embedding is assumed to 999 for + # continuously-trained models. 
+ labels = t * 999 + score = model_fn(x, labels) + std = sde.marginal_prob(torch.zeros_like(x), t)[1] + else: + # For VP-trained models, t=0 corresponds to the lowest noise level + labels = t * (sde.N - 1) + score = model_fn(x, labels) + std = sde.sqrt_1m_alphas_cumprod.to(labels.device)[labels.long()] + + score = -score / std[:, None, None, None] + return score + + elif isinstance(sde, OUVESDE): + + def score_fn(x, t): + if continuous: + labels = sde.marginal_prob(torch.zeros_like(x), t)[1] + else: + # For VE-trained models, t=0 corresponds to the highest noise level + labels = sde.T - t + labels *= sde.N - 1 + labels = torch.round(labels).long() + + score = model_fn(x, labels) + return score + else: + raise NotImplementedError( + f"SDE class {sde.__class__.__name__} not yet supported." + ) -def get_score_fn(sde, model, train=False, continuous=False): - """Wraps `score_fn` so that the model output corresponds to a real time-dependent score function. - - Args: - sde: An `sde_lib.SDE` object that represents the forward SDE. - model: A score model. - train: `True` for training and `False` for evaluation. - continuous: If `True`, the score-based model is expected to directly take continuous time steps. - - Returns: - A score function. - """ - model_fn = get_model_fn(model, train=train) - - if isinstance(sde, OUVPSDE): - def score_fn(x, t): - # Scale neural network output by standard deviation and flip sign - if continuous: - # For VP-trained models, t=0 corresponds to the lowest noise level - # The maximum value of time embedding is assumed to 999 for - # continuously-trained models. 
- labels = t * 999 - score = model_fn(x, labels) - std = sde.marginal_prob(torch.zeros_like(x), t)[1] - else: - # For VP-trained models, t=0 corresponds to the lowest noise level - labels = t * (sde.N - 1) - score = model_fn(x, labels) - std = sde.sqrt_1m_alphas_cumprod.to(labels.device)[labels.long()] - - score = -score / std[:, None, None, None] - return score - - elif isinstance(sde, OUVESDE): - def score_fn(x, t): - if continuous: - labels = sde.marginal_prob(torch.zeros_like(x), t)[1] - else: - # For VE-trained models, t=0 corresponds to the highest noise level - labels = sde.T - t - labels *= sde.N - 1 - labels = torch.round(labels).long() - - score = model_fn(x, labels) - return score - - else: - raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.") - - return score_fn + return score_fn def to_flattened_numpy(x): - """Flatten a torch tensor `x` and convert it to numpy.""" - return x.detach().cpu().numpy().reshape((-1,)) + """Flatten a torch tensor `x` and convert it to numpy.""" + return x.detach().cpu().numpy().reshape((-1,)) def from_flattened_numpy(x, shape): - """Form a torch tensor with the given `shape` from a flattened numpy array `x`.""" - return torch.from_numpy(x.reshape(shape)) \ No newline at end of file + """Form a torch tensor with the given `shape` from a flattened numpy array `x`.""" + return torch.from_numpy(x.reshape(shape)) diff --git a/modules/sgmse/sampling/__init__.py b/modules/sgmse/sampling/__init__.py index 0046a1c1..3248dc62 100644 --- a/modules/sgmse/sampling/__init__.py +++ b/modules/sgmse/sampling/__init__.py @@ -8,8 +8,11 @@ __all__ = [ - 'PredictorRegistry', 'CorrectorRegistry', 'Predictor', 'Corrector', - 'get_sampler' + "PredictorRegistry", + "CorrectorRegistry", + "Predictor", + "Corrector", + "get_sampler", ] @@ -24,9 +27,18 @@ def from_flattened_numpy(x, shape): def get_pc_sampler( - predictor_name, corrector_name, sde, score_fn, y, - denoise=True, eps=3e-2, snr=0.1, corrector_steps=1, 
probability_flow: bool = False, - intermediate=False, **kwargs + predictor_name, + corrector_name, + sde, + score_fn, + y, + denoise=True, + eps=3e-2, + snr=0.1, + corrector_steps=1, + probability_flow: bool = False, + intermediate=False, + **kwargs ): """Create a Predictor-Corrector (PC) sampler. @@ -62,14 +74,22 @@ def pc_sampler(): x_result = xt_mean if denoise else xt ns = sde.N * (corrector.n_steps + 1) return x_result, ns - + return pc_sampler def get_ode_sampler( - sde, score_fn, y, inverse_scaler=None, - denoise=True, rtol=1e-5, atol=1e-5, - method='RK45', eps=3e-2, device='cuda', **kwargs + sde, + score_fn, + y, + inverse_scaler=None, + denoise=True, + rtol=1e-5, + atol=1e-5, + method="RK45", + eps=3e-2, + device="cuda", + **kwargs ): """Probability flow ODE sampler with the black-box ODE solver. @@ -122,11 +142,21 @@ def ode_func(t, x): # Black-box ODE solver for the probability flow ODE solution = integrate.solve_ivp( - ode_func, (sde.T, eps), to_flattened_numpy(x), - rtol=rtol, atol=atol, method=method, **kwargs + ode_func, + (sde.T, eps), + to_flattened_numpy(x), + rtol=rtol, + atol=atol, + method=method, + **kwargs ) nfe = solution.nfev - x = torch.tensor(solution.y[:, -1]).reshape(y.shape).to(device).type(torch.complex64) + x = ( + torch.tensor(solution.y[:, -1]) + .reshape(y.shape) + .to(device) + .type(torch.complex64) + ) # Denoising is equivalent to running one predictor step without adding noise if denoise: diff --git a/modules/sgmse/sampling/correctors.py b/modules/sgmse/sampling/correctors.py index e1057475..4b995f4b 100644 --- a/modules/sgmse/sampling/correctors.py +++ b/modules/sgmse/sampling/correctors.py @@ -34,7 +34,7 @@ def update_fn(self, x, t, *args): pass -@CorrectorRegistry.register(name='langevin') +@CorrectorRegistry.register(name="langevin") class LangevinCorrector(Corrector): def __init__(self, sde, score_fn, snr, n_steps): super().__init__(sde, score_fn, snr, n_steps) @@ -56,13 +56,16 @@ def update_fn(self, x, t, *args): return 
x, x_mean -@CorrectorRegistry.register(name='ald') +@CorrectorRegistry.register(name="ald") class AnnealedLangevinDynamics(Corrector): """The original annealed Langevin dynamics predictor in NCSN/NCSNv2.""" + def __init__(self, sde, score_fn, snr, n_steps): super().__init__(sde, score_fn, snr, n_steps) if not isinstance(sde, (sdes.OUVESDE,)): - raise NotImplementedError(f"SDE class {sde.__class__.__name__} not yet supported.") + raise NotImplementedError( + f"SDE class {sde.__class__.__name__} not yet supported." + ) self.sde = sde self.score_fn = score_fn self.snr = snr @@ -83,7 +86,7 @@ def update_fn(self, x, t, *args): return x, x_mean -@CorrectorRegistry.register(name='none') +@CorrectorRegistry.register(name="none") class NoneCorrector(Corrector): """An empty corrector that does nothing.""" diff --git a/modules/sgmse/sampling/predictors.py b/modules/sgmse/sampling/predictors.py index 963fd525..84723f18 100644 --- a/modules/sgmse/sampling/predictors.py +++ b/modules/sgmse/sampling/predictors.py @@ -35,16 +35,18 @@ def update_fn(self, x, t, *args): pass def debug_update_fn(self, x, t, *args): - raise NotImplementedError(f"Debug update function not implemented for predictor {self}.") + raise NotImplementedError( + f"Debug update function not implemented for predictor {self}." + ) -@PredictorRegistry.register('euler_maruyama') +@PredictorRegistry.register("euler_maruyama") class EulerMaruyamaPredictor(Predictor): def __init__(self, sde, score_fn, probability_flow=False): super().__init__(sde, score_fn, probability_flow=probability_flow) def update_fn(self, x, t, *args): - dt = -1. 
/ self.rsde.N + dt = -1.0 / self.rsde.N z = torch.randn_like(x) f, g = self.rsde.sde(x, t, *args) x_mean = x + f * dt @@ -52,7 +54,7 @@ def update_fn(self, x, t, *args): return x, x_mean -@PredictorRegistry.register('reverse_diffusion') +@PredictorRegistry.register("reverse_diffusion") class ReverseDiffusionPredictor(Predictor): def __init__(self, sde, score_fn, probability_flow=False): super().__init__(sde, score_fn, probability_flow=probability_flow) @@ -65,7 +67,7 @@ def update_fn(self, x, t, *args): return x, x_mean -@PredictorRegistry.register('none') +@PredictorRegistry.register("none") class NonePredictor(Predictor): """An empty predictor that does nothing.""" diff --git a/modules/sgmse/sdes.py b/modules/sgmse/sdes.py index 2fd86a69..441311bb 100644 --- a/modules/sgmse/sdes.py +++ b/modules/sgmse/sdes.py @@ -3,6 +3,7 @@ Taken and adapted from https://github.com/yang-song/score_sde_pytorch/blob/1618ddea340f3e4a2ed7852a0694a809775cf8d0/sde_lib.py """ + import abc import warnings @@ -113,24 +114,41 @@ def T(self): def sde(self, x, t, *args): """Create the drift and diffusion functions for the reverse SDE/ODE.""" rsde_parts = self.rsde_parts(x, t, *args) - total_drift, diffusion = rsde_parts["total_drift"], rsde_parts["diffusion"] + total_drift, diffusion = ( + rsde_parts["total_drift"], + rsde_parts["diffusion"], + ) return total_drift, diffusion def rsde_parts(self, x, t, *args): sde_drift, sde_diffusion = sde_fn(x, t, *args) score = score_model(x, t, *args) - score_drift = -sde_diffusion[:, None, None, None]**2 * score * (0.5 if self.probability_flow else 1.) 
- diffusion = torch.zeros_like(sde_diffusion) if self.probability_flow else sde_diffusion + score_drift = ( + -sde_diffusion[:, None, None, None] ** 2 + * score + * (0.5 if self.probability_flow else 1.0) + ) + diffusion = ( + torch.zeros_like(sde_diffusion) + if self.probability_flow + else sde_diffusion + ) total_drift = sde_drift + score_drift return { - 'total_drift': total_drift, 'diffusion': diffusion, 'sde_drift': sde_drift, - 'sde_diffusion': sde_diffusion, 'score_drift': score_drift, 'score': score, + "total_drift": total_drift, + "diffusion": diffusion, + "sde_drift": sde_drift, + "sde_diffusion": sde_diffusion, + "score_drift": score_drift, + "score": score, } def discretize(self, x, t, *args): """Create discretized iteration rules for the reverse diffusion sampler.""" f, G = discretize_fn(x, t, *args) - rev_f = f - G[:, None, None, None] ** 2 * score_model(x, t, *args) * (0.5 if self.probability_flow else 1.) + rev_f = f - G[:, None, None, None] ** 2 * score_model(x, t, *args) * ( + 0.5 if self.probability_flow else 1.0 + ) rev_G = torch.zeros_like(G) if self.probability_flow else G return rev_f, rev_G @@ -145,10 +163,30 @@ def copy(self): class OUVESDE(SDE): @staticmethod def add_argparse_args(parser): - parser.add_argument("--sde-n", type=int, default=1000, help="The number of timesteps in the SDE discretization. 30 by default") - parser.add_argument("--theta", type=float, default=1.5, help="The constant stiffness of the Ornstein-Uhlenbeck process. 1.5 by default.") - parser.add_argument("--sigma-min", type=float, default=0.05, help="The minimum sigma to use. 0.05 by default.") - parser.add_argument("--sigma-max", type=float, default=0.5, help="The maximum sigma to use. 0.5 by default.") + parser.add_argument( + "--sde-n", + type=int, + default=1000, + help="The number of timesteps in the SDE discretization. 
30 by default", + ) + parser.add_argument( + "--theta", + type=float, + default=1.5, + help="The constant stiffness of the Ornstein-Uhlenbeck process. 1.5 by default.", + ) + parser.add_argument( + "--sigma-min", + type=float, + default=0.05, + help="The minimum sigma to use. 0.05 by default.", + ) + parser.add_argument( + "--sigma-max", + type=float, + default=0.5, + help="The maximum sigma to use. 0.5 by default.", + ) return parser def __init__(self, theta, sigma_min, sigma_max, N=1000, **ignored_kwargs): @@ -209,8 +247,7 @@ def _std(self, t): * (torch.exp(2 * (theta + logsig) * t) - 1) * logsig ) - / - (theta + logsig) + / (theta + logsig) ) def marginal_prob(self, x0, t, y): @@ -218,7 +255,9 @@ def marginal_prob(self, x0, t, y): def prior_sampling(self, shape, y): if shape != y.shape: - warnings.warn(f"Target shape {shape} does not match shape of y {y.shape}! Ignoring target shape.") + warnings.warn( + f"Target shape {shape} does not match shape of y {y.shape}! Ignoring target shape." + ) std = self._std(torch.ones((y.shape[0],), device=y.device)) x_T = y + torch.randn_like(y) * std[:, None, None, None] return x_T @@ -232,14 +271,24 @@ class OUVPSDE(SDE): # !!! We do not utilize this SDE in our works due to observed instabilities around t=0.2. !!! @staticmethod def add_argparse_args(parser): - parser.add_argument("--sde-n", type=int, default=1000, - help="The number of timesteps in the SDE discretization. 1000 by default") - parser.add_argument("--beta-min", type=float, required=True, - help="The minimum beta to use.") - parser.add_argument("--beta-max", type=float, required=True, - help="The maximum beta to use.") - parser.add_argument("--stiffness", type=float, default=1, - help="The stiffness factor for the drift, to be multiplied by 0.5*beta(t). 1 by default.") + parser.add_argument( + "--sde-n", + type=int, + default=1000, + help="The number of timesteps in the SDE discretization. 
1000 by default", + ) + parser.add_argument( + "--beta-min", type=float, required=True, help="The minimum beta to use." + ) + parser.add_argument( + "--beta-max", type=float, required=True, help="The maximum beta to use." + ) + parser.add_argument( + "--stiffness", + type=float, + default=1, + help="The stiffness factor for the drift, to be multiplied by 0.5*beta(t). 1 by default.", + ) return parser def __init__(self, beta_min, beta_max, stiffness=1, N=1000, **ignored_kwargs): @@ -286,19 +335,23 @@ def sde(self, x, t, y): def _mean(self, x0, t, y): b0, b1, s = self.beta_min, self.beta_max, self.stiffness - x0y_fac = torch.exp(-0.25 * s * t * (t * (b1-b0) + 2 * b0))[:, None, None, None] + x0y_fac = torch.exp(-0.25 * s * t * (t * (b1 - b0) + 2 * b0))[ + :, None, None, None + ] return y + x0y_fac * (x0 - y) def _std(self, t): b0, b1, s = self.beta_min, self.beta_max, self.stiffness - return (1 - torch.exp(-0.5 * s * t * (t * (b1-b0) + 2 * b0))) / s + return (1 - torch.exp(-0.5 * s * t * (t * (b1 - b0) + 2 * b0))) / s def marginal_prob(self, x0, t, y): return self._mean(x0, t, y), self._std(t) def prior_sampling(self, shape, y): if shape != y.shape: - warnings.warn(f"Target shape {shape} does not match shape of y {y.shape}! Ignoring target shape.") + warnings.warn( + f"Target shape {shape} does not match shape of y {y.shape}! Ignoring target shape." 
+ ) std = self._std(torch.ones((y.shape[0],), device=y.device)) x_T = y + torch.randn_like(y) * std[:, None, None, None] return x_T diff --git a/modules/sgmse/shared.py b/modules/sgmse/shared.py index 458be781..8165069f 100644 --- a/modules/sgmse/shared.py +++ b/modules/sgmse/shared.py @@ -27,7 +27,7 @@ def __init__(self, embed_dim, scale=16, complex_valued=False): self.W = nn.Parameter(torch.randn(embed_dim) * scale, requires_grad=False) def forward(self, t): - t_proj = t[:, None] * self.W[None, :] * 2*np.pi + t_proj = t[:, None] * self.W[None, :] * 2 * np.pi if self.complex_valued: return torch.exp(1j * t_proj) else: @@ -49,7 +49,9 @@ def __init__(self, embed_dim, complex_valued=False): self.embed_dim = embed_dim def forward(self, t): - fac = 10**(4*torch.arange(self.embed_dim, device=t.device) / (self.embed_dim-1)) + fac = 10 ** ( + 4 * torch.arange(self.embed_dim, device=t.device) / (self.embed_dim - 1) + ) inner = t[:, None] * fac[None, :] if self.complex_valued: return torch.exp(1j * inner) @@ -59,6 +61,7 @@ def forward(self, t): class ComplexLinear(nn.Module): """A potentially complex-valued linear layer. 
Reduces to a regular linear layer if `complex_valued=False`.""" + def __init__(self, input_dim, output_dim, complex_valued): super().__init__() self.complex_valued = complex_valued @@ -70,7 +73,9 @@ def __init__(self, input_dim, output_dim, complex_valued): def forward(self, x): if self.complex_valued: - return (self.re(x.real) - self.im(x.imag)) + 1j*(self.re(x.imag) + self.im(x.real)) + return (self.re(x.real) - self.im(x.imag)) + 1j * ( + self.re(x.imag) + self.im(x.real) + ) else: return self.lin(x) @@ -114,10 +119,14 @@ def __init__(self, module_cls, *args, **kwargs): def forward(self, x, *args, **kwargs): return torch_complex_from_reim( - self.re_module(x.real, *args, **kwargs) - self.im_module(x.imag, *args, **kwargs), - self.re_module(x.imag, *args, **kwargs) + self.im_module(x.real, *args, **kwargs), + self.re_module(x.real, *args, **kwargs) + - self.im_module(x.imag, *args, **kwargs), + self.re_module(x.imag, *args, **kwargs) + + self.im_module(x.real, *args, **kwargs), ) ComplexConv2d = functools.partial(ArgsComplexMultiplicationWrapper, nn.Conv2d) -ComplexConvTranspose2d = functools.partial(ArgsComplexMultiplicationWrapper, nn.ConvTranspose2d) +ComplexConvTranspose2d = functools.partial( + ArgsComplexMultiplicationWrapper, nn.ConvTranspose2d +) diff --git a/preprocessors/wsj0reverb.py b/preprocessors/wsj0reverb.py index 36b90f75..84b71c01 100644 --- a/preprocessors/wsj0reverb.py +++ b/preprocessors/wsj0reverb.py @@ -28,11 +28,14 @@ NB_SAMPLES_PER_ROOM = 1 CHANNELS = 1 + def obtain_clean_file(speech_list, i_sample, sample_rate=16000): speech, speech_sr = sf.read(speech_list[i_sample]) speech_basename = os.path.basename(speech_list[i_sample]) - assert speech_sr == sample_rate, f"wrong speech sampling rate here: expected {sample_rate} got {speech_sr}" - return speech.squeeze(), speech_sr, speech_basename[: -4] + assert ( + speech_sr == sample_rate + ), f"wrong speech sampling rate here: expected {sample_rate} got {speech_sr}" + return speech.squeeze(), 
speech_sr, speech_basename[:-4] def main(output_path, dataset_path): @@ -43,18 +46,27 @@ def main(output_path, dataset_path): save_dir = os.path.join(output_path, dataset) os.makedirs(save_dir, exist_ok=True) wsj0reverb_path = dataset_path - splits = ['valid', 'train', 'test'] + splits = ["valid", "train", "test"] dic_split = {"valid": "si_dt_05", "train": "si_tr_s", "test": "si_et_05"} - speech_lists = {split: sorted(glob(f"{os.path.join(wsj0reverb_path, dic_split[split])}/**/*.wav")) for split in splits} + speech_lists = { + split: sorted( + glob(f"{os.path.join(wsj0reverb_path, dic_split[split])}/**/*.wav") + ) + for split in splits + } for i_split, split in enumerate(splits): print("Processing split n° {}: {}...".format(i_split + 1, split)) reverberant_output_dir = os.path.join(save_dir, "audio", split, "reverb") dry_output_dir = os.path.join(save_dir, "audio", split, "anechoic") - noisy_reverberant_output_dir = os.path.join(save_dir, "audio", split, "noisy_reverb") + noisy_reverberant_output_dir = os.path.join( + save_dir, "audio", split, "noisy_reverb" + ) if split == "test": - unauralized_output_dir = os.path.join(save_dir, "audio", split, "unauralized") + unauralized_output_dir = os.path.join( + save_dir, "audio", split, "unauralized" + ) os.makedirs(reverberant_output_dir, exist_ok=True) os.makedirs(dry_output_dir, exist_ok=True) @@ -67,28 +79,54 @@ def main(output_path, dataset_path): if not i_sample % NB_SAMPLES_PER_ROOM: # Generate new room t60 = np.random.uniform(T60_RANGE[0], T60_RANGE[1]) # Draw T60 room_dim = np.array( - [np.random.uniform(DIM_RANGE[2 * n], DIM_RANGE[2 * n + 1]) for n in range(3)]) # Draw Dimensions + [ + np.random.uniform(DIM_RANGE[2 * n], DIM_RANGE[2 * n + 1]) + for n in range(3) + ] + ) # Draw Dimensions center_mic_position = np.array( - [np.random.uniform(MIN_DISTANCE_TO_WALL, room_dim[n] - MIN_DISTANCE_TO_WALL) for n in - range(3)]) # draw source position + [ + np.random.uniform( + MIN_DISTANCE_TO_WALL, room_dim[n] - 
MIN_DISTANCE_TO_WALL + ) + for n in range(3) + ] + ) # draw source position source_position = np.array( - [np.random.uniform(MIN_DISTANCE_TO_WALL, room_dim[n] - MIN_DISTANCE_TO_WALL) for n in - range(3)]) # draw source position - mic_array_2d = pra.beamforming.circular_2D_array(center_mic_position[: -1], CHANNELS, phi0=0, - radius=MIC_ARRAY_RADIUS) # Compute microphone array - mic_array = np.pad(mic_array_2d, ((0, 1), (0, 0)), mode="constant", - constant_values=center_mic_position[-1]) + [ + np.random.uniform( + MIN_DISTANCE_TO_WALL, room_dim[n] - MIN_DISTANCE_TO_WALL + ) + for n in range(3) + ] + ) # draw source position + mic_array_2d = pra.beamforming.circular_2D_array( + center_mic_position[:-1], CHANNELS, phi0=0, radius=MIC_ARRAY_RADIUS + ) # Compute microphone array + mic_array = np.pad( + mic_array_2d, + ((0, 1), (0, 0)), + mode="constant", + constant_values=center_mic_position[-1], + ) ### Reverberant Room - e_absorption, max_order = pra.inverse_sabine(t60, room_dim) # Compute absorption coeff + e_absorption, max_order = pra.inverse_sabine( + t60, room_dim + ) # Compute absorption coeff reverberant_room = pra.ShoeBox( - room_dim, fs=16000, materials=pra.Material(e_absorption), max_order=min(3, max_order) + room_dim, + fs=16000, + materials=pra.Material(e_absorption), + max_order=min(3, max_order), ) # Create room reverberant_room.set_ray_tracing() reverberant_room.add_microphone_array(mic_array) # Add microphone array # Pick unauralized files - speech, speech_sr, speech_basename = obtain_clean_file(speech_list, i_sample, sample_rate=sample_rate) + speech, speech_sr, speech_basename = obtain_clean_file( + speech_list, i_sample, sample_rate=sample_rate + ) # Generate reverberant room reverberant_room.add_source(source_position, signal=speech) @@ -99,7 +137,10 @@ def main(output_path, dataset_path): e_absorption_dry = 0.99 # For Neural Networks OK but clearly not for WPE dry_room = pra.ShoeBox( - room_dim, fs=16000, materials=pra.Material(e_absorption_dry), 
max_order=0 + room_dim, + fs=16000, + materials=pra.Material(e_absorption_dry), + max_order=0, ) # Create room dry_room.add_microphone_array(mic_array) # Add microphone array @@ -110,21 +151,37 @@ def main(output_path, dataset_path): t60_real_dry = np.mean(dry_room.measure_rt60()).squeeze() rir_dry = dry_room.rir dry = np.stack(dry_room.mic_array.signals).swapaxes(0, 1) - dry = np.pad(dry, ((0, int(.5 * sample_rate)), (0, 0)), mode="constant", - constant_values=0) # Add 1 second of silence after dry (because very dry) so that the reverb is not cut, and all samples have same length + dry = np.pad( + dry, + ((0, int(0.5 * sample_rate)), (0, 0)), + mode="constant", + constant_values=0, + ) # Add 1 second of silence after dry (because very dry) so that the reverb is not cut, and all samples have same length min_len_sample = min(reverberant.shape[0], dry.shape[0]) - dry = dry[: min_len_sample] - reverberant = reverberant[: min_len_sample] - output_scaling = np.max(reverberant) / .9 + dry = dry[:min_len_sample] + reverberant = reverberant[:min_len_sample] + output_scaling = np.max(reverberant) / 0.9 - drr = 10 * np.log10(np.mean(dry ** 2) / (np.mean(reverberant ** 2) + 1e-8) + 1e-8) + drr = 10 * np.log10( + np.mean(dry**2) / (np.mean(reverberant**2) + 1e-8) + 1e-8 + ) output_filename = f"{speech_basename}_{i_sample // NB_SAMPLES_PER_ROOM}_{t60_real:.2f}_{drr:.1f}.wav" - sf.write(os.path.join(dry_output_dir, output_filename), 1 / output_scaling * dry, samplerate=sample_rate) - sf.write(os.path.join(reverberant_output_dir, output_filename), 1 / output_scaling * reverberant, - samplerate=sample_rate) + sf.write( + os.path.join(dry_output_dir, output_filename), + 1 / output_scaling * dry, + samplerate=sample_rate, + ) + sf.write( + os.path.join(reverberant_output_dir, output_filename), + 1 / output_scaling * reverberant, + samplerate=sample_rate, + ) if split == "test": - sf.write(os.path.join(unauralized_output_dir, output_filename), speech, samplerate=sample_rate) - + 
sf.write( + os.path.join(unauralized_output_dir, output_filename), + speech, + samplerate=sample_rate, + ) From 9852b07c37f20f98230f85907edbd292f1706de8 Mon Sep 17 00:00:00 2001 From: lithr1 <1102340779@qq.com> Date: Mon, 8 Apr 2024 12:42:02 +0800 Subject: [PATCH 3/3] sgmse --- egs/sgmse/README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/egs/sgmse/README.md b/egs/sgmse/README.md index ed46552a..4765f4a1 100644 --- a/egs/sgmse/README.md +++ b/egs/sgmse/README.md @@ -6,8 +6,7 @@
- -This repository contains the official PyTorch implementations for the 2023 papers: +This repository contains the PyTorch implementations for the 2023 papers and also adapted from [sgmse](https://github.com/sp-uhh/sgmse): - Julius Richter, Simon Welker, Jean-Marie Lemercier, Bunlong Lay, Timo Gerkmann. [*"Speech Enhancement and Dereverberation with Diffusion-Based Generative Models"*](https://ieeexplore.ieee.org/abstract/document/10149431), IEEE/ACM Transactions on Audio, Speech, and Language Processing, vol. 31, pp. 2351-2364, 2023.