From bc21f21e8cb58ddad6081f90ff2ab5701ac75f03 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:35:00 -0700 Subject: [PATCH 01/87] Don't export model internals publicly (#1255) Following our conventions, layers will be exposed standalone when they move from `models/xx/some_layer` -> `layers/some_layer` --- keras_nlp/models/xlnet/relative_attention.py | 2 -- keras_nlp/models/xlnet/xlnet_encoder.py | 2 -- 2 files changed, 4 deletions(-) diff --git a/keras_nlp/models/xlnet/relative_attention.py b/keras_nlp/models/xlnet/relative_attention.py index d11dc4bd5a..a11ae3fd9d 100644 --- a/keras_nlp/models/xlnet/relative_attention.py +++ b/keras_nlp/models/xlnet/relative_attention.py @@ -15,7 +15,6 @@ import math import string -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops @@ -76,7 +75,6 @@ def _rel_shift(x, klen=-1): return x -@keras_nlp_export("keras_nlp.layers.TwoStreamRelativeAttention") class TwoStreamRelativeAttention(keras.layers.MultiHeadAttention): """Two-stream relative self-attention for XLNet. diff --git a/keras_nlp/models/xlnet/xlnet_encoder.py b/keras_nlp/models/xlnet/xlnet_encoder.py index 13f5a953ee..bb8e56e4cc 100644 --- a/keras_nlp/models/xlnet/xlnet_encoder.py +++ b/keras_nlp/models/xlnet/xlnet_encoder.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.xlnet.relative_attention import TwoStreamRelativeAttention @@ -22,7 +21,6 @@ def xlnet_kernel_initializer(stddev=0.02): return keras.initializers.TruncatedNormal(stddev=stddev) -@keras_nlp_export("keras_nlp.layers.XLNetEncoder") class XLNetEncoder(keras.layers.Layer): """ XLNet Encoder. From 2c8e915859ffcb3ba2d19e7707cce542d92c6b96 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 21 Sep 2023 16:35:08 -0700 Subject: [PATCH 02/87] Bump master branch version number to 0.7.0.dev0 (#1254) I have been forgetting to increment our master branch release number, so it's now less than our pip release :) I think we can consider the master branch a preview of our 0.7.0 release at this point. --- keras_nlp/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/__init__.py b/keras_nlp/__init__.py index de7ae1c180..759347103c 100644 --- a/keras_nlp/__init__.py +++ b/keras_nlp/__init__.py @@ -28,4 +28,4 @@ from keras_nlp import utils # This is the global source of truth for the version number. 
-__version__ = "0.6.0.dev0" +__version__ = "0.7.0.dev0" From 84c212229b0f600ac2d39b2643dfc3e4c52465a9 Mon Sep 17 00:00:00 2001 From: ferraric <32451307+ferraric@users.noreply.github.com> Date: Mon, 2 Oct 2023 20:32:28 +0200 Subject: [PATCH 03/87] Fix/allow different encoder and decoder feature dimensions in transformer decoder layer (#1260) * test: add failing unit test * fix: pass correct shapes --- keras_nlp/layers/modeling/transformer_decoder.py | 6 +++--- keras_nlp/layers/modeling/transformer_decoder_test.py | 9 +++++++++ 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/keras_nlp/layers/modeling/transformer_decoder.py b/keras_nlp/layers/modeling/transformer_decoder.py index 92734fca2e..3a3cda3f21 100644 --- a/keras_nlp/layers/modeling/transformer_decoder.py +++ b/keras_nlp/layers/modeling/transformer_decoder.py @@ -199,12 +199,12 @@ def build( ) if hasattr(self._cross_attention_layer, "_build_from_signature"): self._cross_attention_layer._build_from_signature( - query=encoder_sequence_shape, + query=decoder_sequence_shape, value=encoder_sequence_shape, ) else: self._cross_attention_layer.build( - query_shape=encoder_sequence_shape, + query_shape=decoder_sequence_shape, value_shape=encoder_sequence_shape, ) self._cross_attention_layer_norm = keras.layers.LayerNormalization( @@ -212,7 +212,7 @@ def build( dtype=self.dtype_policy, name="cross_attention_layer_norm", ) - self._cross_attention_layer_norm.build(encoder_sequence_shape) + self._cross_attention_layer_norm.build(decoder_sequence_shape) self._cross_attention_dropout = keras.layers.Dropout( rate=self.dropout, dtype=self.dtype_policy, diff --git a/keras_nlp/layers/modeling/transformer_decoder_test.py b/keras_nlp/layers/modeling/transformer_decoder_test.py index 12cd189e74..9a7bc9c1be 100644 --- a/keras_nlp/layers/modeling/transformer_decoder_test.py +++ b/keras_nlp/layers/modeling/transformer_decoder_test.py @@ -168,3 +168,12 @@ def call(outputs, cache): output, output_cache = call(outputs, input_cache) self.assertAllClose(output, no_loop_outputs) self.assertAllClose(output_cache, no_loop_cache) + + def test_different_feature_dimension_for_encoder_and_decoder_sequence(self): + decoder = TransformerDecoder( + intermediate_dim=4, + num_heads=2, + ) + decoder_sequence = ops.random.uniform(shape=[1, 4, 6]) + encoder_sequence = ops.random.uniform(shape=[1, 4, 5]) + decoder(decoder_sequence, encoder_sequence) From a9db112859d08eb5a57343c6b9bbf2c71a0616db Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 5 Oct 2023 11:52:16 -0700 Subject: [PATCH 04/87] Doc updates to try to clarify multi-backend support (#1259) --- README.md | 111 +++++++++++++++++++++++++++++------------------------- 1 file changed, 59 insertions(+), 52 deletions(-) diff --git a/README.md b/README.md index abf7e38813..f109464690 100644 --- a/README.md +++ b/README.md @@ -1,32 +1,25 @@ # KerasNLP: Modular NLP Workflows for Keras [![](https://github.com/keras-team/keras-nlp/workflows/Tests/badge.svg?branch=master)](https://github.com/keras-team/keras-nlp/actions?query=workflow%3ATests+branch%3Amaster) -![Python](https://img.shields.io/badge/python-v3.8.0+-success.svg) -![Tensorflow](https://img.shields.io/badge/tensorflow-v2.5.0+-success.svg) +![Python](https://img.shields.io/badge/python-v3.9.0+-success.svg) [![contributions 
welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/keras-nlp/issues) -KerasNLP is a natural language processing library that works natively -with TensorFlow, JAX, or PyTorch. Built on [Keras Core](https://keras.io/keras_core/announcement/), -these models, layers, metrics, callbacks, etc., can be trained and serialized -in any framework and re-used in another without costly migrations. See "Using -KerasNLP with Keras Core" below for more details on multi-framework KerasNLP. - -KerasNLP supports users through their entire development cycle. Our workflows -are built from modular components that have state-of-the-art preset weights and -architectures when used out-of-the-box and are easily customizable when more -control is needed. - -This library is an extension of the core Keras API; all high-level modules are -[`Layers`](https://keras.io/api/layers/) or -[`Models`](https://keras.io/api/models/) that receive that same level of polish -as core Keras. If you are familiar with Keras, congratulations! You already -understand most of KerasNLP. +KerasNLP is a natural language processing library that works natively +with TensorFlow, JAX, or PyTorch. Built on [multi-backend Keras](https://keras.io/keras_core/announcement/) +(Keras 3), these models, layers, metrics, and tokenizers can be trained and +serialized in any framework and re-used in another without costly migrations. + +KerasNLP supports users through their entire development cycle. Our workflows +are built from modular components that have state-of-the-art preset weights when +used out-of-the-box and are easily customizable when more control is needed. -See our [Getting Started guide](https://keras.io/guides/keras_nlp/getting_started) -for example usage of our modular API starting with evaluating pretrained models -and building up to designing a novel transformer architecture and training a -tokenizer from scratch. +This library is an extension of the core Keras API; all high-level modules are +[`Layers`](https://keras.io/api/layers/) or +[`Models`](https://keras.io/api/models/) that receive that same level of polish +as core Keras. If you are familiar with Keras, congratulations! You already +understand most of KerasNLP. -We are a new and growing project and welcome [contributions](CONTRIBUTING.md). +See our [Getting Started guide](https://keras.io/guides/keras_nlp/getting_started) +to start learning our API. We welcome [contributions](CONTRIBUTING.md). ## Quick Links @@ -59,41 +52,16 @@ pip to install directly from the master branch on github: ``` pip install git+https://github.com/keras-team/keras-nlp.git --upgrade ``` -## Using KerasNLP with Keras Core - -As of version `0.6.0`, KerasNLP supports multiple backends with Keras Core out -of the box. There are two ways to configure KerasNLP to run with multi-backend -support: - -1. Via the `KERAS_BACKEND` environment variable. If set, then KerasNLP will be -using Keras Core with the backend specified (e.g., `KERAS_BACKEND=jax`). -2. Via the `.keras/keras.json` and `.keras/keras_nlp.json` config files (which -are automatically created the first time you import KerasNLP): - - Set your backend of choice in `.keras/keras.json`; e.g., `"backend": "jax"`. - - Set `"multi_backend": True` in `.keras/keras_nlp.json`. 
-
-Once that configuration step is done, you can just import KerasNLP and start
-using it on top of your backend of choice:
-
-```python
-import keras_nlp
-
-gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en")
-gpt2_lm.generate("My trip to Yosemite was", max_length=200)
-```
-
-Until Keras Core is officially released as Keras 3.0, KerasNLP will use
-`tf.keras` as the default backend. To restore this default behavior, simply
-`unset KERAS_BACKEND` and ensure that `"multi_backend": False` or is unset in
-`.keras/keras_nlp.json`. You will need to restart the Python runtime for changes
-to take effect.
-
 ## Quickstart
 
-Fine-tune BERT on a small sentiment analysis task using the 
+Fine-tune BERT on a small sentiment analysis task using the
 [`keras_nlp.models`](https://keras.io/api/keras_nlp/models/) API:
 
 ```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"  # Or "tensorflow", or "torch".
+
 import keras_nlp
 import tensorflow_datasets as tfds
 
 imdb_train, imdb_test = tfds.load(
@@ -107,6 +75,7 @@ imdb_train, imdb_test = tfds.load(
 classifier = keras_nlp.models.BertClassifier.from_preset(
     "bert_base_en_uncased",
     num_classes=2,
+    activation="softmax",
 )
 # Fine-tune on IMDb movie reviews.
 classifier.fit(imdb_train, validation_data=imdb_test)
@@ -116,6 +85,44 @@ classifier.predict(["What an amazing movie!", "A total waste of my time."])
 
 For more in-depth guides and examples, visit https://keras.io/keras_nlp/.
 
+## Configuring your backend
+
+**Keras 3** is an upcoming release of the Keras library which supports
+TensorFlow, JAX, or Torch as backends. This is supported today in KerasNLP,
+but will not be enabled by default until the official release of Keras 3. If you
+`pip install keras-nlp` and run a script or notebook without changes, you will
+be using TensorFlow and **Keras 2**.
+
+If you would like to enable a preview of the Keras 3 behavior, you can do
+so by setting the `KERAS_BACKEND` environment variable. For example:
+
+```shell
+export KERAS_BACKEND=jax
+```
+
+Or in Colab, with:
+
+```python
+import os
+os.environ["KERAS_BACKEND"] = "jax"
+
+import keras_nlp
+```
+
+> [!IMPORTANT]
+> Make sure to set `KERAS_BACKEND` before importing any Keras libraries; it
+> will be used to set up Keras when it is first imported.
+
+Until the Keras 3 release, KerasNLP will use a preview of Keras 3 on PyPI named
+[keras-core](https://pypi.org/project/keras-core/).
+
+> [!IMPORTANT]
+> If you set the `KERAS_BACKEND` variable, you should `import keras_core as keras`
+> instead of `import keras`. This is a temporary step until Keras 3 is out!
+
+To restore the default **Keras 2** behavior, `unset KERAS_BACKEND` before
+importing Keras and KerasNLP.
+
 ## Compatibility
 
 We follow [Semantic Versioning](https://semver.org/), and plan to

From f0ab133ee68d9f9c3f49c7f6879df288e4fe9c52 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Fri, 6 Oct 2023 09:17:59 -0700
Subject: [PATCH 05/87] Remove unused TPU testing for backbones (#1266)

TPU testing would be great, but we should not do it quite like this.

- These are not run in any form of CI right now. Do they run at all? I
  don't think anyone has tried these for many months.
- They basically replicate the simplest tests from the main fixture
  inside a TPU strategy scope. We would be much better off handling this
  inside some common test setup, rather than replicating test code.
- They would not work multi-backend.
- They do not test presets, tasks, backprop, and a lot of the actual important functionality for our pretrained model offering. Let's remove for now as I consolidate our preset testing code. We can bring this back later when we have a plan to run these in a more sustainable way for our test suite. --- keras_nlp/conftest.py | 45 +------------------ .../models/albert/albert_backbone_test.py | 31 ------------- keras_nlp/models/bart/bart_backbone_test.py | 28 ------------ keras_nlp/models/bert/bert_backbone_test.py | 27 ----------- .../deberta_v3/deberta_v3_backbone_test.py | 27 ----------- .../distil_bert/distil_bert_backbone_test.py | 26 ----------- keras_nlp/models/f_net/f_net_backbone_test.py | 26 ----------- keras_nlp/models/gpt2/gpt2_backbone_test.py | 26 ----------- .../gpt_neo_x/gpt_neo_x_backbone_test.py | 26 ----------- keras_nlp/models/opt/opt_backbone_test.py | 26 ----------- .../models/roberta/roberta_backbone_test.py | 26 ----------- keras_nlp/models/t5/t5_backbone_test.py | 27 ----------- .../models/whisper/whisper_backbone_test.py | 41 ----------------- .../xlm_roberta/xlm_roberta_backbone_test.py | 26 ----------- keras_nlp/models/xlnet/xlnet_backbone_test.py | 28 ------------ 15 files changed, 2 insertions(+), 434 deletions(-) diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py index 6bcda7ed0e..04daf5cd37 100644 --- a/keras_nlp/conftest.py +++ b/keras_nlp/conftest.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - import pytest import tensorflow as tf @@ -21,21 +19,6 @@ from keras_nlp.backend import keras -@pytest.fixture(scope="session") -def tpu_strategy(): - tpu_name = os.getenv("KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS") - resolver = tf.distribute.cluster_resolver.TPUClusterResolver.connect( - tpu=tpu_name, - ) - return tf.distribute.TPUStrategy(resolver) - - -@pytest.fixture(scope="class") -def tpu_test_class(request, tpu_strategy): - # set a class attribute on the invoking test context - request.cls.tpu_strategy = tpu_strategy - - def pytest_addoption(parser): parser.addoption( "--run_large", @@ -49,18 +32,6 @@ def pytest_addoption(parser): default=False, help="run extra_large tests", ) - parser.addoption( - "--run_tpu", - action="store_true", - default=False, - help="run tpu tests", - ) - parser.addoption( - "--mixed_precision", - action="store_true", - default=False, - help="run with mixed precision", - ) parser.addoption( "--docstring_module", action="store", @@ -70,18 +41,13 @@ def pytest_addoption(parser): def pytest_configure(config): - if config.getoption("--mixed_precision"): - keras.mixed_precision.set_global_policy("mixed_float16") - config.addinivalue_line( - "markers", "large: mark test as being slow or requiring a network" - ) config.addinivalue_line( "markers", - "extra_large: mark test as being too large to run continuously", + "large: mark test as being slow or requiring a network", ) config.addinivalue_line( "markers", - "tpu: mark test as tpu test", + "extra_large: mark test as being too large to run continuously", ) config.addinivalue_line( "markers", @@ -93,7 +59,6 @@ def pytest_collection_modifyitems(config, items): run_extra_large_tests = config.getoption("--run_extra_large") # Run large tests for --run_extra_large or --run_large. run_large_tests = config.getoption("--run_large") or run_extra_large_tests - run_tpu = config.getoption("--run_tpu") # Messages to annotate skipped tests with. 
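    # (They are attached to matching tests in the loop at the end of this
    # function.)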
skip_large = pytest.mark.skipif( @@ -104,10 +69,6 @@ def pytest_collection_modifyitems(config, items): not run_extra_large_tests, reason="need --run_extra_large option to run", ) - skip_tpu = pytest.mark.skipif( - not run_tpu, - reason="need --run_tpu option to run", - ) skip_tf_only = pytest.mark.skipif( not backend_config.backend() == "tensorflow", reason="tests only run on tf backend", @@ -117,8 +78,6 @@ def pytest_collection_modifyitems(config, items): item.add_marker(skip_large) if "extra_large" in item.keywords: item.add_marker(skip_extra_large) - if "tpu" in item.keywords: - item.add_marker(skip_tpu) if "tf_only" in item.keywords: item.add_marker(skip_tf_only) diff --git a/keras_nlp/models/albert/albert_backbone_test.py b/keras_nlp/models/albert/albert_backbone_test.py index 5f93c5dc12..7dc4bb7f7e 100644 --- a/keras_nlp/models/albert/albert_backbone_test.py +++ b/keras_nlp/models/albert/albert_backbone_test.py @@ -101,34 +101,3 @@ def test_saved_model(self): self.assertAllClose( model_output["pooled_output"], restored_output["pooled_output"] ) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class AlbertBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = AlbertBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - num_groups=1, - num_inner_repetitions=1, - embedding_dim=16, - hidden_dim=2, - intermediate_dim=2, - max_sequence_length=4, - ) - - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "segment_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/bart/bart_backbone_test.py b/keras_nlp/models/bart/bart_backbone_test.py index feb643a4de..129d0f824a 100644 --- a/keras_nlp/models/bart/bart_backbone_test.py +++ b/keras_nlp/models/bart/bart_backbone_test.py @@ -91,31 +91,3 @@ def test_saved_model(self): model_output["decoder_sequence_output"], restored_output["decoder_sequence_output"], ) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class BartBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = BartBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "encoder_token_ids": np.ones((8, 128), dtype="int32"), - "encoder_padding_mask": np.ones((8, 128), dtype="int32"), - "decoder_token_ids": np.ones((8, 128), dtype="int32"), - "decoder_padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/bert/bert_backbone_test.py b/keras_nlp/models/bert/bert_backbone_test.py index 6374a6848f..3038cc4afb 100644 --- a/keras_nlp/models/bert/bert_backbone_test.py +++ b/keras_nlp/models/bert/bert_backbone_test.py @@ -85,30 +85,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class BertBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = BertBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "segment_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py index cd38d63f48..e37eca7f56 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py @@ -90,30 +90,3 @@ def test_saved_model(self): # Check that output matches. restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class DebertaV3BackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = DebertaV3Backbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - bucket_size=2, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py index 897e7ffc26..cc7f765231 100644 --- a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py @@ -83,29 +83,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class DistilBertTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = DistilBertBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/f_net/f_net_backbone_test.py b/keras_nlp/models/f_net/f_net_backbone_test.py index 39236fa5f1..6ce243f432 100644 --- a/keras_nlp/models/f_net/f_net_backbone_test.py +++ b/keras_nlp/models/f_net/f_net_backbone_test.py @@ -81,29 +81,3 @@ def test_saved_model(self): self.assertAllClose( model_output["pooled_output"], restored_output["pooled_output"] ) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class FNetBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = FNetBackbone( - vocabulary_size=100, - num_layers=2, - hidden_dim=16, - intermediate_dim=32, - max_sequence_length=128, - num_segments=4, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "segment_ids": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/gpt2/gpt2_backbone_test.py b/keras_nlp/models/gpt2/gpt2_backbone_test.py index 1a1cedcfe4..8fc779c634 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone_test.py +++ b/keras_nlp/models/gpt2/gpt2_backbone_test.py @@ -101,29 +101,3 @@ def test_create_layout_map(self): # bridge elsewhere and must disable. See # https://github.com/keras-team/keras-nlp/issues/1001 tf.config.experimental.disable_mlir_bridge() - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class GPT2BackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.model = GPT2Backbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.model.compile() - self.model.predict(self.input_dataset) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py index 803006a06a..c885a2682c 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py @@ -83,29 +83,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class GPTNeoXBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - GPTNeoXBackbone( - vocabulary_size=10, - num_layers=4, - num_heads=4, - hidden_dim=64, - intermediate_dim=64, - max_sequence_length=10, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.model.compile() - self.model.predict(self.input_dataset) diff --git a/keras_nlp/models/opt/opt_backbone_test.py b/keras_nlp/models/opt/opt_backbone_test.py index 012c99c8a6..1d7e54889c 100644 --- a/keras_nlp/models/opt/opt_backbone_test.py +++ b/keras_nlp/models/opt/opt_backbone_test.py @@ -101,29 +101,3 @@ def test_create_layout_map(self): # bridge elsewhere and must disable. See # https://github.com/keras-team/keras-nlp/issues/1001 tf.config.experimental.disable_mlir_bridge() - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class OPTBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = OPTBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=32, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/roberta/roberta_backbone_test.py b/keras_nlp/models/roberta/roberta_backbone_test.py index 9a466b527f..b90847bd5f 100644 --- a/keras_nlp/models/roberta/roberta_backbone_test.py +++ b/keras_nlp/models/roberta/roberta_backbone_test.py @@ -89,29 +89,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class RobertaBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = RobertaBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index ab4270d9d9..476304c566 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -110,30 +110,3 @@ def test_saved_model(self): restored_outputs = restored_model(self.input_batch) for key in ["encoder_sequence_output", "decoder_sequence_output"]: self.assertAllClose(outputs[key], restored_outputs[key]) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class T5BackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = T5Backbone( - vocabulary_size=4, - num_layers=2, - num_heads=2, - hidden_dim=4, - intermediate_dim=4, - ) - self.input_batch = { - "token_ids": np.ones((8, 4), dtype="int32"), - "padding_mask": np.ones((8, 4), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - outputs = self.backbone.predict(self.input_dataset) - self.assertIn("encoder_sequence_output", outputs) - self.assertIn("decoder_sequence_output", outputs) diff --git a/keras_nlp/models/whisper/whisper_backbone_test.py b/keras_nlp/models/whisper/whisper_backbone_test.py index e3266ea9d5..4f0ac0897c 100644 --- a/keras_nlp/models/whisper/whisper_backbone_test.py +++ b/keras_nlp/models/whisper/whisper_backbone_test.py @@ -117,44 +117,3 @@ def test_saved_model(self): model_output["decoder_sequence_output"], restored_output["decoder_sequence_output"], ) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class WhisperBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = WhisperBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_encoder_sequence_length=6, - max_decoder_sequence_length=6, - ) - - self.input_batch = { - "encoder_features": np.ones( - ( - 8, - self.backbone.max_encoder_sequence_length, - 80, - ), - dtype="int32", - ), - "decoder_token_ids": np.ones( - (8, self.backbone.max_decoder_sequence_length), dtype="int32" - ), - "decoder_padding_mask": np.ones( - (8, self.backbone.max_decoder_sequence_length), dtype="int32" - ), - } - - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py index 26559f990d..426cbe30e7 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py @@ -88,29 +88,3 @@ def test_saved_model(self): # Check that output 
matches. restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class XLMRobertaBackboneTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = XLMRobertaBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - max_sequence_length=128, - ) - self.input_batch = { - "token_ids": np.ones((8, 128), dtype="int32"), - "padding_mask": np.ones((8, 128), dtype="int32"), - } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) diff --git a/keras_nlp/models/xlnet/xlnet_backbone_test.py b/keras_nlp/models/xlnet/xlnet_backbone_test.py index f8cdc3e7be..3ce0e62c89 100644 --- a/keras_nlp/models/xlnet/xlnet_backbone_test.py +++ b/keras_nlp/models/xlnet/xlnet_backbone_test.py @@ -15,7 +15,6 @@ import os import numpy as np -import pytest import tensorflow as tf from keras_nlp.backend import keras @@ -81,30 +80,3 @@ def test_saved_model(self): # Check that output matches. restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - -@pytest.mark.tpu -@pytest.mark.usefixtures("tpu_test_class") -class XLNetTPUTest(TestCase): - def setUp(self): - with self.tpu_strategy.scope(): - self.backbone = XLNetBackbone( - vocabulary_size=1000, - num_layers=2, - num_heads=2, - hidden_dim=64, - intermediate_dim=128, - ) - self.input_batch = { - "token_ids": np.ones((2, 7), dtype=np.int32), - "padding_mask": np.ones((2, 7), dtype=np.int32), - "segment_ids": np.ones((2, 7), dtype=np.int32), - } - - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_predict(self): - self.backbone.compile() - self.backbone.predict(self.input_dataset) From 7b2f69b4256aaced7ca962d6ff2609ef9385ee67 Mon Sep 17 00:00:00 2001 From: Calvin Giles <2253836+calvingiles@users.noreply.github.com> Date: Mon, 9 Oct 2023 09:09:05 +1300 Subject: [PATCH 06/87] Make gelu a function, not a lambda so it can be loaded without safe_mode=False (#1262) * Make gelu a function, not a lambda so it can be loaded without safe_mode=False * trigger tests after CLA sign * Replace all approximate gelu usages * Replace lambda for non approximate gelu --- keras_nlp/models/albert/albert_backbone.py | 5 ++--- keras_nlp/models/albert/albert_masked_lm.py | 5 ++--- keras_nlp/models/bart/bart_backbone.py | 8 ++------ keras_nlp/models/bert/bert_backbone.py | 5 ++--- keras_nlp/models/deberta_v3/deberta_v3_backbone.py | 4 +--- keras_nlp/models/deberta_v3/deberta_v3_classifier.py | 2 +- keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py | 4 +--- keras_nlp/models/f_net/f_net_backbone.py | 5 ++--- keras_nlp/models/gpt2/gpt2_backbone.py | 5 ++--- keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py | 5 ++--- keras_nlp/models/whisper/whisper_backbone.py | 8 ++------ keras_nlp/utils/keras_utils.py | 5 +++++ 12 files changed, 24 insertions(+), 37 deletions(-) diff --git a/keras_nlp/models/albert/albert_backbone.py b/keras_nlp/models/albert/albert_backbone.py index 51da1f49a7..414bb97e87 100644 --- a/keras_nlp/models/albert/albert_backbone.py +++ b/keras_nlp/models/albert/albert_backbone.py @@ -21,6 +21,7 @@ from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder from keras_nlp.models.albert.albert_presets import backbone_presets from 
keras_nlp.models.backbone import Backbone +from keras_nlp.utils.keras_utils import gelu_approximate from keras_nlp.utils.python_utils import classproperty @@ -180,9 +181,7 @@ def get_group_layer(group_idx): TransformerEncoder( num_heads=num_heads, intermediate_dim=intermediate_dim, - activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + activation=gelu_approximate, dropout=dropout, layer_norm_epsilon=1e-12, kernel_initializer=albert_kernel_initializer(), diff --git a/keras_nlp/models/albert/albert_masked_lm.py b/keras_nlp/models/albert/albert_masked_lm.py index 423f196c0d..e95af7c207 100644 --- a/keras_nlp/models/albert/albert_masked_lm.py +++ b/keras_nlp/models/albert/albert_masked_lm.py @@ -24,6 +24,7 @@ ) from keras_nlp.models.albert.albert_presets import backbone_presets from keras_nlp.models.task import Task +from keras_nlp.utils.keras_utils import gelu_approximate from keras_nlp.utils.python_utils import classproperty @@ -107,9 +108,7 @@ def __init__(self, backbone, preprocessor=None, **kwargs): outputs = MaskedLMHead( vocabulary_size=backbone.vocabulary_size, token_embedding=backbone.token_embedding, - intermediate_activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + intermediate_activation=gelu_approximate, kernel_initializer=albert_kernel_initializer(), name="mlm_head", )(backbone_outputs["sequence_output"], inputs["mask_positions"]) diff --git a/keras_nlp/models/bart/bart_backbone.py b/keras_nlp/models/bart/bart_backbone.py index 203d3ab2d7..2679b84a9f 100644 --- a/keras_nlp/models/bart/bart_backbone.py +++ b/keras_nlp/models/bart/bart_backbone.py @@ -157,9 +157,7 @@ def __init__( x = TransformerEncoder( num_heads=num_heads, intermediate_dim=intermediate_dim, - activation=lambda x: keras.activations.gelu( - x, approximate=False - ), + activation=keras.activations.gelu, dropout=dropout, layer_norm_epsilon=1e-5, kernel_initializer=bart_kernel_initializer(), @@ -200,9 +198,7 @@ def __init__( intermediate_dim=intermediate_dim, num_heads=num_heads, dropout=dropout, - activation=lambda x: keras.activations.gelu( - x, approximate=False - ), + activation=keras.activations.gelu, layer_norm_epsilon=1e-5, kernel_initializer=bart_kernel_initializer(), name=f"transformer_decoder_layer_{i}", diff --git a/keras_nlp/models/bert/bert_backbone.py b/keras_nlp/models/bert/bert_backbone.py index 381f1f8cb4..ea3e3ad868 100644 --- a/keras_nlp/models/bert/bert_backbone.py +++ b/keras_nlp/models/bert/bert_backbone.py @@ -21,6 +21,7 @@ from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder from keras_nlp.models.backbone import Backbone from keras_nlp.models.bert.bert_presets import backbone_presets +from keras_nlp.utils.keras_utils import gelu_approximate from keras_nlp.utils.python_utils import classproperty @@ -151,9 +152,7 @@ def __init__( x = TransformerEncoder( num_heads=num_heads, intermediate_dim=intermediate_dim, - activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + activation=gelu_approximate, dropout=dropout, layer_norm_epsilon=1e-12, kernel_initializer=bert_kernel_initializer(), diff --git a/keras_nlp/models/deberta_v3/deberta_v3_backbone.py b/keras_nlp/models/deberta_v3/deberta_v3_backbone.py index b963d35433..76e2cf9dd7 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_backbone.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_backbone.py @@ -153,9 +153,7 @@ def __init__( max_position_embeddings=max_sequence_length, bucket_size=bucket_size, dropout=dropout, - activation=lambda x: keras.activations.gelu( - x, 
approximate=False - ), + activation=keras.activations.gelu, layer_norm_epsilon=1e-7, kernel_initializer=deberta_kernel_initializer(), name=f"disentangled_attention_encoder_layer_{i}", diff --git a/keras_nlp/models/deberta_v3/deberta_v3_classifier.py b/keras_nlp/models/deberta_v3/deberta_v3_classifier.py index d477ab83a4..b03122064d 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_classifier.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_classifier.py @@ -170,7 +170,7 @@ def __init__( x = keras.layers.Dropout(dropout, name="pooled_dropout")(x) x = keras.layers.Dense( hidden_dim, - activation=lambda x: keras.activations.gelu(x, approximate=False), + activation=keras.activations.gelu, name="pooled_dense", )(x) x = keras.layers.Dropout(backbone.dropout, name="classifier_dropout")(x) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py index 91fdfbda5a..bf6a850a54 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm.py @@ -114,9 +114,7 @@ def __init__( outputs = MaskedLMHead( vocabulary_size=backbone.vocabulary_size, token_embedding=backbone.token_embedding, - intermediate_activation=lambda x: keras.activations.gelu( - x, approximate=False - ), + intermediate_activation=keras.activations.gelu, kernel_initializer=deberta_kernel_initializer(), name="mlm_head", )(backbone_outputs, inputs["mask_positions"]) diff --git a/keras_nlp/models/f_net/f_net_backbone.py b/keras_nlp/models/f_net/f_net_backbone.py index a2e6e8ce95..ac4d290b02 100644 --- a/keras_nlp/models/f_net/f_net_backbone.py +++ b/keras_nlp/models/f_net/f_net_backbone.py @@ -21,6 +21,7 @@ from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone from keras_nlp.models.f_net.f_net_presets import backbone_presets +from keras_nlp.utils.keras_utils import gelu_approximate from keras_nlp.utils.python_utils import classproperty @@ -156,9 +157,7 @@ def __init__( for i in range(num_layers): x = FNetEncoder( intermediate_dim=intermediate_dim, - activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + activation=gelu_approximate, dropout=dropout, layer_norm_epsilon=1e-12, kernel_initializer=f_net_kernel_initializer(), diff --git a/keras_nlp/models/gpt2/gpt2_backbone.py b/keras_nlp/models/gpt2/gpt2_backbone.py index cb734e3db7..caf73c0606 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone.py +++ b/keras_nlp/models/gpt2/gpt2_backbone.py @@ -25,6 +25,7 @@ from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder from keras_nlp.models.backbone import Backbone from keras_nlp.models.gpt2.gpt2_presets import backbone_presets +from keras_nlp.utils.keras_utils import gelu_approximate from keras_nlp.utils.python_utils import classproperty @@ -139,9 +140,7 @@ def __init__( num_heads=num_heads, dropout=dropout, layer_norm_epsilon=1e-05, - activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + activation=gelu_approximate, kernel_initializer=_gpt_2_kernel_initializer(stddev=0.02), normalize_first=True, name=f"transformer_layer_{i}", diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py index 62dba1dd2d..6804331aed 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py @@ -17,6 +17,7 @@ from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from 
keras_nlp.models.backbone import Backbone from keras_nlp.models.gpt_neo_x.gpt_neo_x_decoder import GPTNeoXDecoder +from keras_nlp.utils.keras_utils import gelu_approximate def _gpt_neo_x_kernel_initializer(stddev=0.02): @@ -106,9 +107,7 @@ def __init__( rotary_percentage=rotary_percentage, rotary_max_wavelength=rotary_max_wavelength, layer_norm_epsilon=layer_norm_epsilon, - activation=lambda x: keras.activations.gelu( - x, approximate=True - ), + activation=gelu_approximate, kernel_initializer=_gpt_neo_x_kernel_initializer(stddev=0.02), name=f"transformer_layer_{i}", )(x, decoder_padding_mask=padding_mask) diff --git a/keras_nlp/models/whisper/whisper_backbone.py b/keras_nlp/models/whisper/whisper_backbone.py index 87c94ae7fd..32cfab215b 100644 --- a/keras_nlp/models/whisper/whisper_backbone.py +++ b/keras_nlp/models/whisper/whisper_backbone.py @@ -187,9 +187,7 @@ def __init__( x = WhisperEncoder( num_heads=num_heads, intermediate_dim=intermediate_dim, - activation=lambda x: keras.activations.gelu( - x, approximate=False - ), + activation=keras.activations.gelu, layer_norm_epsilon=1e-5, dropout=dropout, kernel_initializer=whisper_kernel_initializer(), @@ -229,9 +227,7 @@ def __init__( intermediate_dim=intermediate_dim, num_heads=num_heads, dropout=dropout, - activation=lambda x: keras.activations.gelu( - x, approximate=False - ), + activation=keras.activations.gelu, layer_norm_epsilon=1e-5, kernel_initializer=whisper_kernel_initializer(), normalize_first=True, diff --git a/keras_nlp/utils/keras_utils.py b/keras_nlp/utils/keras_utils.py index 6e4d43193c..96750754a2 100644 --- a/keras_nlp/utils/keras_utils.py +++ b/keras_nlp/utils/keras_utils.py @@ -155,3 +155,8 @@ def print_row(fields, positions, print_fn, nested_level=0): line += " " * (positions[col] - len(line)) line += "|" * nested_level print_fn(line) + + +@keras.saving.register_keras_serializable(package="keras_nlp") +def gelu_approximate(x): + return keras.activations.gelu(x, approximate=True) From a4db0d153c9472a5e1e570c832886a7e52e12550 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:08:33 -0700 Subject: [PATCH 07/87] Update requirements and install instructions for multi-backend keras (#1257) * Update requirements and install instructions for multi-backend keras * Address comments * less strict bounds * Update to released tf-text * Match keras versions --- .github/workflows/actions.yml | 3 -- CONTRIBUTING.md | 93 ++++++++++++++------------------ requirements-common.txt | 3 ++ requirements-jax-cuda.txt | 14 +++++ requirements-macos-m1.txt | 16 ------ requirements-nightly.txt | 7 --- requirements-tensorflow-cuda.txt | 13 +++++ requirements-torch-cuda.txt | 13 +++++ requirements.txt | 16 ++++-- 9 files changed, 93 insertions(+), 85 deletions(-) create mode 100644 requirements-jax-cuda.txt delete mode 100644 requirements-macos-m1.txt delete mode 100644 requirements-nightly.txt create mode 100644 requirements-tensorflow-cuda.txt create mode 100644 requirements-torch-cuda.txt diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 926d6795a0..7d4a7d4ed0 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -30,7 +30,6 @@ jobs: - name: Install dependencies run: | pip install -r requirements.txt --progress-bar off - pip install jax[cpu] --progress-bar off pip install --no-deps -e "." 
--progress-bar off
       - name: Test with pytest
         run: |
@@ -66,8 +65,6 @@ jobs:
       - name: Install dependencies
         run: |
           pip install -r requirements.txt --progress-bar off
-          pip install torch>=2.0.1+cpu --progress-bar off
-          pip install jax[cpu] --progress-bar off
           pip install --no-deps -e "." --progress-bar off
       - name: Test with pytest
         env:
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d5af724c00..688c6311e2 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -81,7 +81,7 @@ Once the pull request is approved, a team member will take care of merging.
 
 ## Setting up an Environment
 
-Python 3.7 or later is required.
+Python 3.9 or later is required.
 
 Setting up your KerasNLP development environment requires you to fork the
 KerasNLP repository and clone it locally. With the
@@ -93,72 +93,57 @@ cd keras-nlp
 ```
 
 Next we must set up a python environment with the correct dependencies. We
-recommend using `conda` to install tensorflow dependencies (such as CUDA), and
-`pip` to install python packages from PyPI. The exact method will depend on your
-OS.
+recommend using `conda` to set up a base environment, and `pip` to install
+python packages from PyPI. The exact method will depend on your OS.
 
-**Note**: Please be careful not to use the `tensorflow` pre-packaged with conda,
-which is incompatible with `tensorflow-text` on PyPi, and follow the
-instructions below.
+**Note**: Be careful not to mix pre-packaged tensorflow and jax libraries in
+`conda` with PyPI packages from `pip`. We recommend pulling *all* KerasNLP
+dependencies via `pip` as described below.
 
 ### Linux (recommended)
 
-To setup a complete environment with TensorFlow, a local install of keras-nlp,
-and all development tools, run the following or adapt it to suit your needs.
+For developing and unit testing the library, a CPU-only environment is often
+sufficient. For any training or inference with the library, you will quickly
+want accelerator support. The easiest way to get GPU support across all of our
+backends is to set up a few different python environments and pull in all cuda
+dependencies via `pip`.
+
+The shell snippet below will install four conda environments: `keras-nlp-cpu`,
+`keras-nlp-jax`, `keras-nlp-torch`, and `keras-nlp-tensorflow`. The cpu
+environment supports all backends without cuda, and each backend environment
+has cuda support.
 
 ```shell
-# Create and activate conda environment.
-conda create -n keras-nlp python=3.9
-conda activate keras-nlp
-
-# The following can be omitted if GPU support is not required.
-conda install -c conda-forge cudatoolkit-dev=11.2 cudnn=8.1.0
-mkdir -p $CONDA_PREFIX/etc/conda/activate.d/
-echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
-echo 'export XLA_FLAGS=--xla_gpu_cuda_data_dir=$CONDA_PREFIX/' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
-source $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh
-
-# Install dependencies.
-python -m pip install --upgrade pip
-python -m pip install -r requirements.txt
-python -m pip install -e "."
+conda create -y -n keras-nlp-cpu python=3.10
+conda activate keras-nlp-cpu
+pip install -r requirements.txt # install deps
+python pip_build.py --install # install keras-nlp
+
+for backend in "jax" "torch" "tensorflow"; do
+    conda create -y -n keras-nlp-${backend} python=3.10
+    conda activate keras-nlp-${backend}
+    pip install -r requirements-${backend}-cuda.txt # install deps
+    python pip_build.py --install # install keras-nlp
+done
 ```
 
-### MacOS
-
-⚠️⚠️⚠️ MacOS binaries are for the M1 architecture are not currently available from
-official sources. You can try experimental development workflow leveraging the
-[tensorflow metal plugin](https://developer.apple.com/metal/tensorflow-plugin/)
-and a [community maintained build](https://github.com/sun1638650145/Libraries-and-Extensions-for-TensorFlow-for-Apple-Silicon)
-of `tensorflow-text`. These binaries are not provided by Google, so proceed at
-your own risk.
-
-#### Experimental instructions for Arm (M1)
+To activate the jax environment and set keras to use jax, run:
 
 ```shell
-# Create and activate conda environment.
-conda create -n keras-nlp python=3.9
-conda activate keras-nlp
-
-# Install dependencies.
-conda install -c apple tensorflow-deps=2.9
-python -m pip install --upgrade pip
-python -m pip install -r requirements-macos-m1.txt
-python -m pip install -e "."
+conda activate keras-nlp-jax && export KERAS_BACKEND=jax
 ```
 
-#### Instructions for x86 (Intel)
+### MacOS
 
-```shell
-# Create and activate conda environment.
-conda create -n keras-nlp python=3.9
-conda activate keras-nlp
-
-# Install dependencies.
-python -m pip install --upgrade pip
-python -m pip install -r requirements.txt
-python -m pip install -e "."
-```
+`tensorflow-text` does not release precompiled binaries for MacOS M-series
+chips, though the library does support building from source on MacOS.
+
+We strongly recommend a Linux development environment for an easy contribution
+experience. To build a dev environment from scratch on MacOS, see the following
+guides:
+
+https://developer.apple.com/metal/tensorflow-plugin/
+https://github.com/tensorflow/text
 
 ### Windows
diff --git a/requirements-common.txt b/requirements-common.txt
index 21334a40c2..44661e315a 100644
--- a/requirements-common.txt
+++ b/requirements-common.txt
@@ -16,3 +16,6 @@ namex
 # Optional deps.
 rouge-score
 sentencepiece
+tensorflow-datasets
+# Breakage fix.
+ml-dtypes==0.2.0
diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt
new file mode 100644
index 0000000000..bb115b14f8
--- /dev/null
+++ b/requirements-jax-cuda.txt
@@ -0,0 +1,14 @@
+# Tensorflow cpu-only version.
+tensorflow>=2.14.0
+tensorflow-text>=2.14.0
+
+# Torch cpu-only version.
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch>=2.1.0
+torchvision>=0.16.0
+
+# Jax with cuda support.
+--find-links https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+jax[cuda12_pip]
+
+-r requirements-common.txt
diff --git a/requirements-macos-m1.txt b/requirements-macos-m1.txt
deleted file mode 100644
index 05dd07e604..0000000000
--- a/requirements-macos-m1.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-# WARNING: KerasNLP has no official support for MacOS M1 at this time. The
-# following will pull required depenencies from the following external sources.
-# - https://developer.apple.com/metal/tensorflow-plugin/
-# - https://github.com/sun1638650145/Libraries-and-Extensions-for-TensorFlow-for-Apple-Silicon/
-# These are not provided by Google, please review both of these dependencies
-# before proceeding.
-
-# Core deps.
-tensorflow-macos~=2.9
-https://github.com/sun1638650145/Libraries-and-Extensions-for-TensorFlow-for-Apple-Silicon/releases/download/v2.9/tensorflow_text-2.9.0-cp39-cp39-macosx_11_0_arm64.whl
-tensorflow-datasets
-# The metal plugin breaks many tests, so is not enabled by default.
-# tensorflow-metal~=0.5
-
-# Common deps.
--r requirements-common.txt
diff --git a/requirements-nightly.txt b/requirements-nightly.txt
deleted file mode 100644
index 22a0c6e55a..0000000000
--- a/requirements-nightly.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-# Core deps.
-tf-nightly
-tensorflow-text-nightly
-tfds-nightly
-
-# Common deps.
--r requirements-common.txt
diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt
new file mode 100644
index 0000000000..4b2cf167ea
--- /dev/null
+++ b/requirements-tensorflow-cuda.txt
@@ -0,0 +1,13 @@
+# Tensorflow with cuda support.
+tensorflow[and-cuda]>=2.14.0
+tensorflow-text>=2.14.0
+
+# Torch cpu-only version.
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch>=2.1.0
+torchvision>=0.16.0
+
+# Jax cpu-only version.
+jax[cpu]
+
+-r requirements-common.txt
diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt
new file mode 100644
index 0000000000..14e94dd862
--- /dev/null
+++ b/requirements-torch-cuda.txt
@@ -0,0 +1,13 @@
+# Tensorflow cpu-only version.
+tensorflow>=2.14.0
+tensorflow-text>=2.14.0
+
+# Torch with cuda support.
+--extra-index-url https://download.pytorch.org/whl/cu118
+torch==2.1.0
+torchvision==0.16.0
+
+# Jax cpu-only version.
+jax[cpu]
+
+-r requirements-common.txt
diff --git a/requirements.txt b/requirements.txt
index 99adbbd656..aa289402fd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,13 @@
-# Core deps.
-tensorflow~=2.13.0
-tensorflow-text~=2.13.0
-tensorflow-datasets
+# Tensorflow.
+tensorflow>=2.14.0
+tensorflow-text>=2.14.0
+
+# Torch.
+--extra-index-url https://download.pytorch.org/whl/cpu
+torch>=2.1.0
+torchvision>=0.16.0
+
+# Jax.
+jax[cpu]
 
-# Common deps.
 -r requirements-common.txt

From f6d240bcff1fee7ec41b71ebfccb8c9691c6c796 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Mon, 9 Oct 2023 15:44:35 -0700
Subject: [PATCH 08/87] Support Keras 3 installation (#1258)

* Support Keras 3 installation

Currently this is only available via github. Eventually this will be
available via `pip install keras`. We need it now to support testing
against the latest keras changes.

As part of these changes, we will also move off any private,
unexported symbols (no more keras_core.src).
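A condensed sketch of the import pattern this adopts (module paths are the
ones in the `keras_nlp/backend/ops.py` diff below; the `keras.ops` branch
assumes a Keras 3 install is present):

```python
# Before: wildcard imports reached into private modules, e.g.
# `from keras_core.src.ops import *`, which is not a stable, public API.

# After: dispatch on the installed Keras version and use public modules only.
from keras_nlp.backend import config

if config.keras_3():
    from keras.ops import *  # noqa: F403, F401
else:
    from keras_core.ops import *  # noqa: F403, F401
```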
* Fixes for whisper --- keras_nlp/backend/config.py | 26 ++++++++++++++--- keras_nlp/backend/keras.py | 6 ++-- keras_nlp/backend/ops.py | 24 ++++++---------- keras_nlp/backend/random.py | 8 +++--- .../cached_multi_head_attention_test.py | 7 +++-- .../layers/modeling/f_net_encoder_test.py | 4 +-- .../layers/modeling/masked_lm_head_test.py | 10 +++---- .../modeling/position_embedding_test.py | 7 +++-- .../modeling/reversible_embedding_test.py | 3 +- .../layers/modeling/rotary_embedding_test.py | 7 +++-- .../modeling/sine_position_encoding_test.py | 9 +++--- .../token_and_position_embedding_test.py | 3 +- .../modeling/transformer_decoder_test.py | 19 +++++++------ .../modeling/transformer_encoder_test.py | 9 +++--- .../modeling/transformer_layer_utils_test.py | 7 +++-- .../layers/preprocessing/random_deletion.py | 4 +-- keras_nlp/layers/preprocessing/random_swap.py | 4 +-- keras_nlp/metrics/bleu.py | 4 +-- keras_nlp/metrics/edit_distance.py | 4 +-- keras_nlp/metrics/perplexity.py | 4 +-- keras_nlp/metrics/rouge_base.py | 4 +-- .../models/albert/albert_presets_test.py | 5 ++-- keras_nlp/models/bart/bart_presets_test.py | 5 ++-- keras_nlp/models/bert/bert_presets_test.py | 5 ++-- .../deberta_v3/deberta_v3_presets_test.py | 5 ++-- .../distil_bert/distil_bert_presets_test.py | 5 ++-- keras_nlp/models/f_net/f_net_presets_test.py | 5 ++-- keras_nlp/models/gpt2/gpt2_presets_test.py | 3 +- keras_nlp/models/opt/opt_presets_test.py | 3 +- .../models/roberta/roberta_presets_test.py | 7 +++-- keras_nlp/models/task.py | 28 +++++++++++++------ .../whisper_audio_feature_extractor.py | 11 ++++++-- .../whisper_audio_feature_extractor_test.py | 4 +-- .../models/whisper/whisper_preprocessor.py | 4 --- .../whisper/whisper_preprocessor_test.py | 25 +++++++++-------- .../xlm_roberta/xlm_roberta_presets_test.py | 5 ++-- keras_nlp/tests/test_case.py | 6 ++-- keras_nlp/tokenizers/byte_pair_tokenizer.py | 4 +-- keras_nlp/tokenizers/byte_tokenizer.py | 4 +-- .../tokenizers/sentence_piece_tokenizer.py | 4 +-- .../tokenizers/unicode_codepoint_tokenizer.py | 4 +-- keras_nlp/tokenizers/word_piece_tokenizer.py | 4 +-- keras_nlp/utils/tensor_utils.py | 23 ++++++++------- 43 files changed, 194 insertions(+), 148 deletions(-) diff --git a/keras_nlp/backend/config.py b/keras_nlp/backend/config.py index 578e1746b7..9b85907d00 100644 --- a/keras_nlp/backend/config.py +++ b/keras_nlp/backend/config.py @@ -15,9 +15,11 @@ import json import os -import keras_core +import keras +from packaging import version _MULTI_BACKEND = False +_IS_KERAS_3 = False # Set Keras base dir path given KERAS_HOME env variable, if applicable. # Otherwise either ~/.keras or /tmp. @@ -59,16 +61,32 @@ # Except permission denied. pass -# Use keras-core if KERAS_BACKEND is set in the environment. +# If KERAS_BACKEND is set in the environment use multi-backend keras. if "KERAS_BACKEND" in os.environ and os.environ["KERAS_BACKEND"]: _MULTI_BACKEND = True +# If keras is version 3, use multi-backend keras (our only option). 
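+# (packaging's version.parse sorts dev and rc pre-releases below the final
+# release, so only a true Keras 3 release passes this check.)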
+_IS_KERAS_3 = version.parse(keras.__version__) >= version.parse("3.0.0") +if _IS_KERAS_3: + _MULTI_BACKEND = True + + +def keras_3(): + """Check if Keras 3 is installed.""" + return _IS_KERAS_3 + def multi_backend(): - """Check if keras_core is enabled.""" + """Check if multi-backend Keras is enabled.""" return _MULTI_BACKEND def backend(): """Check the backend framework.""" - return "tensorflow" if not multi_backend() else keras_core.config.backend() + if not multi_backend(): + return "tensorflow" + if not keras_3(): + import keras_core + + return keras_core.config.backend() + return keras.config.backend() diff --git a/keras_nlp/backend/keras.py b/keras_nlp/backend/keras.py index 85f0ebcb5b..865f62d1fc 100644 --- a/keras_nlp/backend/keras.py +++ b/keras_nlp/backend/keras.py @@ -16,9 +16,11 @@ import tensorflow as tf -from keras_nlp.backend.config import multi_backend +from keras_nlp.backend import config -if multi_backend(): +if config.keras_3(): + from keras import * # noqa: F403, F401 +elif config.multi_backend(): from keras_core import * # noqa: F403, F401 else: from tensorflow.keras import * # noqa: F403, F401 diff --git a/keras_nlp/backend/ops.py b/keras_nlp/backend/ops.py index 516834b6e5..f36a2d2d05 100644 --- a/keras_nlp/backend/ops.py +++ b/keras_nlp/backend/ops.py @@ -12,22 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. -import keras_core -import tensorflow as tf +from keras_nlp.backend import config -from keras_nlp.backend.config import multi_backend - -if multi_backend(): - from keras_core.src.ops import * # noqa: F403, F401 +if config.keras_3(): + from keras.ops import * # noqa: F403, F401 else: - from keras_core.src.backend.tensorflow import * # noqa: F403, F401 - from keras_core.src.backend.tensorflow.core import * # noqa: F403, F401 - from keras_core.src.backend.tensorflow.math import * # noqa: F403, F401 - from keras_core.src.backend.tensorflow.nn import * # noqa: F403, F401 - from keras_core.src.backend.tensorflow.numpy import * # noqa: F403, F401 - + from keras_core.ops import * # noqa: F403, F401 -if keras_core.config.backend() == "tensorflow" or not multi_backend(): +if config.backend() == "tensorflow": + import tensorflow as tf + from tensorflow.experimental import numpy as tfnp def take_along_axis(x, indices, axis=None): # TODO: move this workaround for dynamic shapes into keras-core. @@ -46,6 +40,4 @@ def take_along_axis(x, indices, axis=None): indices = tf.squeeze(indices, leftover_axes) return tf.gather(x, indices, batch_dims=axis) # Otherwise, fall back to the tfnp call. - return keras_core.src.backend.tensorflow.numpy.take_along_axis( - x, indices, axis=axis - ) + return tfnp.take_along_axis(x, indices, axis=axis) diff --git a/keras_nlp/backend/random.py b/keras_nlp/backend/random.py index 70be5910f4..c4f4e6f467 100644 --- a/keras_nlp/backend/random.py +++ b/keras_nlp/backend/random.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from keras_nlp.backend.config import multi_backend +from keras_nlp.backend import config -if multi_backend(): - from keras_core.random import * # noqa: F403, F401 +if config.keras_3(): + from keras.random import * # noqa: F403, F401 else: - from keras_core.src.backend.tensorflow.random import * # noqa: F403, F401 + from keras_core.random import * # noqa: F403, F401 diff --git a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py index fdaab606d3..8e233f102e 100644 --- a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py +++ b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py @@ -14,6 +14,7 @@ from keras_nlp.backend import config from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.cached_multi_head_attention import ( CachedMultiHeadAttention, ) @@ -29,8 +30,8 @@ def test_layer_behaviors(self): "key_dim": 4, }, input_data={ - "query": ops.random.uniform(shape=(2, 4, 6)), - "value": ops.random.uniform(shape=(2, 4, 6)), + "query": random.uniform(shape=(2, 4, 6)), + "value": random.uniform(shape=(2, 4, 6)), }, expected_output_shape=(2, 4, 6), expected_num_trainable_weights=8, @@ -48,7 +49,7 @@ def test_cache_call_is_correct(self): hidden_dim = num_heads * key_dim input_shape = (batch_size, seq_len, hidden_dim) - x = ops.random.uniform(shape=input_shape) + x = random.uniform(shape=input_shape) input_cache = ops.zeros((batch_size, 2, seq_len, num_heads, key_dim)) # Use a causal mask. mask = ops.tril(ops.ones((seq_len, seq_len))) diff --git a/keras_nlp/layers/modeling/f_net_encoder_test.py b/keras_nlp/layers/modeling/f_net_encoder_test.py index ffafc8a740..06d759ada0 100644 --- a/keras_nlp/layers/modeling/f_net_encoder_test.py +++ b/keras_nlp/layers/modeling/f_net_encoder_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.f_net_encoder import FNetEncoder from keras_nlp.tests.test_case import TestCase @@ -29,7 +29,7 @@ def test_layer_behaviors(self): "kernel_initializer": "HeNormal", "bias_initializer": "Zeros", }, - input_data=ops.random.uniform(shape=(2, 4, 6)), + input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), expected_num_trainable_weights=8, expected_num_non_trainable_variables=1, diff --git a/keras_nlp/layers/modeling/masked_lm_head_test.py b/keras_nlp/layers/modeling/masked_lm_head_test.py index 9ccdcae196..703c56521d 100644 --- a/keras_nlp/layers/modeling/masked_lm_head_test.py +++ b/keras_nlp/layers/modeling/masked_lm_head_test.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.tests.test_case import TestCase @@ -29,8 +29,8 @@ def test_layer_behaviors(self): "bias_initializer": "Zeros", }, input_data={ - "inputs": ops.random.uniform(shape=(4, 10, 16)), - "mask_positions": ops.random.randint( + "inputs": random.uniform(shape=(4, 10, 16)), + "mask_positions": random.randint( minval=0, maxval=10, shape=(4, 5) ), }, @@ -51,8 +51,8 @@ def test_layer_behaviors_with_embedding(self): "token_embedding": embedding, }, input_data={ - "inputs": ops.random.uniform(shape=(4, 10, 16)), - "mask_positions": ops.random.randint( + "inputs": random.uniform(shape=(4, 10, 16)), + "mask_positions": random.randint( minval=0, maxval=10, shape=(4, 5) ), }, diff --git a/keras_nlp/layers/modeling/position_embedding_test.py b/keras_nlp/layers/modeling/position_embedding_test.py index ec16f541d3..e80cef8ce2 100644 --- a/keras_nlp/layers/modeling/position_embedding_test.py +++ b/keras_nlp/layers/modeling/position_embedding_test.py @@ -16,6 +16,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.position_embedding import PositionEmbedding from keras_nlp.tests.test_case import TestCase @@ -34,7 +35,7 @@ def test_layer_behaviors(self): init_kwargs={ "sequence_length": 21, }, - input_data=ops.random.uniform(shape=(4, 21, 30)), + input_data=random.uniform(shape=(4, 21, 30)), expected_output_shape=(4, 21, 30), expected_num_trainable_weights=1, ) @@ -45,7 +46,7 @@ def test_layer_behaviors_4d(self): init_kwargs={ "sequence_length": 21, }, - input_data=ops.random.uniform(shape=(4, 5, 21, 30)), + input_data=random.uniform(shape=(4, 5, 21, 30)), expected_output_shape=(4, 5, 21, 30), expected_num_trainable_weights=1, ) @@ -145,7 +146,7 @@ def test_callable_initializer(self): def test_start_index(self): batch_size, seq_length, feature_size = 2, 3, 4 layer = PositionEmbedding(seq_length) - data = ops.random.uniform(shape=(batch_size, seq_length, feature_size)) + data = random.uniform(shape=(batch_size, seq_length, feature_size)) full_output = layer(data) sequential_output = ops.zeros((batch_size, seq_length, feature_size)) for i in range(seq_length): diff --git a/keras_nlp/layers/modeling/reversible_embedding_test.py b/keras_nlp/layers/modeling/reversible_embedding_test.py index ceb04578db..dcd4599c04 100644 --- a/keras_nlp/layers/modeling/reversible_embedding_test.py +++ b/keras_nlp/layers/modeling/reversible_embedding_test.py @@ -20,6 +20,7 @@ from keras_nlp.backend import config from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.tests.test_case import TestCase @@ -38,7 +39,7 @@ def test_layer_behaviors_tied(self, tie_weights): "tie_weights": tie_weights, "embeddings_initializer": "HeNormal", }, - input_data=ops.random.randint(minval=0, maxval=100, shape=(4, 10)), + input_data=random.randint(minval=0, maxval=100, shape=(4, 10)), expected_output_shape=(4, 10, 32), expected_num_trainable_weights=1 if tie_weights else 2, ) diff --git a/keras_nlp/layers/modeling/rotary_embedding_test.py b/keras_nlp/layers/modeling/rotary_embedding_test.py index 3fdac028de..cb502f4570 100644 --- a/keras_nlp/layers/modeling/rotary_embedding_test.py +++ 
b/keras_nlp/layers/modeling/rotary_embedding_test.py @@ -14,6 +14,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding from keras_nlp.tests.test_case import TestCase @@ -28,7 +29,7 @@ def test_layer_behaviors(self): "sequence_axis": 1, "feature_axis": -1, }, - input_data=ops.random.uniform(shape=(2, 4, 6)), + input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), ) @@ -38,7 +39,7 @@ def test_layer_behaviors_4d(self): init_kwargs={ "max_wavelength": 1000, }, - input_data=ops.random.uniform(shape=(2, 8, 4, 6)), + input_data=random.uniform(shape=(2, 8, 4, 6)), expected_output_shape=(2, 8, 4, 6), ) @@ -86,7 +87,7 @@ def test_output_correct_values(self): def test_start_index(self): batch_size, seq_length, feature_size = 2, 3, 4 layer = RotaryEmbedding(seq_length) - data = ops.random.uniform(shape=(batch_size, seq_length, feature_size)) + data = random.uniform(shape=(batch_size, seq_length, feature_size)) full_output = layer(data) sequential_output = ops.zeros((batch_size, seq_length, feature_size)) for i in range(seq_length): diff --git a/keras_nlp/layers/modeling/sine_position_encoding_test.py b/keras_nlp/layers/modeling/sine_position_encoding_test.py index 22d1d9d3bf..2163d4ee6b 100644 --- a/keras_nlp/layers/modeling/sine_position_encoding_test.py +++ b/keras_nlp/layers/modeling/sine_position_encoding_test.py @@ -14,6 +14,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.sine_position_encoding import ( SinePositionEncoding, ) @@ -27,7 +28,7 @@ def test_layer_behaviors(self): init_kwargs={ "max_wavelength": 10000, }, - input_data=ops.random.uniform(shape=(2, 4, 6)), + input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), ) @@ -37,7 +38,7 @@ def test_layer_behaviors_4d(self): init_kwargs={ "max_wavelength": 10000, }, - input_data=ops.random.uniform(shape=(1, 2, 4, 6)), + input_data=random.uniform(shape=(1, 2, 4, 6)), expected_output_shape=(1, 2, 4, 6), ) @@ -85,7 +86,7 @@ def test_output_correct_values(self): pos_encoding, ] ) - input = ops.random.uniform(shape=[1, 4, 6]) + input = random.uniform(shape=[1, 4, 6]) output = model(input) # compare position encoding values for position 0 and 3 @@ -97,7 +98,7 @@ def test_start_index(self): batch_size, seq_length, feature_size = 2, 3, 4 layer = SinePositionEncoding() - data = ops.random.uniform(shape=(batch_size, seq_length, feature_size)) + data = random.uniform(shape=(batch_size, seq_length, feature_size)) full_output = layer(data) sequential_output = ops.zeros((batch_size, seq_length, feature_size)) for i in range(seq_length): diff --git a/keras_nlp/layers/modeling/token_and_position_embedding_test.py b/keras_nlp/layers/modeling/token_and_position_embedding_test.py index ceb96b114e..b0c5949a2c 100644 --- a/keras_nlp/layers/modeling/token_and_position_embedding_test.py +++ b/keras_nlp/layers/modeling/token_and_position_embedding_test.py @@ -16,6 +16,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.token_and_position_embedding import ( TokenAndPositionEmbedding, ) @@ -32,7 +33,7 @@ def test_layer_behaviors(self): "vocabulary_size": 5, "embedding_dim": 3, "embeddings_initializer": keras.initializers.Constant(1.0), }, - input_data=ops.random.randint(minval=0, maxval=5, 
shape=(2, 4)), + input_data=random.randint(minval=0, maxval=5, shape=(2, 4)), expected_output_shape=(2, 4, 3), expected_output_data=ops.ones((2, 4, 3)) * 2, expected_num_trainable_weights=2, diff --git a/keras_nlp/layers/modeling/transformer_decoder_test.py b/keras_nlp/layers/modeling/transformer_decoder_test.py index 9a7bc9c1be..f904d92511 100644 --- a/keras_nlp/layers/modeling/transformer_decoder_test.py +++ b/keras_nlp/layers/modeling/transformer_decoder_test.py @@ -15,6 +15,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder from keras_nlp.tests.test_case import TestCase @@ -36,7 +37,7 @@ def test_layer_behaviors(self, normalize_first): "kernel_initializer": "HeNormal", "bias_initializer": "Zeros", }, - input_data=ops.random.uniform(shape=(2, 4, 6)), + input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), expected_num_trainable_weights=16, expected_num_non_trainable_variables=3, # dropout rng seeds @@ -60,8 +61,8 @@ def test_layer_behaviors_with_cross_attention(self, normalize_first): "bias_initializer": "Zeros", }, input_data={ - "decoder_sequence": ops.random.uniform(shape=(2, 4, 6)), - "encoder_sequence": ops.random.uniform(shape=(2, 4, 6)), + "decoder_sequence": random.uniform(shape=(2, 4, 6)), + "encoder_sequence": random.uniform(shape=(2, 4, 6)), }, expected_output_shape=(2, 4, 6), expected_num_trainable_weights=26, @@ -106,8 +107,8 @@ def test_mask_propagation(self): intermediate_dim=4, num_heads=2, ) - decoder_sequence = ops.random.uniform(shape=[1, 4, 6]) - encoder_sequence = ops.random.uniform(shape=[1, 4, 6]) + decoder_sequence = random.uniform(shape=[1, 4, 6]) + encoder_sequence = random.uniform(shape=[1, 4, 6]) mask = ops.array([[True, True, False, False]]) decoder_sequence._keras_mask = mask outputs = decoder(decoder_sequence, encoder_sequence) @@ -118,7 +119,7 @@ def test_mask_propagation_without_cross_attention(self): intermediate_dim=4, num_heads=2, ) - decoder_sequence = ops.random.uniform(shape=[1, 4, 6]) + decoder_sequence = random.uniform(shape=[1, 4, 6]) mask = ops.array([[True, True, False, False]]) decoder_sequence._keras_mask = mask outputs = decoder(decoder_sequence) @@ -132,7 +133,7 @@ def test_cache_call_is_correct(self): hidden_dim = num_heads * key_dim input_shape = (batch_size, seq_len, hidden_dim) - x = ops.random.uniform(shape=input_shape) + x = random.uniform(shape=input_shape) input_cache = ops.zeros((batch_size, 2, seq_len, num_heads, key_dim)) outputs = ops.zeros_like(x) @@ -174,6 +175,6 @@ def test_different_feature_dimension_for_encoder_and_decoder_sequence(self): intermediate_dim=4, num_heads=2, ) - decoder_sequence = ops.random.uniform(shape=[1, 4, 6]) - encoder_sequence = ops.random.uniform(shape=[1, 4, 5]) + decoder_sequence = random.uniform(shape=[1, 4, 6]) + encoder_sequence = random.uniform(shape=[1, 4, 5]) decoder(decoder_sequence, encoder_sequence) diff --git a/keras_nlp/layers/modeling/transformer_encoder_test.py b/keras_nlp/layers/modeling/transformer_encoder_test.py index 9fe7c3eab2..3882a0e7e2 100644 --- a/keras_nlp/layers/modeling/transformer_encoder_test.py +++ b/keras_nlp/layers/modeling/transformer_encoder_test.py @@ -16,6 +16,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder from keras_nlp.tests.test_case import TestCase @@ -37,7 
+38,7 @@ def test_layer_behaviors(self, normalize_first): "kernel_initializer": "HeNormal", "bias_initializer": "Zeros", }, - input_data=ops.random.uniform(shape=(2, 4, 6)), + input_data=random.uniform(shape=(2, 4, 6)), expected_output_shape=(2, 4, 6), expected_num_trainable_weights=16, expected_num_non_trainable_variables=3, # dropout rng seeds @@ -59,7 +60,7 @@ def test_valid_call(self, normalize_first): encoder, ] ) - input = ops.random.uniform(shape=[2, 4, 6]) + input = random.uniform(shape=[2, 4, 6]) model(input) def test_valid_call_with_mask(self): @@ -68,7 +69,7 @@ def test_valid_call_with_mask(self): num_heads=2, ) encoder.build([2, 4, 6]) - input = ops.random.uniform(shape=[2, 4, 6]) + input = random.uniform(shape=[2, 4, 6]) mask = input[:, :, 0] < 0.5 encoder(input, mask) @@ -86,7 +87,7 @@ def test_mask_propagation(self): intermediate_dim=4, num_heads=2, ) - inputs = ops.random.uniform(shape=[1, 4, 6]) + inputs = random.uniform(shape=[1, 4, 6]) mask = ops.array([[True, True, False, False]]) inputs._keras_mask = mask outputs = encoder(inputs) diff --git a/keras_nlp/layers/modeling/transformer_layer_utils_test.py b/keras_nlp/layers/modeling/transformer_layer_utils_test.py index 7fc2013ad7..57df677e94 100644 --- a/keras_nlp/layers/modeling/transformer_layer_utils_test.py +++ b/keras_nlp/layers/modeling/transformer_layer_utils_test.py @@ -14,6 +14,7 @@ import keras_nlp.layers.modeling.transformer_layer_utils as utils from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.tests.test_case import TestCase @@ -25,7 +26,7 @@ def test_compute_causal_mask(self): def test_merge_padding_and_attention_mask(self): padding_mask = ops.array([[1, 1, 0]]) attention_mask = ops.array([[[0, 0, 1], [0, 1, 0], [1, 0, 0]]]) - inputs = ops.random.uniform(shape=[1, 3, 2]) + inputs = random.uniform(shape=[1, 3, 2]) merged_mask = utils.merge_padding_and_attention_mask( inputs, padding_mask, @@ -37,7 +38,7 @@ def test_bad_mask_shapes(self): with self.assertRaises(ValueError): padding_mask = ops.array([[[1, 1, 0], [1, 0, 0]]]) attention_mask = ops.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]]) - inputs = ops.random.uniform(shape=[1, 3, 2]) + inputs = random.uniform(shape=[1, 3, 2]) utils.merge_padding_and_attention_mask( inputs, padding_mask, @@ -47,7 +48,7 @@ def test_bad_mask_shapes(self): with self.assertRaises(ValueError): padding_mask = ops.array([[1, 1, 0]]) attention_mask = ops.array([[0, 0, 1], [1, 0, 0]]) - inputs = ops.random.uniform(shape=[1, 3, 2]) + inputs = random.uniform(shape=[1, 3, 2]) utils.merge_padding_and_attention_mask( inputs, padding_mask, diff --git a/keras_nlp/layers/preprocessing/random_deletion.py b/keras_nlp/layers/preprocessing/random_deletion.py index 41289c0054..061290ba56 100644 --- a/keras_nlp/layers/preprocessing/random_deletion.py +++ b/keras_nlp/layers/preprocessing/random_deletion.py @@ -21,7 +21,7 @@ PreprocessingLayer, ) from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype @@ -125,7 +125,7 @@ def __init__( dtype="int32", **kwargs, ): - if not is_integer_dtype(dtype) and not is_string_dtype(dtype): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. 
" f"Received: dtype={dtype}" diff --git a/keras_nlp/layers/preprocessing/random_swap.py b/keras_nlp/layers/preprocessing/random_swap.py index ad6f1c0980..27873f0fe8 100644 --- a/keras_nlp/layers/preprocessing/random_swap.py +++ b/keras_nlp/layers/preprocessing/random_swap.py @@ -21,7 +21,7 @@ PreprocessingLayer, ) from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype @@ -127,7 +127,7 @@ def __init__( dtype="int32", **kwargs, ): - if not is_integer_dtype(dtype) and not is_string_dtype(dtype): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. " f"Received: dtype={dtype}" diff --git a/keras_nlp/metrics/bleu.py b/keras_nlp/metrics/bleu.py index 51d224c676..750a1b704a 100644 --- a/keras_nlp/metrics/bleu.py +++ b/keras_nlp/metrics/bleu.py @@ -20,7 +20,7 @@ from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops -from keras_nlp.utils.tensor_utils import is_floating_dtype +from keras_nlp.utils.tensor_utils import is_float_dtype from keras_nlp.utils.tensor_utils import tensor_to_list REPLACE_SUBSTRINGS = [ @@ -112,7 +112,7 @@ def __init__( ): super().__init__(name=name, dtype=dtype, **kwargs) - if not is_floating_dtype(dtype): + if not is_float_dtype(dtype): raise ValueError( "`dtype` must be a floating point type. " f"Received: dtype={dtype}" diff --git a/keras_nlp/metrics/edit_distance.py b/keras_nlp/metrics/edit_distance.py index 899f7f1f2e..263ff8290b 100644 --- a/keras_nlp/metrics/edit_distance.py +++ b/keras_nlp/metrics/edit_distance.py @@ -16,7 +16,7 @@ from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras -from keras_nlp.utils.tensor_utils import is_floating_dtype +from keras_nlp.utils.tensor_utils import is_float_dtype @keras_nlp_export("keras_nlp.metrics.EditDistance") @@ -87,7 +87,7 @@ def __init__( ): super().__init__(name=name, dtype=dtype, **kwargs) - if not is_floating_dtype(dtype): + if not is_float_dtype(dtype): raise ValueError( "`dtype` must be a floating point type. " f"Received: dtype={dtype}" diff --git a/keras_nlp/metrics/perplexity.py b/keras_nlp/metrics/perplexity.py index eb742fc31c..4a7e626bc9 100644 --- a/keras_nlp/metrics/perplexity.py +++ b/keras_nlp/metrics/perplexity.py @@ -15,7 +15,7 @@ from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops -from keras_nlp.utils.tensor_utils import is_floating_dtype +from keras_nlp.utils.tensor_utils import is_float_dtype @keras_nlp_export("keras_nlp.metrics.Perplexity") @@ -88,7 +88,7 @@ def __init__( name="perplexity", **kwargs, ): - if not is_floating_dtype(dtype): + if not is_float_dtype(dtype): raise ValueError( "`dtype` must be a floating point type. " f"Received: dtype={dtype}" diff --git a/keras_nlp/metrics/rouge_base.py b/keras_nlp/metrics/rouge_base.py index f84e718080..824a6c4b5b 100644 --- a/keras_nlp/metrics/rouge_base.py +++ b/keras_nlp/metrics/rouge_base.py @@ -16,7 +16,7 @@ from keras_nlp.backend import keras from keras_nlp.backend import ops -from keras_nlp.utils.tensor_utils import is_floating_dtype +from keras_nlp.utils.tensor_utils import is_float_dtype from keras_nlp.utils.tensor_utils import tensor_to_list try: @@ -65,7 +65,7 @@ def __init__( "package. 
Please install it with `pip install rouge-score`." ) - if not is_floating_dtype(dtype): + if not is_float_dtype(dtype): raise ValueError( "`dtype` must be a floating point type. " f"Received: dtype={dtype}" diff --git a/keras_nlp/models/albert/albert_presets_test.py b/keras_nlp/models/albert/albert_presets_test.py index 86265a1abd..f7576c6729 100644 --- a/keras_nlp/models/albert/albert_presets_test.py +++ b/keras_nlp/models/albert/albert_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.albert.albert_backbone import AlbertBackbone from keras_nlp.models.albert.albert_classifier import AlbertClassifier from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor @@ -138,7 +139,7 @@ def test_load_albert(self, load_weights): preset, load_weights=load_weights ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), @@ -171,7 +172,7 @@ def test_load_albert_classifier_without_preprocessing(self, load_weights): load_weights=load_weights, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/bart/bart_presets_test.py b/keras_nlp/models/bart/bart_presets_test.py index 89518d845b..59bf3b5584 100644 --- a/keras_nlp/models/bart/bart_presets_test.py +++ b/keras_nlp/models/bart/bart_presets_test.py @@ -15,6 +15,7 @@ # from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.tests.test_case import TestCase # Licensed under the Apache License, Version 2.0 (the "License"); @@ -117,7 +118,7 @@ def test_load_bart(self, load_weights): for preset in BartBackbone.presets: model = BartBackbone.from_preset(preset, load_weights=load_weights) input_data = { - "encoder_token_ids": ops.random.uniform( + "encoder_token_ids": random.uniform( shape=(1, 1024), dtype="int64", maxval=model.vocabulary_size, @@ -125,7 +126,7 @@ def test_load_bart(self, load_weights): "encoder_padding_mask": ops.array( [1] * 768 + [0] * 256, shape=(1, 1024) ), - "decoder_token_ids": ops.random.uniform( + "decoder_token_ids": random.uniform( shape=(1, 1024), dtype="int64", maxval=model.vocabulary_size, diff --git a/keras_nlp/models/bert/bert_presets_test.py b/keras_nlp/models/bert/bert_presets_test.py index 71e739bbfc..a84286c091 100644 --- a/keras_nlp/models/bert/bert_presets_test.py +++ b/keras_nlp/models/bert/bert_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.bert.bert_backbone import BertBackbone from keras_nlp.models.bert.bert_classifier import BertClassifier from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor @@ -186,7 +187,7 @@ def test_load_bert(self, load_weights): for preset in BertBackbone.presets: model = BertBackbone.from_preset(preset, load_weights=load_weights) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), @@ -219,7 +220,7 @@ def test_load_bert_classifier_without_preprocessing(self, load_weights): load_weights=load_weights, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": 
random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py b/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py index a033825dad..7b023a0601 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.models.deberta_v3.deberta_v3_classifier import ( DebertaV3Classifier, @@ -150,7 +151,7 @@ def test_load_deberta(self, load_weights): preset, load_weights=load_weights ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "padding_mask": ops.array([1] * 512, shape=(1, 512)), @@ -182,7 +183,7 @@ def test_load_deberta_classifier_without_preprocessing(self, load_weights): preprocessor=None, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/distil_bert/distil_bert_presets_test.py b/keras_nlp/models/distil_bert/distil_bert_presets_test.py index a974f54aee..691f3dbbbd 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone from keras_nlp.models.distil_bert.distil_bert_classifier import ( DistilBertClassifier, @@ -141,7 +142,7 @@ def test_load_distilbert(self, load_weights): preset, load_weights=load_weights ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "padding_mask": ops.array([1] * 512, shape=(1, 512)), @@ -173,7 +174,7 @@ def test_load_distilbert_classifier_no_preprocessing(self, load_weights): preprocessor=None, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/f_net/f_net_presets_test.py b/keras_nlp/models/f_net/f_net_presets_test.py index 1f8f79c447..9f656b51bf 100644 --- a/keras_nlp/models/f_net/f_net_presets_test.py +++ b/keras_nlp/models/f_net/f_net_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.models.f_net.f_net_classifier import FNetClassifier from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor @@ -126,7 +127,7 @@ def test_load_f_net(self, load_weights): for preset in FNetBackbone.presets: model = FNetBackbone.from_preset(preset, load_weights=load_weights) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), @@ -158,7 +159,7 @@ def test_load_fnet_classifier_without_preprocessing(self, load_weights): load_weights=load_weights, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( 
shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/gpt2/gpt2_presets_test.py b/keras_nlp/models/gpt2/gpt2_presets_test.py index 37e7e53e87..a1b645553c 100644 --- a/keras_nlp/models/gpt2/gpt2_presets_test.py +++ b/keras_nlp/models/gpt2/gpt2_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer from keras_nlp.tests.test_case import TestCase @@ -94,7 +95,7 @@ def test_load_gpt2(self, load_weights): for preset in GPT2Backbone.presets: model = GPT2Backbone.from_preset(preset, load_weights=load_weights) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 1024), dtype="int64", maxval=model.vocabulary_size, diff --git a/keras_nlp/models/opt/opt_presets_test.py b/keras_nlp/models/opt/opt_presets_test.py index a9426d29e7..2484e5f3d8 100644 --- a/keras_nlp/models/opt/opt_presets_test.py +++ b/keras_nlp/models/opt/opt_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.opt.opt_backbone import OPTBackbone from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer from keras_nlp.tests.test_case import TestCase @@ -94,7 +95,7 @@ def test_load_opt(self, load_weights): for preset in OPTBackbone.presets: model = OPTBackbone.from_preset(preset, load_weights=load_weights) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 1024), dtype="int64", maxval=model.vocabulary_size, diff --git a/keras_nlp/models/roberta/roberta_presets_test.py b/keras_nlp/models/roberta/roberta_presets_test.py index 22c1f2e8a6..657c43507c 100644 --- a/keras_nlp/models/roberta/roberta_presets_test.py +++ b/keras_nlp/models/roberta/roberta_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier from keras_nlp.models.roberta.roberta_masked_lm import RobertaMaskedLM @@ -167,7 +168,7 @@ def test_load_roberta(self, load_weights): preset, load_weights=load_weights ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "padding_mask": ops.array([1] * 512, shape=(1, 512)), @@ -197,7 +198,7 @@ def test_load_roberta_classifier_without_preprocessing(self, load_weights): load_weights=load_weights, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, @@ -228,7 +229,7 @@ def test_load_roberta_masked_lm_without_preprocessing(self, load_weights): load_weights=load_weights, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index 5d9a605449..d4c6180405 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -14,11 +14,11 @@ import os -import keras_core from rich import console as rich_console from rich import markup from rich import table as rich_table +from keras_nlp.backend import config 
from keras_nlp.backend import keras from keras_nlp.utils.keras_utils import print_msg from keras_nlp.utils.pipeline_model import PipelineModel @@ -315,11 +315,21 @@ def bold_text(x): if print_fn: print_fn(console.end_capture(), line_break=False) - # Hardcode summary from keras_core for now. - keras_core.Model.summary( - self, - line_length=line_length, - positions=positions, - print_fn=print_fn, - **kwargs, - ) + # Avoid `tf.keras.Model.summary()`, so the above output matches. + if config.multi_backend(): + super().summary( + line_length=line_length, + positions=positions, + print_fn=print_fn, + **kwargs, + ) + else: + import keras_core + + keras_core.Model.summary( + self, + line_length=line_length, + positions=positions, + print_fn=print_fn, + **kwargs, + ) diff --git a/keras_nlp/models/whisper/whisper_audio_feature_extractor.py b/keras_nlp/models/whisper/whisper_audio_feature_extractor.py index 73b15a3afd..e41519bbc9 100644 --- a/keras_nlp/models/whisper/whisper_audio_feature_extractor.py +++ b/keras_nlp/models/whisper/whisper_audio_feature_extractor.py @@ -18,14 +18,16 @@ import tensorflow as tf from keras_nlp.api_export import keras_nlp_export -from keras_nlp.backend import keras +from keras_nlp.layers.preprocessing.preprocessing_layer import ( + PreprocessingLayer, +) from keras_nlp.models.whisper.whisper_presets import backbone_presets from keras_nlp.utils.python_utils import classproperty from keras_nlp.utils.python_utils import format_docstring @keras_nlp_export("keras_nlp.models.WhisperAudioFeatureExtractor") -class WhisperAudioFeatureExtractor(keras.layers.Layer): +class WhisperAudioFeatureExtractor(PreprocessingLayer): """ Whisper audio feature extractor layer. @@ -163,9 +165,10 @@ def _get_mel_filters(self): weights *= enorm[:, np.newaxis] weights = np.transpose(weights) - return tf.constant(weights, dtype=self.dtype) + return tf.constant(weights, dtype=self.compute_dtype) def _extract_audio_features(self, audio): + audio = tf.cast(audio, self.compute_dtype) # Use "reflection" padding - `tf.signal.stft` uses symmetric padding # internally. audio = tf.pad( @@ -246,6 +249,8 @@ def call(self, audio): # Find the log mel spectrogram. log_spec = self._extract_audio_features(audio) + if rank_1_input: + log_spec = tf.squeeze(log_spec, 0) return log_spec def get_config(self): diff --git a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py index 8eca8f9c79..1d282f76d2 100644 --- a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py +++ b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py @@ -42,10 +42,10 @@ def test_unbatched_inputs(self): outputs = self.audio_feature_extractor(audio_tensor) # Verify shape. - self.assertEqual(outputs.shape, (1, 5, self.num_mels)) + self.assertEqual(outputs.shape, (5, self.num_mels)) # Verify output. 
expected = [1.1656, 1.0151, -0.8343, -0.8343, -0.8343] - self.assertAllClose(outputs[0, :, 0], expected, atol=0.01, rtol=0.01) + self.assertAllClose(outputs[:, 0], expected, atol=0.01, rtol=0.01) def test_batched_inputs(self): audio_tensor_1 = tf.ones((2,), dtype="float32") diff --git a/keras_nlp/models/whisper/whisper_preprocessor.py b/keras_nlp/models/whisper/whisper_preprocessor.py index 88fd60cae2..8545890cc2 100644 --- a/keras_nlp/models/whisper/whisper_preprocessor.py +++ b/keras_nlp/models/whisper/whisper_preprocessor.py @@ -14,7 +14,6 @@ import copy -import tensorflow as tf from absl import logging from keras_nlp.api_export import keras_nlp_export @@ -278,9 +277,6 @@ def call(self, x, y=None, sample_weight=None, decoder_sequence_length=None): ) encoder_features = self.audio_feature_extractor(encoder_audio[0]) - if encoder_audio[0].shape.rank < 2: - encoder_features = tf.squeeze(encoder_features, axis=0) - decoder_sequence_length = ( decoder_sequence_length or self.decoder_sequence_length ) diff --git a/keras_nlp/models/whisper/whisper_preprocessor_test.py b/keras_nlp/models/whisper/whisper_preprocessor_test.py index 3f07ef618f..9f8f54e494 100644 --- a/keras_nlp/models/whisper/whisper_preprocessor_test.py +++ b/keras_nlp/models/whisper/whisper_preprocessor_test.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import numpy as np import tensorflow as tf from keras_nlp.backend import keras @@ -89,8 +90,8 @@ def setUp(self): def test_unbatched_preprocess(self): input_data = { - "encoder_audio": tf.ones((200,)), - "decoder_text": tf.constant(" airplane at airport"), + "encoder_audio": np.ones((200,)), + "decoder_text": " airplane at airport", } x = self.preprocessor(input_data) @@ -106,8 +107,8 @@ def test_unbatched_preprocess(self): def test_preprocess_batch(self): input_data = { - "encoder_audio": tf.ones((4, 200)), - "decoder_text": tf.constant([" airplane at airport"] * 4), + "encoder_audio": np.ones((4, 200)), + "decoder_text": [" airplane at airport"] * 4, } x = self.preprocessor(input_data) @@ -125,11 +126,11 @@ def test_preprocess_batch(self): def test_preprocess_labeled_batch(self): x = { - "encoder_audio": tf.ones((4, 200)), - "decoder_text": tf.constant([" airplane at airport"] * 4), + "encoder_audio": np.ones((4, 200)), + "decoder_text": [" airplane at airport"] * 4, } - y_in = tf.constant([1] * 4) - sw_in = tf.constant([1.0] * 4) + y_in = np.ones((4,)) + sw_in = np.ones((4,)) x, y, sw = self.preprocessor(x, y_in, sw_in) self.assertAllEqual( x["encoder_features"].shape, [4, self.output_length, self.num_mels] @@ -147,8 +148,8 @@ def test_preprocess_labeled_batch(self): def test_preprocess_dataset(self): x = { - "encoder_audio": tf.ones((4, 200)), - "decoder_text": tf.constant([" airplane at airport"] * 4), + "encoder_audio": np.ones((4, 200)), + "decoder_text": [" airplane at airport"] * 4, } ds = tf.data.Dataset.from_tensor_slices(x) ds = ds.map(self.preprocessor) @@ -167,8 +168,8 @@ def test_preprocess_dataset(self): def test_sequence_length_override(self): input_data = { - "encoder_audio": tf.ones((200,)), - "decoder_text": tf.constant(" airplane at airport"), + "encoder_audio": np.ones((200,)), + "decoder_text": " airplane at airport", } x = self.preprocessor(input_data, decoder_sequence_length=6) self.assertAllEqual(x["decoder_token_ids"], [9, 14, 13, 11, 0, 10]) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py index 
b74de26b01..a1824c86c5 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py @@ -16,6 +16,7 @@ from absl.testing import parameterized from keras_nlp.backend import ops +from keras_nlp.backend import random from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.models.xlm_roberta.xlm_roberta_classifier import ( XLMRobertaClassifier, @@ -143,7 +144,7 @@ def test_load_xlm_roberta(self, load_weights): preset, load_weights=load_weights ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=model.vocabulary_size ), "padding_mask": ops.array([1] * 512, shape=(1, 512)), @@ -177,7 +178,7 @@ def test_load_xlm_roberta_classifier_without_preprocessing( preprocessor=None, ) input_data = { - "token_ids": ops.random.uniform( + "token_ids": random.uniform( shape=(1, 512), dtype="int64", maxval=classifier.backbone.vocabulary_size, diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py index d0af5a47a4..976f4eb37c 100644 --- a/keras_nlp/tests/test_case.py +++ b/keras_nlp/tests/test_case.py @@ -17,12 +17,12 @@ import tensorflow as tf import tree from absl.testing import parameterized -from keras_core.backend import is_float_dtype -from keras_core.backend import standardize_dtype from keras_nlp.backend import config from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.utils.tensor_utils import is_float_dtype +from keras_nlp.utils.tensor_utils import standardize_dtype def convert_to_comparible_type(x): @@ -39,7 +39,7 @@ def convert_to_comparible_type(x): return tree.map_structure(lambda x: x.decode("utf-8"), x) if isinstance(x, (tf.Tensor, tf.RaggedTensor)): return x - if ops.is_tensor(x): + if hasattr(x, "__array__"): return ops.convert_to_numpy(x) return x diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index 6ec140a113..f92d9e6a77 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -34,7 +34,7 @@ from keras_nlp.utils.python_utils import format_docstring from keras_nlp.utils.tensor_utils import assert_tf_text_installed from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype try: @@ -283,7 +283,7 @@ def __init__( ) -> None: assert_tf_text_installed(self.__class__.__name__) - if not is_integer_dtype(dtype) and not is_string_dtype(dtype): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. 
" f"Received: dtype={dtype}" diff --git a/keras_nlp/tokenizers/byte_tokenizer.py b/keras_nlp/tokenizers/byte_tokenizer.py index f6b60bfeaf..3aefc4a01d 100644 --- a/keras_nlp/tokenizers/byte_tokenizer.py +++ b/keras_nlp/tokenizers/byte_tokenizer.py @@ -19,7 +19,7 @@ from keras_nlp.tokenizers import tokenizer from keras_nlp.utils.tensor_utils import assert_tf_text_installed from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype try: import tensorflow_text as tf_text @@ -165,7 +165,7 @@ def __init__( ): assert_tf_text_installed(self.__class__.__name__) - if not is_integer_dtype(dtype): + if not is_int_dtype(dtype): raise ValueError( "Output dtype must be an integer type. " f"Received: dtype={dtype}" diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer.py b/keras_nlp/tokenizers/sentence_piece_tokenizer.py index 2308c33506..3a53c12ad3 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer.py @@ -26,7 +26,7 @@ from keras_nlp.utils.python_utils import format_docstring from keras_nlp.utils.tensor_utils import assert_tf_text_installed from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype from keras_nlp.utils.tensor_utils import tensor_to_list @@ -113,7 +113,7 @@ def __init__( ) -> None: assert_tf_text_installed(self.__class__.__name__) - if not is_integer_dtype(dtype) and not is_string_dtype(dtype): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. " f"Received: dtype={dtype}" diff --git a/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py b/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py index 5a16a76fc0..5fe8f0144d 100644 --- a/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py +++ b/keras_nlp/tokenizers/unicode_codepoint_tokenizer.py @@ -18,7 +18,7 @@ from keras_nlp.tokenizers import tokenizer from keras_nlp.utils.tensor_utils import assert_tf_text_installed from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype try: import tensorflow_text as tf_text @@ -219,7 +219,7 @@ def __init__( ) -> None: assert_tf_text_installed(self.__class__.__name__) - if not is_integer_dtype(dtype): + if not is_int_dtype(dtype): raise ValueError( "Output dtype must be an integer type. 
" f"Received: dtype={dtype}" diff --git a/keras_nlp/tokenizers/word_piece_tokenizer.py b/keras_nlp/tokenizers/word_piece_tokenizer.py index dc9ce49427..fe37bebf78 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer.py @@ -25,7 +25,7 @@ from keras_nlp.utils.python_utils import format_docstring from keras_nlp.utils.tensor_utils import assert_tf_text_installed from keras_nlp.utils.tensor_utils import convert_to_ragged_batch -from keras_nlp.utils.tensor_utils import is_integer_dtype +from keras_nlp.utils.tensor_utils import is_int_dtype from keras_nlp.utils.tensor_utils import is_string_dtype try: @@ -305,7 +305,7 @@ def __init__( ) -> None: assert_tf_text_installed(self.__class__.__name__) - if not is_integer_dtype(dtype) and not is_string_dtype(dtype): + if not is_int_dtype(dtype) and not is_string_dtype(dtype): raise ValueError( "Output dtype must be an integer type or a string. " f"Received: dtype={dtype}" diff --git a/keras_nlp/utils/tensor_utils.py b/keras_nlp/utils/tensor_utils.py index 97df75e74f..1c9ad1e3bd 100644 --- a/keras_nlp/utils/tensor_utils.py +++ b/keras_nlp/utils/tensor_utils.py @@ -15,6 +15,7 @@ import tensorflow as tf from keras_nlp.backend import config +from keras_nlp.backend import keras from keras_nlp.backend import ops try: @@ -151,19 +152,21 @@ def is_tensor_type(x): return hasattr(x, "__array__") -def is_floating_dtype(dtype): +def standardize_dtype(dtype): + if config.multi_backend(): + return keras.backend.standardize_dtype(dtype) if hasattr(dtype, "name"): - dtype = dtype.name - return "float" in dtype + return dtype.name + return dtype -def is_integer_dtype(dtype): - if hasattr(dtype, "name"): - dtype = dtype.name - return "int" in dtype +def is_float_dtype(dtype): + return "float" in standardize_dtype(dtype) + + +def is_int_dtype(dtype): + return "int" in standardize_dtype(dtype) def is_string_dtype(dtype): - if hasattr(dtype, "name"): - dtype = dtype.name - return "string" in dtype + return "string" in standardize_dtype(dtype) From 8cab8ef603bf90511c71fa95e75a676bee6c904c Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 9 Oct 2023 15:44:48 -0700 Subject: [PATCH 09/87] Remove dtensor (#1268) We will replace this with the work on https://github.com/keras-team/keras-nlp/pull/1267 But we have no coverage for that PR till we run tests against Keras 3, which will probably still be about a week. For now, let's just remove this usage, which is no longer needed and will break a Keras 3 install. --- keras_nlp/conftest.py | 5 -- keras_nlp/models/gpt2/gpt2_backbone.py | 72 -------------------- keras_nlp/models/gpt2/gpt2_backbone_test.py | 17 ----- keras_nlp/models/gpt2/gpt2_causal_lm.py | 36 ---------- keras_nlp/models/gpt2/gpt2_causal_lm_test.py | 10 --- keras_nlp/models/opt/opt_backbone.py | 72 -------------------- keras_nlp/models/opt/opt_backbone_test.py | 17 ----- keras_nlp/models/opt/opt_causal_lm.py | 36 ---------- keras_nlp/models/opt/opt_causal_lm_test.py | 10 --- 9 files changed, 275 deletions(-) diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py index 04daf5cd37..3a23ee475d 100644 --- a/keras_nlp/conftest.py +++ b/keras_nlp/conftest.py @@ -86,8 +86,3 @@ def pytest_collection_modifyitems(config, items): tf.debugging.disable_traceback_filtering() if backend_config.multi_backend(): keras.config.disable_traceback_filtering() - -# One off setup for dtensor tests. 
-if not backend_config.multi_backend(): - keras.backend.experimental.enable_tf_random_generator() - keras.utils.set_random_seed(1337) diff --git a/keras_nlp/models/gpt2/gpt2_backbone.py b/keras_nlp/models/gpt2/gpt2_backbone.py index caf73c0606..3f357d6408 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone.py +++ b/keras_nlp/models/gpt2/gpt2_backbone.py @@ -14,10 +14,6 @@ import copy -from tensorflow.experimental import dtensor -from tensorflow.experimental.dtensor import Layout -from tensorflow.keras.dtensor.experimental import LayoutMap - from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.position_embedding import PositionEmbedding @@ -190,71 +186,3 @@ def get_config(self): @classproperty def presets(cls): return copy.deepcopy(backbone_presets) - - @classmethod - def create_layout_map(cls, mesh): - """Create a DTensor layout map for a GPT2Backbone. - - Given a DTensor mesh describing a list of devices, this method returns a - DTensor layout map for creating a `keras_nlp.models.GPT2Backbone` - instance. This mapping describes how to distribute all model weights - across multiple devices. For an overview of DTensor concepts, see - [this guide](https://www.tensorflow.org/guide/dtensor_overview). - - Args: - mesh: A 2D `tf.experimental.dtensor.Mesh` describing the arrangement - of devices for running distributed computation. The - first dimension in the mesh is expected to be for data parallel - distribution, and the second for model parallel distribution. - - Returns: - A `tf.keras.dtensor.experimental.LayoutMap` which contains the - proper layout to weights mapping for the model parallel setting. - - Examples: - ```python - keras.backend.experimental.enable_tf_random_generator() - keras.utils.set_random_seed(1337) - - # Update both dimensions below for a multi-device setting. - mesh = dtensor.create_mesh([("batch", 1), ("model", 1)]) - layout_map = keras_nlp.models.GPT2Backbone.create_layout_map(mesh) - - with layout_map.scope(): - model = keras_nlp.models.GPT2Backbone.from_preset("gpt2_base_en") - ``` - """ - # We assert the mesh is 2D, and assume the first mesh dim is for data - # parallel and the second dim is for model parallel. - mesh_shape = mesh.shape() - if len(mesh_shape) != 2: - raise ValueError( - f"Expect to create layout based on 2D mesh, received {mesh}" - ) - _, model_dim = mesh.dim_names - unshard_dim = dtensor.UNSHARDED - - layout_map = LayoutMap(mesh=mesh) - # Embedding sharding - layout_map[r".*embeddings"] = Layout([unshard_dim, model_dim], mesh) - - # Transformer block sharding - layout_map[r".*_(query|key|value)_dense.kernel"] = Layout( - [unshard_dim, unshard_dim, model_dim], mesh - ) - layout_map[r".*_(query|key|value)_dense.bias"] = Layout( - [model_dim, unshard_dim], mesh - ) - layout_map[r".*_feedforward_intermediate_dense.kernel"] = Layout( - [unshard_dim, model_dim], mesh - ) - layout_map[r".*_feedforward_intermediate_dense.bias"] = Layout( - [model_dim], mesh - ) - layout_map[r".*_feedforward_output_dense.kernel"] = Layout( - [model_dim, unshard_dim], mesh - ) - layout_map[r".*_feedforward_output_dense.bias"] = Layout( - [unshard_dim], mesh - ) - return layout_map diff --git a/keras_nlp/models/gpt2/gpt2_backbone_test.py b/keras_nlp/models/gpt2/gpt2_backbone_test.py index 8fc779c634..c82ec1b06d 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone_test.py +++ b/keras_nlp/models/gpt2/gpt2_backbone_test.py @@ -84,20 +84,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - def test_create_layout_map(self): - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - with GPT2Backbone.create_layout_map(mesh).scope(): - GPT2Backbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - # Using DTensor enables the mlir bridge as a side effect. Eventually - # this will be default, but for now we have compile errors with the - # bridge elsewhere and must disable. See - # https://github.com/keras-team/keras-nlp/issues/1001 - tf.config.experimental.disable_mlir_bridge() diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm.py b/keras_nlp/models/gpt2/gpt2_causal_lm.py index 23dcc41664..44eebd0a20 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm.py @@ -325,39 +325,3 @@ def next(prompt, cache, index): "token_ids": token_ids, "padding_mask": padding_mask, } - - @classmethod - def create_layout_map(cls, mesh): - """Create a DTensor layout map for a GPT2CausalLM. - - Given a DTensor mesh describing a list of devices, this method returns a - DTensor layout map for creating a `keras_nlp.models.GPT2CausalLM` - instance. This mapping describes how to distribute all model weights - across multiple devices. For an overview of DTensor concepts, see - [this guide](https://www.tensorflow.org/guide/dtensor_overview). - - Args: - mesh: A 2D `tf.experimental.dtensor.Mesh` describing the arrangement - of devices for running distributed computation. The - first dimension in the mesh is expected to be for data parallel - distribution, and the second for model parallel distribution. - - Returns: - A `keras.dtensor.experimental.LayoutMap` which contains the - proper layout to weights mapping for the model parallel setting. - - Examples: - ```python - keras.backend.experimental.enable_tf_random_generator() - keras.utils.set_random_seed(1337) - - # Update both dimensions below for a multi-device setting. - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - layout_map = keras_nlp.models.GPT2CausalLM.create_layout_map(mesh) - - with layout_map.scope(): - gpt2_lm = keras_nlp.models.GPT2CausalLM.from_preset("gpt2_base_en") - ``` - """ - # As this task has no new variables, we just re-use the backbone method. - return cls.backbone_cls.create_layout_map(mesh) diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py index c50b6c5cf4..412083b275 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py @@ -165,13 +165,3 @@ def test_saved_model(self): keras.utils.set_random_seed(42) restored_output = restored_model.predict(self.raw_batch) self.assertAllClose(model_output, restored_output) - - def test_create_layout_map(self): - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - with GPT2CausalLM.create_layout_map(mesh).scope(): - GPT2CausalLM(backbone=self.backbone) - # Using DTensor enables the mlir bridge as a side effect. Eventually - # this will be default, but for now we have compile errors with the - # bridge elsewhere and must disable. 
See - # https://github.com/keras-team/keras-nlp/issues/1001 - tf.config.experimental.disable_mlir_bridge() diff --git a/keras_nlp/models/opt/opt_backbone.py b/keras_nlp/models/opt/opt_backbone.py index 8fe37472a0..ff1495ba9f 100644 --- a/keras_nlp/models/opt/opt_backbone.py +++ b/keras_nlp/models/opt/opt_backbone.py @@ -14,10 +14,6 @@ import copy -from tensorflow.experimental import dtensor -from tensorflow.experimental.dtensor import Layout -from tensorflow.keras.dtensor.experimental import LayoutMap - from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.token_and_position_embedding import ( @@ -168,71 +164,3 @@ def get_config(self): @classproperty def presets(cls): return copy.deepcopy(backbone_presets) - - @classmethod - def create_layout_map(cls, mesh): - """Create a DTensor layout map for an OPTBackbone. - - Given a DTensor mesh describing a list of devices, this method returns a - DTensor layout map for creating a `keras_nlp.models.OPTBackbone` - instance. This mapping describes how to distribute all model weights - across multiple devices. For an overview of DTensor concepts, see - [this guide](https://www.tensorflow.org/guide/dtensor_overview). - - Args: - mesh: A 2D `tf.experimental.dtensor.Mesh` describing the arrangement - of devices for running distributed computation. The - first dimension in the mesh is expected to be for data parallel - distribution, and the second for model parallel distribution. - - Returns: - A `tf.keras.dtensor.experimental.LayoutMap` which contains the - proper layout to weights mapping for the model parallel setting. - - Examples: - ```python - keras.backend.experimental.enable_tf_random_generator() - keras.utils.set_random_seed(1337) - - # Update both dimensions below for a multi-device setting. - mesh = dtensor.create_mesh([("batch", 1), ("model", 1)]) - layout_map = keras_nlp.models.OPTBackbone.create_layout_map(mesh) - - with layout_map.scope(): - model = keras_nlp.models.OPTBackbone.from_preset("opt_125m_en") - ``` - """ - # We assert the mesh is 2D, and assume the first mesh dim is for data - # parallel and the second dim is for model parallel. - mesh_shape = mesh.shape() - if len(mesh_shape) != 2: - raise ValueError( - f"Expect to create layout based on 2D mesh, received {mesh}" - ) - _, model_dim = mesh.dim_names - unshard_dim = dtensor.UNSHARDED - - layout_map = LayoutMap(mesh=mesh) - # Embedding sharding - layout_map[r".*embeddings"] = Layout([unshard_dim, model_dim], mesh) - - # Transformer block sharding - layout_map[r".*_(query|key|value)_dense.kernel"] = Layout( - [unshard_dim, unshard_dim, model_dim], mesh - ) - layout_map[r".*_(query|key|value)_dense.bias"] = Layout( - [model_dim, unshard_dim], mesh - ) - layout_map[r".*_feedforward_intermediate_dense.kernel"] = Layout( - [unshard_dim, model_dim], mesh - ) - layout_map[r".*_feedforward_intermediate_dense.bias"] = Layout( - [model_dim], mesh - ) - layout_map[r".*_feedforward_output_dense.kernel"] = Layout( - [model_dim, unshard_dim], mesh - ) - layout_map[r".*_feedforward_output_dense.bias"] = Layout( - [unshard_dim], mesh - ) - return layout_map diff --git a/keras_nlp/models/opt/opt_backbone_test.py b/keras_nlp/models/opt/opt_backbone_test.py index 1d7e54889c..c887001040 100644 --- a/keras_nlp/models/opt/opt_backbone_test.py +++ b/keras_nlp/models/opt/opt_backbone_test.py @@ -84,20 +84,3 @@ def test_saved_model(self): # Check that output matches. 
restored_output = restored_model(self.input_batch) self.assertAllClose(model_output, restored_output) - - def test_create_layout_map(self): - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - with OPTBackbone.create_layout_map(mesh).scope(): - OPTBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - # Using DTensor enables the mlir bridge as a side effect. Eventually - # this will be default, but for now we have compile errors with the - # bridge elsewhere and must disable. See - # https://github.com/keras-team/keras-nlp/issues/1001 - tf.config.experimental.disable_mlir_bridge() diff --git a/keras_nlp/models/opt/opt_causal_lm.py b/keras_nlp/models/opt/opt_causal_lm.py index f0b0682749..6197a87ffd 100644 --- a/keras_nlp/models/opt/opt_causal_lm.py +++ b/keras_nlp/models/opt/opt_causal_lm.py @@ -321,39 +321,3 @@ def next(prompt, cache, index): "token_ids": token_ids, "padding_mask": padding_mask, } - - @classmethod - def create_layout_map(cls, mesh): - """Create a DTensor layout map for an OPTCausalLM. - - Given a DTensor mesh describing a list of devices, this method returns a - DTensor layout map for creating a `keras_nlp.models.OPTCausalLM` - instance. This mapping describes how to distribute all model weights - across multiple devices. For an overview of DTensor concepts, see - [this guide](https://www.tensorflow.org/guide/dtensor_overview). - - Args: - mesh: A 2D `tf.experimental.dtensor.Mesh` describing the arrangement - of devices for running distributed computation. The - first dimension in the mesh is expected to be for data parallel - distribution, and the second for model parallel distribution. - - Returns: - A `tf.keras.dtensor.experimental.LayoutMap` which contains the - proper layout to weights mapping for the model parallel setting. - - Examples: - ```python - keras.backend.experimental.enable_tf_random_generator() - keras.utils.set_random_seed(1337) - - # Update both dimensions below for a multi-device setting. - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - layout_map = keras_nlp.models.OPTCausalLM.create_layout_map(mesh) - - with layout_map.scope(): - opt_lm = keras_nlp.models.OPTCausalLM.from_preset("opt_125m_en") - ``` - """ - # As this task has no new variables, we just re-use the backbone method. - return cls.backbone_cls.create_layout_map(mesh) diff --git a/keras_nlp/models/opt/opt_causal_lm_test.py b/keras_nlp/models/opt/opt_causal_lm_test.py index 19325c7b0a..1e8fcb8785 100644 --- a/keras_nlp/models/opt/opt_causal_lm_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_test.py @@ -171,13 +171,3 @@ def test_saved_model(self): keras.utils.set_random_seed(42) restored_output = restored_model.predict(self.raw_batch) self.assertAllClose(model_output, restored_output) - - def test_create_layout_map(self): - mesh = tf.experimental.dtensor.create_mesh([("batch", 1), ("model", 1)]) - with OPTCausalLM.create_layout_map(mesh).scope(): - OPTCausalLM(backbone=self.backbone) - # Using DTensor enables the mlir bridge as a side effect. Eventually - # this will be default, but for now we have compile errors with the - # bridge elsewhere and must disable. 
See
-        # https://github.com/keras-team/keras-nlp/issues/1001
-        tf.config.experimental.disable_mlir_bridge()

From 07e1cc292192a6393f13118cf03dc73f02cfccfb Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 12 Oct 2023 13:53:02 -0700
Subject: [PATCH 10/87] Add a lora dense layer (#1263)

* Add a lora dense layer

Co-authored-by: Abheesht 

* address comments

* Fix merge conflict

* minor fix

* another einsum restriction

* Last doc nit from Ian

---------

Co-authored-by: Abheesht 
---
 keras_nlp/conftest.py                        |  10 +-
 keras_nlp/layers/__init__.py                 |   1 +
 keras_nlp/layers/modeling/lora_dense.py      | 234 +++++++++++++++++++
 keras_nlp/layers/modeling/lora_dense_test.py | 135 +++++++++++
 4 files changed, 378 insertions(+), 2 deletions(-)
 create mode 100644 keras_nlp/layers/modeling/lora_dense.py
 create mode 100644 keras_nlp/layers/modeling/lora_dense_test.py

diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py
index 3a23ee475d..c918090ae6 100644
--- a/keras_nlp/conftest.py
+++ b/keras_nlp/conftest.py
@@ -69,17 +69,23 @@ def pytest_collection_modifyitems(config, items):
         not run_extra_large_tests,
         reason="need --run_extra_large option to run",
     )
-    skip_tf_only = pytest.mark.skipif(
+    tf_only = pytest.mark.skipif(
         not backend_config.backend() == "tensorflow",
         reason="tests only run on tf backend",
     )
+    multi_backend_only = pytest.mark.skipif(
+        not backend_config.multi_backend(),
+        reason="tests only run with multi-backend keras",
+    )
     for item in items:
         if "large" in item.keywords:
             item.add_marker(skip_large)
         if "extra_large" in item.keywords:
             item.add_marker(skip_extra_large)
         if "tf_only" in item.keywords:
-            item.add_marker(skip_tf_only)
+            item.add_marker(tf_only)
+        if "multi_backend_only" in item.keywords:
+            item.add_marker(multi_backend_only)


 # Disable traceback filtering for quicker debugging of tests failures.
diff --git a/keras_nlp/layers/__init__.py b/keras_nlp/layers/__init__.py
index 105f511552..a16dd2b4e3 100644
--- a/keras_nlp/layers/__init__.py
+++ b/keras_nlp/layers/__init__.py
@@ -16,6 +16,7 @@
     CachedMultiHeadAttention,
 )
 from keras_nlp.layers.modeling.f_net_encoder import FNetEncoder
+from keras_nlp.layers.modeling.lora_dense import LoraDense
 from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead
 from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
 from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding
diff --git a/keras_nlp/layers/modeling/lora_dense.py b/keras_nlp/layers/modeling/lora_dense.py
new file mode 100644
index 0000000000..3bc23e79f5
--- /dev/null
+++ b/keras_nlp/layers/modeling/lora_dense.py
@@ -0,0 +1,234 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
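+
+# Conceptually, LoRA keeps the wrapped dense kernel frozen and learns a
+# low-rank update, so for a `Dense` layer the effective transformation is
+#   y = dense(x) + (alpha / rank) * (x @ lora_a) @ lora_b
+# where `lora_a` has shape (input_dim, rank) and `lora_b` has shape
+# (rank, *kernel.shape[1:]). For an `EinsumDense` layer, the second
+# contraction reuses the layer's own einsum equation. See `build()`,
+# `call()` and `merge_weights()` below.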
+import re
+
+from keras_nlp.api_export import keras_nlp_export
+from keras_nlp.backend import config
+from keras_nlp.backend import keras
+from keras_nlp.backend import ops
+
+
+def validate_einsum_equation(equation):
+    # For simplicity, we greatly restrict possible einsum equations. The final
+    # axis of the input must be the first axis of our kernel, and must not
+    # appear in our output.
+    left, right, output = re.split(",|->", equation)
+    valid = (
+        left[-1] == right[0]
+        and left[-1] not in output
+        and set(left[:-1]).isdisjoint(set(right[1:]))
+    )
+    if not valid:
+        raise ValueError(
+            "When passing an `EinsumDense` layer to a `LoraDense` layer, the "
+            "einsum `equation` must always have the form `*x,x*->*`, where "
+            "each `*` can be any sequence. Conceptually, the `equation` should "
+            "always represent a dense matmul on the last axis of the input. "
+            f"Received invalid equation `'{equation}'`."
+        )
+
+
+@keras_nlp_export("keras_nlp.layers.LoraDense")
+class LoraDense(keras.layers.Layer):
+    """A LoRA adapter layer for a dense input layer.
+
+    This layer implements a low-rank decomposition of a dense transformation, as
+    described in [LoRA: Low-Rank Adaptation Of Large Language Models](https://arxiv.org/pdf/2106.09685.pdf).
+    This layer can be used to replace a dense layer with a layer whose
+    parameters are mostly frozen.
+
+    By default, this layer takes in an `inner_dense` layer, freezes its
+    parameters, and builds a low-rank decomposed update to sum with the original
+    `inner_dense` output. These update parameters can be merged back into the
+    `inner_dense` kernel by calling `merge_weights()`.
+
+    Args:
+        inner_dense: A `keras.layers.Dense` or `keras.layers.EinsumDense`.
+            The inner dense layer to freeze and wrap with the `LoraDense`
+            layer. Note that for `EinsumDense` layers, the einsum equation must
+            represent a dense transformation on the last axis of the input,
+            though adding new axes to the output (e.g. a multi-head axis) is
+            allowed.
+        rank: int. The inner rank of the decomposed dense transformation. The
+            lower this number, the fewer trainable parameters the layer will
+            have.
+        alpha: float. A constant value used for scaling the lora update. The
+            lora update to the original dense transformation will be scaled by
+            `alpha / rank`.
+        lora_a_initializer: The initializer to use for the inner projection
+            from layer inputs to the inner `rank` intermediate outputs.
+        freeze_kernel: If true, the kernel of the inner dense layer will have
+            `trainable` set to `False`.
+        freeze_bias: If true, the bias of the inner dense layer will have
+            `trainable` set to `False`.
+        **kwargs: other keyword arguments.
+
+    Examples:
+
+    Wrap a `Dense` layer.
+    ```python
+    batch_size, feature_size = 4, 16
+    rank = 4
+    inputs = np.random.uniform(size=(batch_size, feature_size))
+    inner_dense = keras.layers.Dense(feature_size)
+    lora_dense = keras_nlp.layers.LoraDense(inner_dense, rank=4)
+    # Output with inner dense begins equal.
+    assert np.allclose(inner_dense(inputs), lora_dense(inputs))
+
+    # Add some random updates to the lora parameters.
+    lora_dense.lora_a.assign(np.random.uniform(size=(feature_size, rank)))
+    lora_dense.lora_b.assign(np.random.uniform(size=(rank, feature_size)))
+    assert not np.allclose(inner_dense(inputs), lora_dense(inputs))
+
+    # Merge the lora updates back into the inner dense kernel.
+    lora_dense.merge_weights()
+    assert np.allclose(inner_dense(inputs), lora_dense(inputs))
+    ```
+
+    Wrap an `EinsumDense` layer with a multi-head projection.
+ ```python + batch_size, sequence_length, feature_size = 4, 10, 16 + num_heads = 2 + rank = 4 + inputs = np.random.uniform(size=(batch_size, sequence_length, feature_size)) + inner_dense = keras.layers.EinsumDense( + "abc,cde->abde", + output_shape=(sequence_length, num_heads, feature_size // num_heads), + ) + lora_dense = keras_nlp.layers.LoraDense(inner_dense, rank=4) + # Output shape (4, 10, 2, 8) + lora_dense(inputs) + ``` + """ + + def __init__( + self, + inner_dense, + rank=8, + alpha=8.0, + lora_a_initializer="variance_scaling", + freeze_kernel=True, + freeze_bias=True, + **kwargs, + ): + # Default to the same dtype as our inner layer. + if "dtype" not in kwargs: + kwargs["dtype"] = inner_dense.dtype_policy + super().__init__(**kwargs) + + if not config.multi_backend(): + raise ValueError( + "Lora only works with multi-backend Keras 3. Please set the " + "`KERAS_BACKEND` environment variable to use this API." + ) + + if isinstance(inner_dense, keras.layers.Dense): + self.inner_dense = inner_dense + elif isinstance(inner_dense, keras.layers.EinsumDense): + self.inner_dense = inner_dense + validate_einsum_equation(inner_dense.equation) + else: + raise ValueError( + "Only `Dense` and `EinsumDense` inner layers are supported. " + f"Received: inner_dense={inner_dense}" + ) + + self.rank = rank + self.alpha = alpha + self.scale = alpha / rank + self.freeze_kernel = freeze_kernel + self.freeze_bias = freeze_bias + self.lora_a_initializer = keras.initializers.get(lora_a_initializer) + + if inner_dense.built: + self.build_from_config(inner_dense.get_build_config()) + + def build(self, inputs_shape): + if not self.inner_dense.built: + self.inner_dense.build(inputs_shape) + + if self.freeze_kernel and self.inner_dense.kernel is not None: + self.inner_dense.kernel.trainable = False + + if self.freeze_bias and self.inner_dense.bias is not None: + self.inner_dense.bias.trainable = False + + input_dim = inputs_shape[-1] + self.lora_a = self.add_weight( + name="lora_a", + shape=(input_dim, self.rank), + initializer=self.lora_a_initializer, + ) + kernel_shape = self.inner_dense.kernel.shape + self.lora_b = self.add_weight( + name="lora_b", + shape=(self.rank,) + kernel_shape[1:], + initializer="zeros", + ) + self.built = True + + def merge_weights(self): + """Merge lora updates into the wrapped dense layer. + + This function should only be called outside of any compiled context + (e.g. not during `fit()`, `predict()` or `evaluate()`). It will merge + the updates from the lora layers into the original dense layer, and + re-initialize the lora variables. + """ + if not self.built: + return + + # Compute matmul of lora_a and lora_b to get a kernel sized update. + update = ops.tensordot(self.lora_a, self.lora_b, axes=([-1], [0])) + update = update * ops.cast(self.scale, update.dtype) + # Add lora updates back into the inner dense kernel. + self.inner_dense.kernel.assign_add(update) + # Re-initialize lora weights. + self.lora_a.assign( + self.lora_a_initializer(self.lora_a.shape, self.lora_a.dtype) + ) + self.lora_b.assign(ops.zeros_like(self.lora_b)) + + def call(self, inputs): + original_output = self.inner_dense(inputs) + # Compute the low-rank intermediate output. + update = ops.matmul(inputs, self.lora_a) + # Use the matching dense computation for a Dense or EinsumDense. 
+ if isinstance(self.inner_dense, keras.layers.Dense): + update = ops.matmul(update, self.lora_b) + else: + update = ops.einsum(self.inner_dense.equation, update, self.lora_b) + # Scale and sum the lora update with the original frozen output. + return original_output + update * ops.cast(self.scale, update.dtype) + + @classmethod + def from_config(cls, config): + config["inner_dense"] = keras.layers.deserialize(config["inner_dense"]) + return super().from_config(config) + + def get_config(self): + config = super().get_config() + config.update( + { + "inner_dense": keras.layers.serialize(self.inner_dense), + "rank": self.rank, + "alpha": self.alpha, + "lora_a_initializer": keras.initializers.serialize( + self.lora_a_initializer + ), + "freeze_kernel": self.freeze_kernel, + "freeze_bias": self.freeze_bias, + } + ) + return config diff --git a/keras_nlp/layers/modeling/lora_dense_test.py b/keras_nlp/layers/modeling/lora_dense_test.py new file mode 100644 index 0000000000..55b7a686df --- /dev/null +++ b/keras_nlp/layers/modeling/lora_dense_test.py @@ -0,0 +1,135 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pytest + +from keras_nlp.backend import keras +from keras_nlp.backend import random +from keras_nlp.layers.modeling.lora_dense import LoraDense +from keras_nlp.tests.test_case import TestCase + + +@pytest.mark.multi_backend_only +class LoraDenseTest(TestCase): + def test_layer_behaviors(self): + self.run_layer_test( + layer_cls=LoraDense, + init_kwargs={ + "inner_dense": keras.layers.Dense(16), + "rank": 2, + "alpha": 16, + "lora_a_initializer": "HeNormal", + }, + input_data=random.uniform(shape=(2, 4, 8)), + expected_output_shape=(2, 4, 16), + expected_num_trainable_weights=2, + expected_num_non_trainable_weights=2, + expected_num_non_trainable_variables=2, + run_mixed_precision_check=False, + ) + + def test_layer_behaviors_einsum(self): + self.run_layer_test( + layer_cls=LoraDense, + init_kwargs={ + "inner_dense": keras.layers.EinsumDense( + "abc,cde->abde", + output_shape=(None, 2, 16), + ), + "lora_a_initializer": "HeNormal", + }, + input_data=random.uniform(shape=(2, 4, 8)), + expected_output_shape=(2, 4, 2, 16), + expected_num_trainable_weights=2, + expected_num_non_trainable_weights=1, + expected_num_non_trainable_variables=1, + run_mixed_precision_check=False, + ) + + def test_merge_dense(self): + inner_dense = keras.layers.Dense(16) + layer = LoraDense(inner_dense, rank=4) + layer.build((2, 16)) + layer.lora_a.assign(random.uniform(shape=(16, 4))) + layer.lora_b.assign(random.uniform(shape=(4, 16))) + + input_data = random.uniform((2, 16)) + lora_output = layer(input_data) + dense_output = inner_dense(input_data) + self.assertNotAllClose(lora_output, dense_output) + + layer.merge_weights() + merged_lora_output = layer(input_data) + dense_output = inner_dense(input_data) + self.assertAllClose(lora_output, merged_lora_output) + self.assertAllClose(lora_output, dense_output) + + def test_merge_einsum(self): + inner_dense = 
keras.layers.EinsumDense( + "abc,cde->abde", + output_shape=(None, 2, 16), + ) + layer = LoraDense(inner_dense, rank=4) + layer.build((2, 4, 16)) + layer.lora_a.assign(random.uniform(shape=(16, 4))) + layer.lora_b.assign(random.uniform(shape=(4, 2, 16))) + + input_data = random.uniform((2, 4, 16)) + lora_output = layer(input_data) + dense_output = inner_dense(input_data) + self.assertNotAllClose(lora_output, dense_output) + + layer.merge_weights() + merged_lora_output = layer(input_data) + dense_output = inner_dense(input_data) + self.assertAllClose(lora_output, merged_lora_output) + self.assertAllClose(lora_output, dense_output) + + def test_freezing(self): + inner_dense = keras.layers.Dense(16) + layer = LoraDense(inner_dense, freeze_bias=False) + layer.build((2, 16)) + self.assertFalse(inner_dense.kernel.trainable) + self.assertTrue(inner_dense.bias.trainable) + + inner_dense = keras.layers.Dense(16) + layer = LoraDense(inner_dense) + layer.build((2, 16)) + self.assertFalse(inner_dense.kernel.trainable) + self.assertFalse(inner_dense.bias.trainable) + + def test_errors_if_not_dense(self): + with self.assertRaises(ValueError): + LoraDense(keras.layers.Concatenate()) + + def test_errors_invalid_einsum(self): + with self.assertRaises(ValueError): + # Kernel feature dim in the wrong place. + einsum = keras.layers.EinsumDense("abc,dec->abde", (2, 2, 16)) + LoraDense(einsum, rank=4) + + with self.assertRaises(ValueError): + # Input feature dim in the wrong place. + einsum = keras.layers.EinsumDense("acb,cde->abde", (2, 2, 16)) + LoraDense(einsum, rank=4) + + with self.assertRaises(ValueError): + # Input feature dim not summed over. + einsum = keras.layers.EinsumDense("abc,cde->abcde", (2, 2, 2, 16)) + LoraDense(einsum, rank=4) + + with self.assertRaises(ValueError): + # Double summations. + einsum = keras.layers.EinsumDense("abcd,cde->abe", (2, 2, 16)) + LoraDense(einsum, rank=4) From ab376b1654e7a4dbf15218b45b5cf951f3f40791 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Tue, 17 Oct 2023 14:47:46 -0700 Subject: [PATCH 11/87] Factor out testing routines for models (#1269) * Factor out testing routines for models Model testing is one of the largest places we have a ton of duplicated code. It makes it hard to add things like LoRA or model parallel training with good coverage without a lot of busy work. This factors out some new testing routines. - `run_preprocessing_layer_test` - `run_model_saving_test` - `run_backbone_test` - `run_task_test` These move the bulk of common enumerated tests that are shared across all models into a single place we can extend for new common modeling features. 
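
For example (a sketch of the new pattern; the keyword names below are the
ones used by the ALBERT diffs in this change), a backbone test now reduces
to a single call:

```python
def test_backbone_basics(self):
    self.run_backbone_test(
        cls=AlbertBackbone,
        init_kwargs=self.init_kwargs,
        input_data=self.input_data,
        expected_output_shape={
            "sequence_output": (2, 5, 2),
            "pooled_output": (2, 2),
        },
    )
```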
* gpt2 consolidated testing * opt consolidate testing * gpt neox consolidate testing * albert consolidate testing * deberta consolidate testing * f_net consolidated testing * distil_bert consolidated testing * roberta consolidated testing * xlm roberta consolidated testing * xlnet consolidated testing * t5 consolidated testing * whisper consolidated testing * bart consolidated testing * Remove preset_tests from docs * t5 fix * roberta fix * torch gpu fix * Simplify check * missing deberta file --- CONTRIBUTING_MODELS.md | 1 - .../cached_multi_head_attention_test.py | 2 +- .../layers/modeling/f_net_encoder_test.py | 2 +- keras_nlp/layers/modeling/lora_dense_test.py | 4 +- .../layers/modeling/masked_lm_head_test.py | 4 +- .../modeling/position_embedding_test.py | 4 +- .../modeling/reversible_embedding_test.py | 2 +- .../layers/modeling/rotary_embedding_test.py | 4 +- .../modeling/sine_position_encoding_test.py | 4 +- .../token_and_position_embedding_test.py | 2 +- .../modeling/transformer_decoder_test.py | 4 +- .../modeling/transformer_encoder_test.py | 2 +- .../models/albert/albert_backbone_test.py | 127 ++++----- .../models/albert/albert_classifier_test.py | 132 +++------ .../albert_masked_lm_preprocessor_test.py | 147 ++++------ .../models/albert/albert_masked_lm_test.py | 101 +++---- .../models/albert/albert_preprocessor_test.py | 151 +++-------- .../models/albert/albert_presets_test.py | 193 -------------- .../models/albert/albert_tokenizer_test.py | 61 ++--- keras_nlp/models/bart/bart_backbone_test.py | 126 +++++---- .../models/bart/bart_preprocessor_test.py | 187 ++++--------- keras_nlp/models/bart/bart_presets_test.py | 143 ---------- .../bart_seq_2_seq_lm_preprocessor_test.py | 174 +++++------- .../models/bart/bart_seq_2_seq_lm_test.py | 189 +++++-------- keras_nlp/models/bart/bart_tokenizer_test.py | 88 +++--- keras_nlp/models/bert/bert_backbone_test.py | 116 ++++---- keras_nlp/models/bert/bert_classifier_test.py | 111 +++----- .../bert/bert_masked_lm_preprocessor_test.py | 143 ++++------ keras_nlp/models/bert/bert_masked_lm_test.py | 81 ++---- .../models/bert/bert_preprocessor_test.py | 118 +++------ keras_nlp/models/bert/bert_presets_test.py | 241 ----------------- keras_nlp/models/bert/bert_tokenizer_test.py | 59 +++-- .../deberta_v3/deberta_v3_backbone_test.py | 108 ++++---- .../deberta_v3/deberta_v3_classifier_test.py | 120 +++------ .../deberta_v3_masked_lm_preprocessor_test.py | 141 ++++------ .../deberta_v3/deberta_v3_masked_lm_test.py | 93 +++---- .../deberta_v3_preprocessor_test.py | 127 +++------ .../deberta_v3/deberta_v3_presets_test.py | 203 -------------- .../deberta_v3/deberta_v3_tokenizer_test.py | 88 +++--- .../distil_bert/distil_bert_backbone_test.py | 105 ++++---- .../distil_bert_classifier_test.py | 116 +++----- ...distil_bert_masked_lm_preprocessor_test.py | 122 ++++----- .../distil_bert/distil_bert_masked_lm_test.py | 79 ++---- .../distil_bert_preprocessor_test.py | 103 +++----- .../distil_bert/distil_bert_presets_test.py | 194 -------------- .../distil_bert/distil_bert_tokenizer_test.py | 57 ++-- keras_nlp/models/f_net/f_net_backbone_test.py | 109 ++++---- .../models/f_net/f_net_classifier_test.py | 133 +++------- .../f_net_masked_lm_preprocessor_test.py | 133 ++++------ .../models/f_net/f_net_masked_lm_test.py | 98 +++---- .../models/f_net/f_net_preprocessor_test.py | 140 +++------- keras_nlp/models/f_net/f_net_presets_test.py | 180 ------------- .../models/f_net/f_net_tokenizer_test.py | 69 +++-- keras_nlp/models/gpt2/gpt2_backbone_test.py | 104 ++++---- 
.../gpt2/gpt2_causal_lm_preprocessor_test.py | 108 +++----- keras_nlp/models/gpt2/gpt2_causal_lm_test.py | 131 ++++----- .../models/gpt2/gpt2_preprocessor_test.py | 92 +++---- keras_nlp/models/gpt2/gpt2_presets_test.py | 110 -------- keras_nlp/models/gpt2/gpt2_tokenizer_test.py | 98 +++---- .../gpt_neo_x/gpt_neo_x_backbone_test.py | 84 ++---- .../gpt_neo_x_causal_lm_preprocessor_test.py | 100 +++---- .../gpt_neo_x/gpt_neo_x_causal_lm_test.py | 131 +++------ .../gpt_neo_x/gpt_neo_x_preprocessor_test.py | 85 ++---- .../gpt_neo_x/gpt_neo_x_tokenizer_test.py | 82 ++---- keras_nlp/models/opt/opt_backbone_test.py | 104 ++++---- .../opt/opt_causal_lm_preprocessor_test.py | 127 ++++----- keras_nlp/models/opt/opt_causal_lm_test.py | 142 ++++------ keras_nlp/models/opt/opt_preprocessor_test.py | 107 +++----- keras_nlp/models/opt/opt_presets_test.py | 110 -------- keras_nlp/models/opt/opt_tokenizer_test.py | 87 +++--- .../models/roberta/roberta_backbone_test.py | 109 ++++---- .../models/roberta/roberta_classifier_test.py | 140 +++------- .../roberta_masked_lm_preprocessor_test.py | 167 ++++-------- .../models/roberta/roberta_masked_lm_test.py | 114 +++----- .../roberta/roberta_preprocessor_test.py | 155 +++-------- .../models/roberta/roberta_presets_test.py | 250 ------------------ .../models/roberta/roberta_tokenizer_test.py | 89 +++---- keras_nlp/models/t5/t5_backbone_test.py | 114 ++------ keras_nlp/models/t5/t5_tokenizer_test.py | 39 +-- .../whisper_audio_feature_extractor_test.py | 65 ++--- .../models/whisper/whisper_backbone_test.py | 145 +++++----- .../whisper/whisper_preprocessor_test.py | 157 ++--------- .../models/whisper/whisper_presets_test.py | 133 ---------- .../models/whisper/whisper_tokenizer_test.py | 105 ++++---- .../xlm_roberta/xlm_roberta_backbone_test.py | 108 ++++---- .../xlm_roberta_classifier_test.py | 121 +++------ ...xlm_roberta_masked_lm_preprocessor_test.py | 159 ++++------- .../xlm_roberta/xlm_roberta_masked_lm_test.py | 109 +++----- .../xlm_roberta_preprocessor_test.py | 143 ++++------ .../xlm_roberta/xlm_roberta_presets_test.py | 198 -------------- .../xlm_roberta/xlm_roberta_tokenizer_test.py | 106 +++----- keras_nlp/models/xlnet/xlnet_backbone_test.py | 81 ++---- keras_nlp/tests/test_case.py | 210 ++++++++++++++- keras_nlp/utils/tensor_utils.py | 2 +- 94 files changed, 2977 insertions(+), 7085 deletions(-) delete mode 100644 keras_nlp/models/albert/albert_presets_test.py delete mode 100644 keras_nlp/models/bart/bart_presets_test.py delete mode 100644 keras_nlp/models/bert/bert_presets_test.py delete mode 100644 keras_nlp/models/deberta_v3/deberta_v3_presets_test.py delete mode 100644 keras_nlp/models/distil_bert/distil_bert_presets_test.py delete mode 100644 keras_nlp/models/f_net/f_net_presets_test.py delete mode 100644 keras_nlp/models/gpt2/gpt2_presets_test.py delete mode 100644 keras_nlp/models/opt/opt_presets_test.py delete mode 100644 keras_nlp/models/roberta/roberta_presets_test.py delete mode 100644 keras_nlp/models/whisper/whisper_presets_test.py delete mode 100644 keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py diff --git a/CONTRIBUTING_MODELS.md b/CONTRIBUTING_MODELS.md index de5e71af7a..40028aac15 100644 --- a/CONTRIBUTING_MODELS.md +++ b/CONTRIBUTING_MODELS.md @@ -35,7 +35,6 @@ Keep this checklist handy! 
### Step 4: PR #3 - Add XX Presets - [ ] An `xx/xx_presets.py` file with links to weights uploaded to a personal GCP bucket/Google Drive \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_presets.py)\]. -- [ ] An `xx/xx_presets_test.py` file with runnable tests for each preset \[[Example](https://github.com/keras-team/keras-nlp/blob/master/keras_nlp/models/distil_bert/distil_bert_presets_test.py)\]. - [ ] A `tools/checkpoint_conversion/convert_xx_checkpoints.py` which is reusable script for converting checkpoints \[[Example](https://github.com/keras-team/keras-nlp/blob/master/tools/checkpoint_conversion/convert_distilbert_checkpoints.py)\]. - [ ] A Colab notebook link in the PR description, showing an end-to-end task such as text classification, etc. The task model can be built using the backbone model, with the task head on top \[[Example](https://gist.github.com/mattdangerw/bf0ca07fb66b6738150c8b56ee5bab4e)\]. diff --git a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py index 8e233f102e..4aa0998454 100644 --- a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py +++ b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py @@ -24,7 +24,7 @@ class CachedMultiHeadAttentionTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=CachedMultiHeadAttention, + cls=CachedMultiHeadAttention, init_kwargs={ "num_heads": 2, "key_dim": 4, diff --git a/keras_nlp/layers/modeling/f_net_encoder_test.py b/keras_nlp/layers/modeling/f_net_encoder_test.py index 06d759ada0..e5d0b1ea77 100644 --- a/keras_nlp/layers/modeling/f_net_encoder_test.py +++ b/keras_nlp/layers/modeling/f_net_encoder_test.py @@ -20,7 +20,7 @@ class FNetEncoderTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=FNetEncoder, + cls=FNetEncoder, init_kwargs={ "intermediate_dim": 4, "dropout": 0, diff --git a/keras_nlp/layers/modeling/lora_dense_test.py b/keras_nlp/layers/modeling/lora_dense_test.py index 55b7a686df..81b575310a 100644 --- a/keras_nlp/layers/modeling/lora_dense_test.py +++ b/keras_nlp/layers/modeling/lora_dense_test.py @@ -24,7 +24,7 @@ class LoraDenseTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=LoraDense, + cls=LoraDense, init_kwargs={ "inner_dense": keras.layers.Dense(16), "rank": 2, @@ -41,7 +41,7 @@ def test_layer_behaviors(self): def test_layer_behaviors_einsum(self): self.run_layer_test( - layer_cls=LoraDense, + cls=LoraDense, init_kwargs={ "inner_dense": keras.layers.EinsumDense( "abc,cde->abde", diff --git a/keras_nlp/layers/modeling/masked_lm_head_test.py b/keras_nlp/layers/modeling/masked_lm_head_test.py index 703c56521d..8d22ea0343 100644 --- a/keras_nlp/layers/modeling/masked_lm_head_test.py +++ b/keras_nlp/layers/modeling/masked_lm_head_test.py @@ -21,7 +21,7 @@ class MaskedLMHeadTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=MaskedLMHead, + cls=MaskedLMHead, init_kwargs={ "vocabulary_size": 100, "activation": "softmax", @@ -42,7 +42,7 @@ def test_layer_behaviors_with_embedding(self): embedding = ReversibleEmbedding(100, 16) embedding.build((4, 10)) self.run_layer_test( - layer_cls=MaskedLMHead, + cls=MaskedLMHead, init_kwargs={ "vocabulary_size": 100, "activation": "softmax", diff --git a/keras_nlp/layers/modeling/position_embedding_test.py 
b/keras_nlp/layers/modeling/position_embedding_test.py index e80cef8ce2..549411e0b8 100644 --- a/keras_nlp/layers/modeling/position_embedding_test.py +++ b/keras_nlp/layers/modeling/position_embedding_test.py @@ -31,7 +31,7 @@ def custom_init(shape, dtype=None): class PositionEmbeddingTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=PositionEmbedding, + cls=PositionEmbedding, init_kwargs={ "sequence_length": 21, }, @@ -42,7 +42,7 @@ def test_layer_behaviors(self): def test_layer_behaviors_4d(self): self.run_layer_test( - layer_cls=PositionEmbedding, + cls=PositionEmbedding, init_kwargs={ "sequence_length": 21, }, diff --git a/keras_nlp/layers/modeling/reversible_embedding_test.py b/keras_nlp/layers/modeling/reversible_embedding_test.py index dcd4599c04..0875759a77 100644 --- a/keras_nlp/layers/modeling/reversible_embedding_test.py +++ b/keras_nlp/layers/modeling/reversible_embedding_test.py @@ -32,7 +32,7 @@ class ReversibleEmbeddingTest(TestCase): ) def test_layer_behaviors_tied(self, tie_weights): self.run_layer_test( - layer_cls=ReversibleEmbedding, + cls=ReversibleEmbedding, init_kwargs={ "input_dim": 100, "output_dim": 32, diff --git a/keras_nlp/layers/modeling/rotary_embedding_test.py b/keras_nlp/layers/modeling/rotary_embedding_test.py index cb502f4570..9874f69e5e 100644 --- a/keras_nlp/layers/modeling/rotary_embedding_test.py +++ b/keras_nlp/layers/modeling/rotary_embedding_test.py @@ -22,7 +22,7 @@ class RotaryEmbeddingTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=RotaryEmbedding, + cls=RotaryEmbedding, init_kwargs={ "max_wavelength": 1000, "scaling_factor": 2.0, @@ -35,7 +35,7 @@ def test_layer_behaviors(self): def test_layer_behaviors_4d(self): self.run_layer_test( - layer_cls=RotaryEmbedding, + cls=RotaryEmbedding, init_kwargs={ "max_wavelength": 1000, }, diff --git a/keras_nlp/layers/modeling/sine_position_encoding_test.py b/keras_nlp/layers/modeling/sine_position_encoding_test.py index 2163d4ee6b..80dad26cbc 100644 --- a/keras_nlp/layers/modeling/sine_position_encoding_test.py +++ b/keras_nlp/layers/modeling/sine_position_encoding_test.py @@ -24,7 +24,7 @@ class SinePositionEncodingTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=SinePositionEncoding, + cls=SinePositionEncoding, init_kwargs={ "max_wavelength": 10000, }, @@ -34,7 +34,7 @@ def test_layer_behaviors(self): def test_layer_behaviors_4d(self): self.run_layer_test( - layer_cls=SinePositionEncoding, + cls=SinePositionEncoding, init_kwargs={ "max_wavelength": 10000, }, diff --git a/keras_nlp/layers/modeling/token_and_position_embedding_test.py b/keras_nlp/layers/modeling/token_and_position_embedding_test.py index b0c5949a2c..16269b5df0 100644 --- a/keras_nlp/layers/modeling/token_and_position_embedding_test.py +++ b/keras_nlp/layers/modeling/token_and_position_embedding_test.py @@ -26,7 +26,7 @@ class TokenAndPositionEmbeddingTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( - layer_cls=TokenAndPositionEmbedding, + cls=TokenAndPositionEmbedding, init_kwargs={ "vocabulary_size": 5, "sequence_length": 4, diff --git a/keras_nlp/layers/modeling/transformer_decoder_test.py b/keras_nlp/layers/modeling/transformer_decoder_test.py index f904d92511..2b54324f02 100644 --- a/keras_nlp/layers/modeling/transformer_decoder_test.py +++ b/keras_nlp/layers/modeling/transformer_decoder_test.py @@ -27,7 +27,7 @@ class TransformerDecoderTest(TestCase): ) def test_layer_behaviors(self, normalize_first): self.run_layer_test( 
-            layer_cls=TransformerDecoder,
+            cls=TransformerDecoder,
             init_kwargs={
                 "intermediate_dim": 4,
                 "num_heads": 2,
@@ -50,7 +50,7 @@ def test_layer_behaviors(self, normalize_first):
     def test_layer_behaviors_with_cross_attention(self, normalize_first):
         self.run_layer_test(
-            layer_cls=TransformerDecoder,
+            cls=TransformerDecoder,
             init_kwargs={
                 "intermediate_dim": 4,
                 "num_heads": 2,
diff --git a/keras_nlp/layers/modeling/transformer_encoder_test.py b/keras_nlp/layers/modeling/transformer_encoder_test.py
index 3882a0e7e2..844125c4b0 100644
--- a/keras_nlp/layers/modeling/transformer_encoder_test.py
+++ b/keras_nlp/layers/modeling/transformer_encoder_test.py
@@ -28,7 +28,7 @@ class TransformerEncoderTest(TestCase):
     )
     def test_layer_behaviors(self, normalize_first):
         self.run_layer_test(
-            layer_cls=TransformerEncoder,
+            cls=TransformerEncoder,
             init_kwargs={
                 "intermediate_dim": 4,
                 "num_heads": 2,
diff --git a/keras_nlp/models/albert/albert_backbone_test.py b/keras_nlp/models/albert/albert_backbone_test.py
index 7dc4bb7f7e..f1211e0fa3 100644
--- a/keras_nlp/models/albert/albert_backbone_test.py
+++ b/keras_nlp/models/albert/albert_backbone_test.py
@@ -12,66 +12,43 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import os
-
-import numpy as np
 import pytest
-import tensorflow as tf

-from keras_nlp.backend import keras
+from keras_nlp.backend import ops
 from keras_nlp.models.albert.albert_backbone import AlbertBackbone
 from keras_nlp.tests.test_case import TestCase


 class AlbertBackboneTest(TestCase):
     def setUp(self):
-        self.backbone = AlbertBackbone(
-            vocabulary_size=10,
-            num_layers=2,
-            num_heads=2,
-            num_groups=1,
-            num_inner_repetitions=1,
-            embedding_dim=16,
-            hidden_dim=2,
-            intermediate_dim=4,
-            max_sequence_length=5,
-        )
-        self.batch_size = 8
-        self.input_batch = {
-            "token_ids": np.ones((2, 5), dtype="int32"),
-            "segment_ids": np.ones((2, 5), dtype="int32"),
-            "padding_mask": np.ones((2, 5), dtype="int32"),
+        self.init_kwargs = {
+            "vocabulary_size": 10,
+            "num_layers": 2,
+            "num_heads": 2,
+            "num_groups": 1,
+            "num_inner_repetitions": 1,
+            "embedding_dim": 16,
+            "hidden_dim": 2,
+            "intermediate_dim": 4,
+            "max_sequence_length": 5,
+        }
+        self.input_data = {
+            "token_ids": ops.ones((2, 5), dtype="int32"),
+            "segment_ids": ops.zeros((2, 5), dtype="int32"),
+            "padding_mask": ops.ones((2, 5), dtype="int32"),
         }
-        self.input_dataset = tf.data.Dataset.from_tensor_slices(
-            self.input_batch
-        ).batch(2)
-
-    def test_valid_call_albert(self):
-        self.backbone(self.input_batch)
-
-    def test_name(self):
-        # Check default name passed through
-        self.assertRegexpMatches(self.backbone.name, "albert_backbone")
-
-    def test_variable_sequence_length_call_albert(self):
-        for seq_length in (2, 3, 4):
-            input_data = {
-                "token_ids": np.ones((2, seq_length), dtype="int32"),
-                "segment_ids": np.ones((2, seq_length), dtype="int32"),
-                "padding_mask": np.ones((2, seq_length), dtype="int32"),
-            }
-            self.backbone(input_data)
-
-    def test_predict(self):
-        self.backbone.predict(self.input_batch)
-        self.backbone.predict(self.input_dataset)
-
-    def test_serialization(self):
-        new_backbone = keras.saving.deserialize_keras_object(
-            keras.saving.serialize_keras_object(self.backbone)
+    def test_backbone_basics(self):
+        self.run_backbone_test(
+            cls=AlbertBackbone,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output_shape={
+                "sequence_output": (2, 5, 2),
+                "pooled_output": (2, 2),
+            },
         )
-
self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) def test_error_for_invalid_num_groups(self): with self.assertRaises(ValueError): @@ -88,16 +65,46 @@ def test_error_for_invalid_num_groups(self): @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, AlbertBackbone) + self.run_model_saving_test( + cls=AlbertBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model(self.input_batch) - self.assertAllClose( - model_output["pooled_output"], restored_output["pooled_output"] + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=AlbertBackbone, + preset="albert_base_en_uncased", + input_data={ + "token_ids": ops.array([[2, 13, 1, 3]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape={ + "sequence_output": (1, 4, 768), + "pooled_output": (1, 768), + }, + # The forward pass from a preset should be stable! + expected_partial_output={ + "sequence_output": ( + ops.array( + [1.830863, 1.698645, -1.819195, -0.53382, -0.38114] + ) + ), + "pooled_output": ( + ops.array( + [0.328261, -0.415397, -0.388745, 0.156846, 0.657874] + ) + ), + }, ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertBackbone.presets: + self.run_preset_test( + cls=AlbertBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/albert/albert_classifier_test.py b/keras_nlp/models/albert/albert_classifier_test.py index 71fef68ce1..e2581df6a1 100644 --- a/keras_nlp/models/albert/albert_classifier_test.py +++ b/keras_nlp/models/albert/albert_classifier_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,15 +13,10 @@ # limitations under the License. import io -import os -import numpy as np import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.albert.albert_backbone import AlbertBackbone from keras_nlp.models.albert.albert_classifier import AlbertClassifier from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor @@ -31,16 +26,13 @@ class AlbertClassifierTest(TestCase): def setUp(self): - # Setup model - + # Setup model. 
+ vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=10, + vocab_size=12, model_type="WORD", pad_id=0, unk_id=1, @@ -52,99 +44,53 @@ def setUp(self): eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - tokenizer = AlbertTokenizer(proto=self.proto) - self.preprocessor = AlbertPreprocessor( - tokenizer=tokenizer, + AlbertTokenizer(proto=bytes_io.getvalue()), sequence_length=5, ) self.backbone = AlbertBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, num_heads=2, - embedding_dim=2, hidden_dim=2, + embedding_dim=2, intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - - self.classifier = AlbertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) - - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. - self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss="sparse_categorical_crossentropy", - jit_compile=False, - ) - self.classifier.fit(self.preprocessed_dataset) - - def test_serialization(self): - # Defaults. - original = AlbertClassifier( - self.backbone, - num_classes=2, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. 
- original = AlbertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - name="test", - trainable=False, + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_classifier_basics(self): + self.run_task_test( + cls=AlbertClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) @pytest.mark.large - def test_saving_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back - self.assertIsInstance(restored_model, AlbertClassifier) + def test_saved_model(self): + self.run_model_saving_test( + cls=AlbertClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertClassifier.presets: + self.run_preset_test( + cls=AlbertClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py index d95b072108..36eef72f39 100644 --- a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.albert.albert_masked_lm_preprocessor import ( AlbertMaskedLMPreprocessor, ) @@ -27,13 +26,10 @@ class AlbertMaskedLMPreprocessorTest(TestCase): def setUp(self): - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) - + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", @@ -47,103 +43,62 @@ def setUp(self): eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - - proto = bytes_io.getvalue() - - tokenizer = AlbertTokenizer(proto=proto) - - self.preprocessor = AlbertMaskedLMPreprocessor( - tokenizer=tokenizer, + self.tokenizer = AlbertTokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. 
- mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=4, - sequence_length=12, + "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=AlbertMaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[5, 10, 6, 8]], + [[1.0, 1.0, 1.0, 1.0]], + ), ) - def test_preprocess_strings(self): - input_data = "the quick brown fox" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = ["the quick brown fox"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant(["the quick brown fox"] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_mask_multiple_sentences(self): - sentence_one = tf.constant("the quick") - sentence_two = tf.constant("brown fox") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [2, 4, 4, 3, 4, 4, 3, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 4, 5]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - def test_no_masking_zero_rate(self): no_mask_preprocessor = AlbertMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, mask_selection_length=4, sequence_length=12, ) - input_data = "the quick brown fox" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], 
+ [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=AlbertMaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/albert/albert_masked_lm_test.py b/keras_nlp/models/albert/albert_masked_lm_test.py index 9d9ea2478d..456b0edda4 100644 --- a/keras_nlp/models/albert/albert_masked_lm_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_test.py @@ -13,13 +13,10 @@ # limitations under the License. import io -import os import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.albert.albert_backbone import AlbertBackbone from keras_nlp.models.albert.albert_masked_lm import AlbertMaskedLM from keras_nlp.models.albert.albert_masked_lm_preprocessor import ( @@ -32,15 +29,12 @@ class AlbertMaskedLMTest(TestCase): def setUp(self): # Setup model. - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round", "an eagle flew"] - ) - + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=15, + vocab_size=12, model_type="WORD", pad_id=0, unk_id=1, @@ -52,13 +46,8 @@ def setUp(self): eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - - proto = bytes_io.getvalue() - - tokenizer = AlbertTokenizer(proto=proto) - self.preprocessor = AlbertMaskedLMPreprocessor( - tokenizer=tokenizer, + AlbertTokenizer(proto=bytes_io.getvalue()), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, @@ -70,65 +59,41 @@ def setUp(self): vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, num_heads=2, - embedding_dim=4, - hidden_dim=4, + hidden_dim=2, + embedding_dim=2, intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.masked_lm = AlbertMaskedLM( - self.backbone, - preprocessor=self.preprocessor, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. 
) - self.masked_lm_no_preprocessing = AlbertMaskedLM( - self.backbone, - preprocessor=None, + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_masked_lm_basics(self): + self.run_task_test( + cls=AlbertMaskedLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 5, 12), ) - self.raw_batch = [ - "quick brown fox", - "eagle flew over fox", - "the eagle flew quick", - "a brown eagle", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.masked_lm(self.preprocessed_batch) - - def test_albert_masked_lm_fit_default_compile(self): - self.masked_lm.fit(self.raw_dataset) - - def test_classifier_predict(self): - self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch) - - def test_classifier_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, - ) - self.masked_lm.fit(self.preprocessed_dataset) - @pytest.mark.large def test_saved_model(self): - model_output = self.masked_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.masked_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=AlbertMaskedLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, AlbertMaskedLM) - # Check that output matches. 
- restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertMaskedLM.presets: + self.run_preset_test( + cls=AlbertMaskedLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/albert/albert_preprocessor_test.py b/keras_nlp/models/albert/albert_preprocessor_test.py index 2830b85073..95cb2c832e 100644 --- a/keras_nlp/models/albert/albert_preprocessor_test.py +++ b/keras_nlp/models/albert/albert_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer from keras_nlp.tests.test_case import TestCase @@ -25,12 +24,10 @@ class AlbertPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", @@ -44,120 +41,44 @@ def setUp(self): eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.preprocessor = AlbertPreprocessor( - tokenizer=AlbertTokenizer(proto=self.proto), - sequence_length=12, + self.tokenizer = AlbertTokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ( + ["the quick brown fox"], + [1], # Pass through labels. + [1.0], # Pass through sample_weights. ) - def test_tokenize_strings(self): - input_data = "the quick brown fox" - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], [2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - output["segment_ids"], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - output["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - - def test_tokenize_list_of_strings(self): - # We should handle a list of strings as as batch. 
- input_data = ["the quick brown fox"] * 4 - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], - [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]] * 4, - ) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual( - x_out["token_ids"], - [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]] * 4, - ) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x_out["token_ids"], - [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]] * 4, - ) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_multiple_sentences(self): - sentence_one = tf.constant("the quick brown fox") - sentence_two = tf.constant("the earth") - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], - [2, 5, 10, 6, 8, 3, 5, 7, 3, 0, 0, 0], - ) - self.assertAllEqual( - output["segment_ids"], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0] - ) - self.assertAllEqual( - output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0] - ) - - def test_tokenize_multiple_batched_sentences(self): - sentence_one = tf.constant(["the quick brown fox"] * 4) - sentence_two = tf.constant(["the earth"] * 4) - # The first tuple or list is always interpreted as an enumeration of - # separate sequences to concatenate. - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], - [[2, 5, 10, 6, 8, 3, 5, 7, 3, 0, 0, 0]] * 4, - ) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4 + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=AlbertPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 5, 10, 6, 8, 3, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. 
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = AlbertPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertPreprocessor.presets: + self.run_preset_test( + cls=AlbertPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/albert/albert_presets_test.py b/keras_nlp/models/albert/albert_presets_test.py deleted file mode 100644 index f7576c6729..0000000000 --- a/keras_nlp/models/albert/albert_presets_test.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.albert.albert_backbone import AlbertBackbone -from keras_nlp.models.albert.albert_classifier import AlbertClassifier -from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor -from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class AlbertPresetSmokeTest(TestCase): - """ - A smoke test for ALBERT presets we run continuously. - This only tests the smallest weights we have available. Run with: - `pytest keras_nlp/models/albert/albert_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = AlbertTokenizer.from_preset( - "albert_base_en_uncased", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [13, 1, 438, 2231, 886, 2385, 9] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - preprocessor = AlbertPreprocessor.from_preset( - "albert_base_en_uncased", - sequence_length=4, - ) - outputs = preprocessor("The quick brown fox.")["token_ids"] - expected_outputs = [2, 13, 1, 3] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_classifier_output(self, load_weights): - input_data = ["The quick brown fox."] - model = AlbertClassifier.from_preset( - "albert_base_en_uncased", - num_classes=2, - load_weights=load_weights, - ) - # We don't assert output values, as the head weights are random. 
- model.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_classifier_output_without_preprocessing(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "segment_ids": ops.array([[0, 0, 0, 0]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = AlbertClassifier.from_preset( - "albert_base_en_uncased", - num_classes=2, - load_weights=load_weights, - preprocessor=None, - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[2, 13, 1, 3]]), - "segment_ids": ops.array([[0, 0, 0, 0]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = AlbertBackbone.from_preset( - "albert_base_en_uncased", load_weights=load_weights - ) - outputs = model(input_data) - if load_weights: - outputs = outputs["sequence_output"][0, 0, :5] - expected = [1.830863, 1.698645, -1.819195, -0.53382, -0.38114] - self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("albert_tokenizer", AlbertTokenizer), - ("albert_preprocessor", AlbertPreprocessor), - ("albert", AlbertBackbone), - ("albert_classifier", AlbertClassifier), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("albert_tokenizer", AlbertTokenizer, {}), - ("albert_preprocessor", AlbertPreprocessor, {}), - ("albert", AlbertBackbone, {}), - ("albert_classifier", AlbertClassifier, {"num_classes": 2}), - ) - def test_unknown_preset_error(self, cls, kwargs): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("albert_base_en_uncased_clowntown", **kwargs) - - -@pytest.mark.extra_large -class AlbertPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - This tests every ALBERT preset and is only run manually. 
- Run with: - `pytest keras_nlp/models/albert/albert_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_albert(self, load_weights): - for preset in AlbertBackbone.presets: - model = AlbertBackbone.from_preset( - preset, load_weights=load_weights - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), dtype="int64", maxval=model.vocabulary_size - ), - "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - model(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_albert_classifier(self, load_weights): - for preset in AlbertClassifier.presets: - classifier = AlbertClassifier.from_preset( - preset, - num_classes=2, - load_weights=load_weights, - ) - input_data = ["This quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_albert_classifier_without_preprocessing(self, load_weights): - for preset in AlbertClassifier.presets: - classifier = AlbertClassifier.from_preset( - preset, - num_classes=2, - preprocessor=None, - load_weights=load_weights, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - classifier.predict(input_data) - - def test_load_tokenizers(self): - for preset in AlbertTokenizer.presets: - tokenizer = AlbertTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") - - def test_load_preprocessors(self): - for preset in AlbertPreprocessor.presets: - preprocessor = AlbertPreprocessor.from_preset(preset) - preprocessor("The quick brown fox.") diff --git a/keras_nlp/models/albert/albert_tokenizer_test.py b/keras_nlp/models/albert/albert_tokenizer_test.py index fb7c145518..e645436c09 100644 --- a/keras_nlp/models/albert/albert_tokenizer_test.py +++ b/keras_nlp/models/albert/albert_tokenizer_test.py @@ -14,22 +14,19 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer from keras_nlp.tests.test_case import TestCase class AlbertTokenizerTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", @@ -43,28 +40,16 @@ def setUp(self): eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.tokenizer = AlbertTokenizer(proto=self.proto) - - def test_tokenize(self): - input_data = "the quick brown fox" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [5, 10, 6, 8]) - - def test_tokenize_batch(self): - input_data = ["the quick brown fox", "the earth is round"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[5, 10, 6, 8], [5, 7, 9, 11]]) + self.init_kwargs = {"proto": bytes_io.getvalue()} + self.input_data = ["the quick brown fox.", "the earth is round."] - def 
test_detokenize(self): - input_data = [[5, 10, 6, 8]] - output = self.tokenizer.detokenize(input_data) - self.assertEqual(output, ["the quick brown fox"]) - - def test_vocabulary_size(self): - tokenizer = AlbertTokenizer(proto=self.proto) - self.assertEqual(tokenizer.vocabulary_size(), 12) + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=AlbertTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 10, 6, 1], [5, 7, 9, 1]], + ) def test_errors_missing_special_tokens(self): bytes_io = io.BytesIO() @@ -79,10 +64,20 @@ def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): AlbertTokenizer(proto=bytes_io.getvalue()) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=AlbertTokenizer, + preset="albert_base_en_uncased", + input_data=["The quick brown fox."], + expected_output=[[13, 1, 438, 2231, 886, 2385, 9]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in AlbertTokenizer.presets: + self.run_preset_test( + cls=AlbertTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bart/bart_backbone_test.py b/keras_nlp/models/bart/bart_backbone_test.py index 129d0f824a..fe4b6af52a 100644 --- a/keras_nlp/models/bart/bart_backbone_test.py +++ b/keras_nlp/models/bart/bart_backbone_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,82 +12,80 @@ # See the License for the specific language governing permissions and # limitations under the License. 
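A note on the pattern introduced in the hunks below: NumPy arrays and `tf.data` fixtures are swapped for `keras_nlp.backend.ops`, the backend-agnostic shim these tests now use throughout, so the same fixtures run under any Keras backend. A minimal sketch of the resulting style (shapes here are illustrative only):

    from keras_nlp.backend import ops

    # The same dict of inputs works regardless of backend, since `ops`
    # resolves to the active backend's tensor operations.
    input_data = {
        "encoder_token_ids": ops.ones((2, 3), dtype="int32"),
        "encoder_padding_mask": ops.zeros((2, 3), dtype="int32"),
        "decoder_token_ids": ops.ones((2, 5), dtype="int32"),
        "decoder_padding_mask": ops.zeros((2, 5), dtype="int32"),
    }
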
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.bart.bart_backbone import BartBackbone from keras_nlp.tests.test_case import TestCase class BartBackboneTest(TestCase): def setUp(self): - self.backbone = BartBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=3, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "encoder_token_ids": np.ones((2, 5), dtype="int32"), - "encoder_padding_mask": np.ones((2, 5), dtype="int32"), - "decoder_token_ids": np.ones((2, 5), dtype="int32"), - "decoder_padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "encoder_token_ids": ops.ones((2, 3), dtype="int32"), + "encoder_padding_mask": ops.zeros((2, 3), dtype="int32"), + "decoder_token_ids": ops.ones((2, 5), dtype="int32"), + "decoder_padding_mask": ops.zeros((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call(self): - self.backbone(self.input_batch) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "bart_backbone") - - def test_variable_sequence_length_call(self): - for seq_length in (2, 3, 4): - input_data = { - "encoder_token_ids": np.ones((2, seq_length), dtype="int32"), - "encoder_padding_mask": np.ones((2, seq_length), dtype="int32"), - "decoder_token_ids": np.ones((2, seq_length), dtype="int32"), - "decoder_padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=BartBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape={ + "encoder_sequence_output": (2, 3, 2), + "decoder_sequence_output": (2, 5, 2), + }, ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, BartBackbone) - - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose( - model_output["encoder_sequence_output"], - restored_output["encoder_sequence_output"], + self.run_model_saving_test( + cls=BartBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, ) - self.assertAllClose( - model_output["decoder_sequence_output"], - restored_output["decoder_sequence_output"], + + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=BartBackbone, + preset="bart_base_en", + input_data={ + "encoder_token_ids": ops.array([[0, 133, 2119, 2]]), + "encoder_padding_mask": ops.array([[1, 1, 1, 1]]), + "decoder_token_ids": ops.array([[0, 7199, 14, 2119, 2]]), + "decoder_padding_mask": ops.array([[1, 1, 1, 1, 1]]), + }, + expected_output_shape={ + "encoder_sequence_output": (1, 4, 768), + "decoder_sequence_output": (1, 5, 768), + }, + # The forward pass from a preset should be stable! + expected_partial_output={ + "encoder_sequence_output": ops.array( + [-0.033, 0.013, -0.003, -0.012, -0.002] + ), + "decoder_sequence_output": ops.array( + [2.516, 2.489, 0.695, 8.057, 1.245] + ), + }, ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BartBackbone.presets: + self.run_preset_test( + cls=BartBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bart/bart_preprocessor_test.py b/keras_nlp/models/bart/bart_preprocessor_test.py index 189de29ed3..23cb7cae79 100644 --- a/keras_nlp/models/bart/bart_preprocessor_test.py +++ b/keras_nlp/models/bart/bart_preprocessor_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.bart.bart_preprocessor import BartPreprocessor from keras_nlp.models.bart.bart_tokenizer import BartTokenizer from keras_nlp.tests.test_case import TestCase @@ -22,141 +22,48 @@ class BartPreprocessorTest(TestCase): def setUp(self): - vocab = { - "": 0, - "": 1, - "": 2, - "Ġair": 3, - "plane": 4, - "Ġat": 5, - "port": 6, - "Ġkoh": 7, - "li": 8, - "Ġis": 9, - "Ġthe": 10, - "Ġbest": 11, - "": 12, + self.vocab = ["", "", "", "air", "Ġair", "plane", "Ġat"] + self.vocab += ["port", ""] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.tokenizer = BartTokenizer( + vocabulary=self.vocab, merges=self.merges + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "encoder_sequence_length": 5, + "decoder_sequence_length": 8, } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - - self.preprocessor = BartPreprocessor( - tokenizer=BartTokenizer( - vocabulary=vocab, - merges=merges, - ), - encoder_sequence_length=10, - decoder_sequence_length=9, - ) - - def test_tokenize_strings(self): - input_data = { - "encoder_text": " airplane at airport", - "decoder_text": " kohli is the best", - } - - output = self.preprocessor(input_data) - self.assertAllEqual( - output["encoder_token_ids"], [0, 3, 4, 5, 3, 6, 2, 1, 1, 1] - ) - self.assertAllEqual( - output["encoder_padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0] - ) - self.assertAllEqual( - 
output["decoder_token_ids"], [2, 0, 7, 8, 9, 10, 11, 2, 1] - ) - self.assertAllEqual( - output["decoder_padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 0] - ) - - def test_key_order(self): - self.assertAllClose( - self.preprocessor( - { - "encoder_text": " airplane at airport", - "decoder_text": " kohli is the best", - } - ), - self.preprocessor( + self.input_data = ( + { + "encoder_text": [" airplane at airport"], + "decoder_text": [" airplane airport"], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. + ) + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=BartPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( { - "decoder_text": " kohli is the best", - "encoder_text": " airplane at airport", - } + "encoder_token_ids": [[0, 4, 5, 6, 2]], + "encoder_padding_mask": [[1, 1, 1, 1, 1]], + "decoder_token_ids": [[2, 0, 4, 5, 4, 7, 2, 1]], + "decoder_padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. ), ) - def test_tokenize_list_of_strings(self): - input_data = { - "encoder_text": [" airplane at airport"] * 4, - "decoder_text": [" kohli is the best"] * 4, - } - - output = self.preprocessor(input_data) - self.assertAllEqual( - output["encoder_token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - output["encoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["decoder_token_ids"], [[2, 0, 7, 8, 9, 10, 11, 2, 1]] * 4 - ) - self.assertAllEqual( - output["decoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = { - "encoder_text": [" airplane at airport"] * 4, - "decoder_text": [" kohli is the best"] * 4, - } - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual( - x_out["encoder_token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - x_out["encoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["decoder_token_ids"], [[2, 0, 7, 8, 9, 10, 11, 2, 1]] * 4 - ) - self.assertAllEqual( - x_out["decoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = { - "encoder_text": [" airplane at airport"] * 4, - "decoder_text": [" kohli is the best"] * 4, - } - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x_out["encoder_token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - x_out["encoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["decoder_token_ids"], [[2, 0, 7, 8, 9, 10, 11, 2, 1]] * 4 - ) - self.assertAllEqual( - x_out["decoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - def test_error_multi_segment_input(self): + preprocessor = BartPreprocessor(**self.init_kwargs) input_data = { "encoder_text": ( tf.constant([" airplane at airport"] * 2), @@ -167,14 +74,14 @@ def test_error_multi_segment_input(self): tf.constant([" kohli"] * 2), ), } - with self.assertRaises(ValueError): - self.preprocessor(input_data) - - def test_serialization(self): - 
new_preprocessor = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.preprocessor) - ) - self.assertEqual( - new_preprocessor.get_config(), self.preprocessor.get_config() - ) + preprocessor(input_data) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BartPreprocessor.presets: + self.run_preset_test( + cls=BartPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bart/bart_presets_test.py b/keras_nlp/models/bart/bart_presets_test.py deleted file mode 100644 index 59bf3b5584..0000000000 --- a/keras_nlp/models/bart/bart_presets_test.py +++ /dev/null @@ -1,143 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# Copyright 2023 The KerasNLP Authors -# - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.tests.test_case import TestCase - -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Tests for loading pretrained model presets.""" - -import pytest -from absl.testing import parameterized - -from keras_nlp.models.bart.bart_backbone import BartBackbone -from keras_nlp.models.bart.bart_tokenizer import BartTokenizer - - -@pytest.mark.large -class BartPresetSmokeTest(TestCase): - """ - A smoke test for BART presets we run continuously. - - This only tests the smallest weights we have available. 
Run with: - `pytest keras_nlp/models/bart/bart_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = BartTokenizer.from_preset( - "bart_base_en", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [133, 2119, 6219, 23602, 4] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "encoder_token_ids": ops.array([[0, 133, 2119, 2]]), - "encoder_padding_mask": ops.array([[1, 1, 1, 1]]), - "decoder_token_ids": ops.array([[0, 7199, 14, 2119, 2]]), - "decoder_padding_mask": ops.array([[1, 1, 1, 1, 1]]), - } - model = BartBackbone.from_preset( - "bart_base_en", load_weights=load_weights - ) - outputs = model(input_data) - if load_weights: - encoder_output = outputs["encoder_sequence_output"][0, 0, :5] - expected_encoder_output = [-0.033, 0.013, -0.003, -0.012, -0.002] - decoder_output = outputs["decoder_sequence_output"][0, 0, :5] - expected_decoder_output = [2.516, 2.489, 0.695, 8.057, 1.245] - - self.assertAllClose( - encoder_output, expected_encoder_output, atol=0.01, rtol=0.01 - ) - self.assertAllClose( - decoder_output, expected_decoder_output, atol=0.01, rtol=0.01 - ) - - @parameterized.named_parameters( - ("bart_tokenizer", BartTokenizer), - ("bart", BartBackbone), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("bart_tokenizer", BartTokenizer), - ("bart", BartBackbone), - ) - def test_unknown_preset_error(self, cls): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("bart_base_en_clowntown") - - -@pytest.mark.extra_large -class BartPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every BART preset and is only run manually. - Run with: - `pytest keras_nlp/models/bart/bart_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_bart(self, load_weights): - for preset in BartBackbone.presets: - model = BartBackbone.from_preset(preset, load_weights=load_weights) - input_data = { - "encoder_token_ids": random.uniform( - shape=(1, 1024), - dtype="int64", - maxval=model.vocabulary_size, - ), - "encoder_padding_mask": ops.array( - [1] * 768 + [0] * 256, shape=(1, 1024) - ), - "decoder_token_ids": random.uniform( - shape=(1, 1024), - dtype="int64", - maxval=model.vocabulary_size, - ), - "decoder_padding_mask": ops.array( - [1] * 489 + [0] * 535, shape=(1, 1024) - ), - } - model(input_data) - - def test_load_tokenizers(self): - for preset in BartTokenizer.presets: - tokenizer = BartTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py index 41a3c3dab2..37493bb91d 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
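The conversion below pins down the one behavior that separates `BartSeq2SeqLMPreprocessor` from the plain preprocessor: it returns an `(x, y, sample_weight)` triple in which the labels are the decoder token ids shifted one step left, with padding masked out of the loss. A rough sketch of that contract, assuming BART's usual special token ids (`<s>` = 0, `<pad>` = 1, `</s>` = 2), which match the toy vocabularies in these tests:

    # Decoder input for " airplane airport", as in the expected output below.
    decoder_token_ids = [2, 0, 4, 5, 4, 7, 2, 1]
    # Labels are the next-token targets: shift left one step, pad the tail.
    labels = decoder_token_ids[1:] + [1]  # -> [0, 4, 5, 4, 7, 2, 1, 1]
    # Sample weights zero out the padded label positions.
    sample_weight = [0.0 if t == 1 else 1.0 for t in labels]
    # -> [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]
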
-import tensorflow as tf +import pytest -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.bart.bart_seq_2_seq_lm_preprocessor import ( BartSeq2SeqLMPreprocessor, ) @@ -22,131 +22,77 @@ from keras_nlp.tests.test_case import TestCase -class BartSeq2SeqLMPreprocessorTest(TestCase): +class BartPreprocessorTest(TestCase): def setUp(self): - vocab = { - "": 0, - "": 1, - "": 2, - "Ġair": 3, - "plane": 4, - "Ġat": 5, - "port": 6, - "Ġkoh": 7, - "li": 8, - "Ġis": 9, - "Ġthe": 10, - "Ġbest": 11, - "": 12, - } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - - self.preprocessor = BartSeq2SeqLMPreprocessor( - tokenizer=BartTokenizer( - vocabulary=vocab, - merges=merges, - ), - encoder_sequence_length=10, - decoder_sequence_length=9, + self.vocab = ["", "", "", "air", "Ġair", "plane", "Ġat"] + self.vocab += ["port", ""] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.tokenizer = BartTokenizer( + vocabulary=self.vocab, merges=self.merges ) - - def test_tokenize_strings(self): - input_data = { - "encoder_text": " airplane at airport", - "decoder_text": " kohli is the best", + self.init_kwargs = { + "tokenizer": self.tokenizer, + "encoder_sequence_length": 5, + "decoder_sequence_length": 8, } - - x_out, y_out, sw_out = self.preprocessor(input_data) - self.assertAllEqual( - x_out["encoder_token_ids"], [0, 3, 4, 5, 3, 6, 2, 1, 1, 1] - ) - self.assertAllEqual( - x_out["encoder_padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0] + self.input_data = ( + { + "encoder_text": [" airplane at airport"], + "decoder_text": [" airplane airport"], + }, ) - self.assertAllEqual( - x_out["decoder_token_ids"], [2, 0, 7, 8, 9, 10, 11, 2, 1] - ) - self.assertAllEqual( - x_out["decoder_padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 0] - ) - self.assertAllEqual(y_out, [0, 7, 8, 9, 10, 11, 2, 1, 1]) - self.assertAllEqual(sw_out, [1, 1, 1, 1, 1, 1, 1, 0, 0]) - def test_tokenize_list_of_strings(self): - input_data = { - "encoder_text": [" airplane at airport"] * 4, - "decoder_text": [" kohli is the best"] * 4, - } - - x_out, y_out, sw_out = self.preprocessor(input_data) - self.assertAllEqual( - x_out["encoder_token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - x_out["encoder_padding_mask"], - [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4, - ) - self.assertAllEqual( - x_out["decoder_token_ids"], [[2, 0, 7, 8, 9, 10, 11, 2, 1]] * 4 - ) - self.assertAllEqual( - x_out["decoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, [[0, 7, 8, 9, 10, 11, 2, 1, 1]] * 4) - self.assertAllEqual(sw_out, [[1, 1, 1, 1, 1, 1, 1, 0, 0]] * 4) - - def test_error_multi_segment_input(self): - input_data = { - "encoder_text": ( - tf.constant([" airplane at airport"] * 2), - tf.constant([" airplane"] * 2), + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=BartSeq2SeqLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "encoder_token_ids": [[0, 4, 5, 6, 2]], + "encoder_padding_mask": [[1, 1, 1, 1, 1]], + "decoder_token_ids": [[2, 0, 4, 5, 4, 7, 2, 1]], + "decoder_padding_mask": [[1, 1, 1, 1, 
1, 1, 1, 0]], + }, + [[0, 4, 5, 4, 7, 2, 1, 1]], + [[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]], ), - "decoder_text": ( - tf.constant([" kohli is the best"] * 2), - tf.constant([" kohli"] * 2), - ), - } - - with self.assertRaises(ValueError): - self.preprocessor(input_data) + ) def test_generate_preprocess(self): + preprocessor = BartSeq2SeqLMPreprocessor(**self.init_kwargs) input_data = { - "encoder_text": tf.convert_to_tensor([" airplane at airport"]), - "decoder_text": tf.convert_to_tensor([" kohli is the best"]), + "encoder_text": [" airplane at airport"], + "decoder_text": [" airplane airport"], } - x_out = self.preprocessor.generate_preprocess(input_data) - self.assertAllEqual( - x_out["encoder_token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1]] - ) - self.assertAllEqual( - x_out["encoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] - ) - self.assertAllEqual( - x_out["decoder_token_ids"], [[2, 0, 7, 8, 9, 10, 11, 1, 1]] - ) - self.assertAllEqual( - x_out["decoder_padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0]] + output = preprocessor.generate_preprocess(input_data) + self.assertAllClose( + output, + { + "encoder_token_ids": [[0, 4, 5, 6, 2]], + "encoder_padding_mask": [[1, 1, 1, 1, 1]], + "decoder_token_ids": [[2, 0, 4, 5, 4, 7, 1, 1]], + "decoder_padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]], + }, ) def test_generate_postprocess(self): + preprocessor = BartSeq2SeqLMPreprocessor(**self.init_kwargs) input_data = { - "decoder_token_ids": tf.constant([2, 0, 7, 8, 9, 10, 11, 1, 1]), - "decoder_padding_mask": tf.cast( - [1, 1, 1, 1, 1, 1, 1, 0, 0], dtype="bool" - ), + "decoder_token_ids": ops.array([0, 4, 5, 6, 2], dtype="int32"), + "decoder_padding_mask": ops.array([1, 1, 1, 1, 1], dtype="bool"), } - x = self.preprocessor.generate_postprocess(input_data) - self.assertAllEqual(x, " kohli is the best") + output = preprocessor.generate_postprocess(input_data) + self.assertAllEqual(output, " airplane at") - def test_serialization(self): - new_preprocessor = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.preprocessor) - ) - self.assertEqual( - new_preprocessor.get_config(), self.preprocessor.get_config() - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BartSeq2SeqLMPreprocessor.presets: + self.run_preset_test( + cls=BartSeq2SeqLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py index 7c72580daf..280ec33dc6 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_test.py @@ -12,13 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
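The `setUp` below builds the tiny byte-level BPE assets shared across these BART tests. Spelled out in one place, and assuming the special tokens sit at BART's conventional positions (`<s>` = 0, `<pad>` = 1, `</s>` = 2, with `<mask>` last), the construction looks roughly like this; `Ġ` is the byte-level BPE marker for a leading space:

    from keras_nlp.models.bart.bart_tokenizer import BartTokenizer

    vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
    vocab += ["port", "<mask>"]
    vocab = {token: i for i, token in enumerate(vocab)}
    merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
    merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
    merges += ["Ġai r", "Ġa i", "pla ne"]
    tokenizer = BartTokenizer(vocabulary=vocab, merges=merges)
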
-import os from unittest.mock import patch import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.bart.bart_backbone import BartBackbone from keras_nlp.models.bart.bart_seq_2_seq_lm import BartSeq2SeqLM @@ -31,26 +28,12 @@ class BartSeq2SeqLMTest(TestCase): def setUp(self): - self.vocab = { - "": 0, - "": 1, - "": 2, - "Ġair": 3, - "plane": 4, - "Ġat": 5, - "port": 6, - "Ġkoh": 7, - "li": 8, - "Ġis": 9, - "Ġthe": 10, - "Ġbest": 11, - "": 12, - } - - self.merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - self.merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e"] - self.merges += ["s t", "Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e"] - self.merges += ["Ġbe st", "po rt", "pla ne"] + self.vocab = ["", "", "", "air", "Ġair", "plane", "Ġat"] + self.vocab += ["port", ""] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] self.preprocessor = BartSeq2SeqLMPreprocessor( BartTokenizer(vocabulary=self.vocab, merges=self.merges), encoder_sequence_length=12, @@ -64,64 +47,47 @@ def setUp(self): intermediate_dim=8, max_sequence_length=12, ) - self.seq_2_seq_lm = BartSeq2SeqLM( - backbone=self.backbone, - preprocessor=self.preprocessor, - ) - - self.raw_batch = { - "encoder_text": [" airplane at airport", " airplane at airport"], - "decoder_text": [" kohli is the best", " kohli is the best"], + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, } - - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_seq_2_seq_lm(self): - self.seq_2_seq_lm(self.preprocessed_batch) - - def test_predict(self): - self.seq_2_seq_lm.predict(self.raw_batch) - self.seq_2_seq_lm.preprocessor = None - self.seq_2_seq_lm.predict(self.preprocessed_batch) - - def test_fit(self): - self.seq_2_seq_lm.fit(self.raw_dataset) - self.seq_2_seq_lm.preprocessor = None - self.seq_2_seq_lm.fit(self.preprocessed_dataset) - - def test_fit_no_xla(self): - self.seq_2_seq_lm.preprocessor = None - self.seq_2_seq_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.train_data = ( + { + "encoder_text": [ + " airplane at airport", + " airplane at airport", + ], + "decoder_text": [" airplane airport", " airplane airport"], + }, + ) + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_causal_lm_basics(self): + self.run_task_test( + cls=BartSeq2SeqLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 10, 9), ) - self.seq_2_seq_lm.fit(self.preprocessed_dataset) def test_generate(self): # String input. inputs = { "encoder_text": " airplane at airport", - "decoder_text": " kohli is the best", + "decoder_text": " airplane at", } - output = self.seq_2_seq_lm.generate(inputs) - self.assertTrue(" kohli is the best" in output) + seq_2_seq_lm = BartSeq2SeqLM(**self.init_kwargs) + output = seq_2_seq_lm.generate(inputs) + self.assertTrue(" airplane at" in output) # String tensor input. self.assertIsInstance( - self.seq_2_seq_lm.generate(self.raw_batch)[0], str - ) - # String dataset input. 
- self.assertIsInstance( - self.seq_2_seq_lm.generate(self.raw_dataset)[0], str + seq_2_seq_lm.generate(" airplane at airport"), str ) # Int tensor input. - self.seq_2_seq_lm.preprocessor = None + seq_2_seq_lm.preprocessor = None preprocessed_batch = self.preprocessor.generate_preprocess(inputs) - outputs = self.seq_2_seq_lm.generate(preprocessed_batch) + outputs = seq_2_seq_lm.generate(preprocessed_batch) # Assert prompt is in output in token id space. self.assertAllEqual( outputs["decoder_token_ids"][:, :5], @@ -132,27 +98,9 @@ def test_generate(self): preprocessed_batch["decoder_padding_mask"][:, :5], ) - def test_generate_string_in_string_out(self): - # String input. - inputs = " airplane at airport" - self.seq_2_seq_lm.generate(inputs) - - # String tensor input. - self.assertIsInstance( - self.seq_2_seq_lm.generate( - [" airplane at airport", " airplane at airport"] - )[0], - str, - ) - - # String dataset input. - raw_dataset = tf.data.Dataset.from_tensor_slices( - tf.constant([" airplane at airport", " airplane at airport"]) - ).batch(2) - self.assertIsInstance(self.seq_2_seq_lm.generate(raw_dataset)[0], str) - def test_early_stopping(self): - call_decoder_with_cache = self.seq_2_seq_lm.call_decoder_with_cache + seq_2_seq_lm = BartSeq2SeqLM(**self.init_kwargs) + call_decoder_with_cache = seq_2_seq_lm.call_decoder_with_cache def wrapper(*args, **kwargs): """Modify output logits to always favor end_token_id""" @@ -174,61 +122,52 @@ def wrapper(*args, **kwargs): ) with patch.object( - self.seq_2_seq_lm, "call_decoder_with_cache", wraps=wrapper + seq_2_seq_lm, "call_decoder_with_cache", wraps=wrapper ): inputs = { "encoder_text": [ " airplane at airport", " airplane at airport", ], - "decoder_text": [" kohli is the best", " kohli"], + "decoder_text": [" airplane at", " airplane"], } - output = self.seq_2_seq_lm.generate(inputs) - + output = seq_2_seq_lm.generate(inputs) # We should immediately abort and output the prompt. self.assertAllEqual(inputs["decoder_text"], output) - # TODO: fix beam search. - @pytest.mark.tf_only - def test_beam_search(self): - seq_2_seq_lm = BartSeq2SeqLM( - backbone=self.backbone, - preprocessor=self.preprocessor, - ) - seq_2_seq_lm.compile(sampler="beam") - seq_2_seq_lm.generate(self.raw_batch) - def test_generate_compilation(self): + seq_2_seq_lm = BartSeq2SeqLM(**self.init_kwargs) # Assert we do not recompile with successive calls. - self.seq_2_seq_lm.generate(self.raw_batch) - first_fn = self.seq_2_seq_lm.generate_function - self.seq_2_seq_lm.generate(self.raw_batch) - second_fn = self.seq_2_seq_lm.generate_function + seq_2_seq_lm.generate(" airplane at airport") + first_fn = seq_2_seq_lm.generate_function + seq_2_seq_lm.generate(" airplane at airport") + second_fn = seq_2_seq_lm.generate_function self.assertEqual(first_fn, second_fn) # Assert we do recompile after compile is called. 
- self.seq_2_seq_lm.compile(sampler="greedy") - self.assertIsNone(self.seq_2_seq_lm.generate_function) + seq_2_seq_lm.compile(sampler="greedy") + self.assertIsNone(seq_2_seq_lm.generate_function) - def test_serialization(self): - new_seq_2_seq_lm = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.seq_2_seq_lm) - ) - self.assertEqual( - new_seq_2_seq_lm.get_config(), self.seq_2_seq_lm.get_config() + def test_beam_search(self): + seq_2_seq_lm = BartSeq2SeqLM( + backbone=self.backbone, + preprocessor=self.preprocessor, ) + seq_2_seq_lm.compile(sampler="beam") + seq_2_seq_lm.generate(" airplane at airport") @pytest.mark.large def test_saved_model(self): - keras.utils.set_random_seed(42) - model_output = self.seq_2_seq_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.seq_2_seq_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, BartSeq2SeqLM) + self.run_model_saving_test( + cls=BartSeq2SeqLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - keras.utils.set_random_seed(42) - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BartSeq2SeqLM.presets: + self.run_preset_test( + cls=BartSeq2SeqLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bart/bart_tokenizer_test.py b/keras_nlp/models/bart/bart_tokenizer_test.py index d3e77958e8..acfdbc3d87 100644 --- a/keras_nlp/models/bart/bart_tokenizer_test.py +++ b/keras_nlp/models/bart/bart_tokenizer_test.py @@ -12,66 +12,52 @@ # See the License for the specific language governing permissions and # limitations under the License. 
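For the tokenizer test below, it helps to trace one input through the merges. With the toy vocabulary sketched earlier (ids: 4 = `Ġair`, 5 = `plane`, 7 = `port`), a leading space becomes `Ġ`, so ` airplane airport` splits as `Ġair`/`plane` and `Ġair`/`port`:

    # Assumes `vocab` and `merges` from the sketch above.
    tokenizer = BartTokenizer(vocabulary=vocab, merges=merges)
    tokenizer(" airplane airport")
    # -> [4, 5, 4, 7], matching the expected output below.
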
-from keras_nlp.backend import keras +import pytest + from keras_nlp.models.bart.bart_tokenizer import BartTokenizer from keras_nlp.tests.test_case import TestCase class BartTokenizerTest(TestCase): def setUp(self): - vocab = { - "": 0, - "": 1, - "": 2, - "Ġair": 3, - "plane": 4, - "Ġat": 5, - "port": 6, - "Ġkoh": 7, - "li": 8, - "Ġis": 9, - "Ġthe": 10, - "Ġbest": 11, - } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - - self.tokenizer = BartTokenizer(vocabulary=vocab, merges=merges) - - def test_tokenize(self): - input_data = " airplane at airport" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [3, 4, 5, 3, 6]) - - def test_tokenize_special_tokens(self): - input_data = " airplane at airport" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [0, 3, 4, 5, 3, 6, 0, 1]) - - def test_tokenize_batch(self): - input_data = [" airplane at airport", " kohli is the best"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[3, 4, 5, 3, 6], [7, 8, 9, 10, 11]]) - - def test_detokenize(self): - input_tokens = [[3, 4, 5, 3, 6]] - output = self.tokenizer.detokenize(input_tokens) - self.assertAllEqual(output, [" airplane at airport"]) - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 12) + self.vocab = ["", "", "", "air", "Ġair", "plane", "Ġat"] + self.vocab += ["port", ""] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges} + self.input_data = [ + " airplane at airport", + " airplane airport", + ] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=BartTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[0, 4, 5, 6, 4, 7, 0, 1], [4, 5, 4, 7]], + ) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): BartTokenizer(vocabulary=["a", "b", "c"], merges=[]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=BartTokenizer, + preset="bart_base_en", + input_data=["The quick brown fox."], + expected_output=[[133, 2119, 6219, 23602, 4]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BartTokenizer.presets: + self.run_preset_test( + cls=BartTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bert/bert_backbone_test.py b/keras_nlp/models/bert/bert_backbone_test.py index 3038cc4afb..c1039114f5 100644 --- a/keras_nlp/models/bert/bert_backbone_test.py +++ b/keras_nlp/models/bert/bert_backbone_test.py @@ -12,76 +12,78 @@ # See the License for the specific language governing permissions and # limitations under the License. 
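The BERT backbone conversion below introduces `expected_partial_output`, the replacement for the hand-rolled golden-value asserts in the deleted presets tests. `run_preset_test` lives on the shared `TestCase` base class and its internals are not part of this patch, but conceptually the check is the same slice-and-compare the old tests performed (values taken from the hunk below):

    # Conceptual equivalent of the stability check, not the helper itself.
    output = model(input_data)["sequence_output"]
    self.assertAllClose(
        output[0, 0, :5],
        [-1.38173, 0.16598, -2.92788, -2.66958, -0.61556],
        atol=0.01,
        rtol=0.01,
    )
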
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.bert.bert_backbone import BertBackbone from keras_nlp.tests.test_case import TestCase class BertBackboneTest(TestCase): def setUp(self): - self.backbone = BertBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "segment_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_bert(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "bert_backbone") - - def test_variable_sequence_length_call_bert(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "segment_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=BertBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape={ + "sequence_output": (2, 5, 2), + "pooled_output": (2, 2), + }, ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=BertBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, BertBackbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=BertBackbone, + preset="bert_tiny_en_uncased", + input_data={ + "token_ids": ops.array([[101, 1996, 4248, 102]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape={ + "sequence_output": (1, 4, 128), + "pooled_output": (1, 128), + }, + # The forward pass from a preset should be stable! + expected_partial_output={ + "sequence_output": ( + ops.array([-1.38173, 0.16598, -2.92788, -2.66958, -0.61556]) + ), + "pooled_output": ( + ops.array([-0.99999, 0.07777, -0.99955, -0.00982, -0.99967]) + ), + }, + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BertBackbone.presets: + self.run_preset_test( + cls=BertBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bert/bert_classifier_test.py b/keras_nlp/models/bert/bert_classifier_test.py index 92122335b5..d5a767d3c7 100644 --- a/keras_nlp/models/bert/bert_classifier_test.py +++ b/keras_nlp/models/bert/bert_classifier_test.py @@ -12,14 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.bert.bert_backbone import BertBackbone from keras_nlp.models.bert.bert_classifier import BertClassifier from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor @@ -44,83 +38,40 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.classifier = BertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, - ) - - # Setup data. - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. - self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - optimizer="adam", - loss="sparse_categorical_crossentropy", - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) - self.classifier.fit(self.preprocessed_dataset) + self.input_data = self.preprocessor(*self.train_data)[0] - def test_serialization(self): - # Defaults. - original = BertClassifier( - self.backbone, - num_classes=2, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. 
- original = BertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - name="test", - trainable=False, + def test_classifier_basics(self): + self.run_task_test( + cls=BertClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) @pytest.mark.large - def test_saving_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back - self.assertIsInstance(restored_model, BertClassifier) + def test_saved_model(self): + self.run_model_saving_test( + cls=BertClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BertClassifier.presets: + self.run_preset_test( + cls=BertClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py b/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py index ad208c24a3..ff58962215 100644 --- a/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/bert/bert_masked_lm_preprocessor_test.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import tensorflow as tf +import pytest -from keras_nlp.backend import keras from keras_nlp.models.bert.bert_masked_lm_preprocessor import ( BertMaskedLMPreprocessor, ) @@ -27,106 +26,62 @@ def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] self.vocab += ["the", "quick", "brown", "fox"] - - tokenizer = BertTokenizer(vocabulary=self.vocab) - - self.preprocessor = BertMaskedLMPreprocessor( - tokenizer=tokenizer, + self.tokenizer = BertTokenizer(vocabulary=self.vocab) + self.init_kwargs = { + "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. 
- mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=4, - sequence_length=12, - ) - - def test_preprocess_strings(self): - input_data = "the quick brown fox" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["segment_ids"], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=BertMaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[9, 10, 11, 12]], + [[1.0, 1.0, 1.0, 1.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4]) - self.assertAllEqual(y, [9, 10, 11, 12]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = ["the quick brown fox"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], - [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4, - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[9, 10, 11, 12]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant(["the quick brown fox"] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[9, 10, 11, 12]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_mask_multiple_sentences(self): - sentence_one = tf.constant("the quick") - sentence_two = tf.constant("brown fox") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [2, 4, 4, 3, 4, 4, 3, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 4, 5]) - self.assertAllEqual(y, [9, 10, 11, 12]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) def test_no_masking_zero_rate(self): no_mask_preprocessor = BertMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, mask_selection_length=4, sequence_length=12, ) - input_data = "the quick brown fox" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [2, 9, 10, 11, 12, 3, 0, 0, 0, 0, 0, 0] + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[2, 9, 10, 11, 12, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 
0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual( - x["padding_mask"], - [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0], - ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BertMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=BertMaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bert/bert_masked_lm_test.py b/keras_nlp/models/bert/bert_masked_lm_test.py index 8659de2474..0bad92a401 100644 --- a/keras_nlp/models/bert/bert_masked_lm_test.py +++ b/keras_nlp/models/bert/bert_masked_lm_test.py @@ -12,12 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.bert.bert_backbone import BertBackbone from keras_nlp.models.bert.bert_masked_lm import BertMaskedLM from keras_nlp.models.bert.bert_masked_lm_preprocessor import ( @@ -49,61 +45,36 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.masked_lm = BertMaskedLM( - self.backbone, - preprocessor=self.preprocessor, - ) - - # Setup data. - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call(self): - self.masked_lm(self.preprocessed_batch[0]) - - def test_predict(self): - self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch[0]) - - def test_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - optimizer="adam", - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. 
)
-        self.masked_lm.fit(self.preprocessed_dataset)
+        self.input_data = self.preprocessor(*self.train_data)[0]
 
-    def test_serialization(self):
-        config = keras.saving.serialize_keras_object(self.masked_lm)
-        new_classifier = keras.saving.deserialize_keras_object(config)
-        self.assertEqual(
-            new_classifier.get_config(),
-            self.masked_lm.get_config(),
+    def test_masked_lm_basics(self):
+        self.run_task_test(
+            cls=BertMaskedLM,
+            init_kwargs=self.init_kwargs,
+            train_data=self.train_data,
+            expected_output_shape=(2, 5, 10),
         )
 
     @pytest.mark.large
     def test_saved_model(self):
-        model_output = self.masked_lm.predict(self.raw_batch)
-        path = os.path.join(self.get_temp_dir(), "model.keras")
-        self.masked_lm.save(path, save_format="keras_v3")
-        restored_model = keras.models.load_model(path)
+        self.run_model_saving_test(
+            cls=BertMaskedLM,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+        )
 
-        # Check we got the real object back.
-        self.assertIsInstance(restored_model, BertMaskedLM)
-        # Check that output matches.
-        restored_output = restored_model.predict(self.raw_batch)
-        self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01)
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in BertMaskedLM.presets:
+            self.run_preset_test(
+                cls=BertMaskedLM,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/bert/bert_preprocessor_test.py b/keras_nlp/models/bert/bert_preprocessor_test.py
index efedb2d550..6d1e5fee57 100644
--- a/keras_nlp/models/bert/bert_preprocessor_test.py
+++ b/keras_nlp/models/bert/bert_preprocessor_test.py
@@ -12,9 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import tensorflow as tf
+import pytest
 
-from keras_nlp.backend import keras
 from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor
 from keras_nlp.models.bert.bert_tokenizer import BertTokenizer
 from keras_nlp.tests.test_case import TestCase
@@ -25,93 +24,44 @@ def setUp(self):
         self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
         self.vocab += ["the", "quick", "brown", "fox"]
-        self.preprocessor = BertPreprocessor(
-            BertTokenizer(vocabulary=self.vocab),
-            sequence_length=8,
+        self.tokenizer = BertTokenizer(vocabulary=self.vocab)
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            "sequence_length": 8,
+        }
+        self.input_data = (
+            ["THE QUICK BROWN FOX."],
+            [1],  # Pass through labels.
+            [1.0],  # Pass through sample_weights.
         )
 
-    def test_tokenize_strings(self):
-        input_data = "THE QUICK BROWN FOX."
-        output = self.preprocessor(input_data)
-        self.assertAllEqual(output["token_ids"], [2, 5, 6, 7, 8, 1, 3, 0])
-        self.assertAllEqual(output["segment_ids"], [0, 0, 0, 0, 0, 0, 0, 0])
-        self.assertAllEqual(output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0])
-
-    def test_tokenize_list_of_strings(self):
-        # We should handle a list of strings as a batch.
- input_data = ["THE QUICK BROWN FOX."] * 4 - output = self.preprocessor(input_data) - self.assertAllEqual(output["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = tf.constant(["THE QUICK BROWN FOX."] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual(x_out["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["THE QUICK BROWN FOX."] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x_out["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_multiple_sentences(self): - sentence_one = tf.constant("THE QUICK") - sentence_two = tf.constant("BROWN FOX.") - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual(output["token_ids"], [2, 5, 6, 3, 7, 8, 1, 3]) - self.assertAllEqual(output["segment_ids"], [0, 0, 0, 0, 1, 1, 1, 1]) - self.assertAllEqual(output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1]) - - def test_tokenize_multiple_batched_sentences(self): - sentence_one = tf.constant(["THE QUICK"] * 4) - sentence_two = tf.constant(["BROWN FOX."] * 4) - # The first tuple or list is always interpreted as an enumeration of - # separate sequences to concatenate. - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual(output["token_ids"], [[2, 5, 6, 3, 7, 8, 1, 3]] * 4) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 1, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1]] * 4 + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=BertPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 5, 6, 7, 8, 1, 3, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. 
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = BertPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BertPreprocessor.presets: + self.run_preset_test( + cls=BertPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/bert/bert_presets_test.py b/keras_nlp/models/bert/bert_presets_test.py deleted file mode 100644 index a84286c091..0000000000 --- a/keras_nlp/models/bert/bert_presets_test.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.bert.bert_backbone import BertBackbone -from keras_nlp.models.bert.bert_classifier import BertClassifier -from keras_nlp.models.bert.bert_preprocessor import BertPreprocessor -from keras_nlp.models.bert.bert_tokenizer import BertTokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class BertPresetSmokeTest(TestCase): - """ - A smoke test for BERT presets we run continuously. - - This only tests the smallest weights we have available. Run with: - `pytest keras_nlp/models/bert/bert_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = BertTokenizer.from_preset( - "bert_tiny_en_uncased", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [1996, 4248, 2829, 4419, 1012] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - tokenizer = BertPreprocessor.from_preset( - "bert_tiny_en_uncased", - sequence_length=4, - ) - outputs = tokenizer("The quick brown fox.")["token_ids"] - expected_outputs = [101, 1996, 4248, 102] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "segment_ids": ops.array([[0, 0, 0, 0]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = BertBackbone.from_preset( - "bert_tiny_en_uncased", load_weights=load_weights - ) - outputs = model(input_data)["sequence_output"] - if load_weights: - # The forward pass from a preset should be stable! - # This test should catch cases where we unintentionally change our - # network code in a way that would invalidate our preset weights. 
-        # We should only update these numbers if we are updating a weights
-        # file, or have found a discrepancy with the upstream source.
-        outputs = outputs[0, 0, :5]
-        expected = [-1.38173, 0.16598, -2.92788, -2.66958, -0.61556]
-        # Keep a high tolerance, so we are robust to different hardware.
-        self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01)
-
-    @parameterized.named_parameters(
-        ("load_weights", True), ("no_load_weights", False)
-    )
-    def test_classifier_output(self, load_weights):
-        input_data = ["The quick brown fox."]
-        model = BertClassifier.from_preset(
-            "bert_tiny_en_uncased",
-            num_classes=2,
-            load_weights=load_weights,
-        )
-        # We don't assert output values, as the head weights are random.
-        model.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("load_weights", True), ("no_load_weights", False)
-    )
-    def test_classifier_output_without_preprocessing(self, load_weights):
-        input_data = {
-            "token_ids": ops.array([[101, 1996, 4248, 102]]),
-            "segment_ids": ops.array([[0, 0, 0, 0]]),
-            "padding_mask": ops.array([[1, 1, 1, 1]]),
-        }
-        model = BertClassifier.from_preset(
-            "bert_tiny_en_uncased",
-            num_classes=2,
-            load_weights=load_weights,
-            preprocessor=None,
-        )
-        # Never assert output values, as the head weights are random.
-        model.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("bert_tokenizer", BertTokenizer, {}),
-        ("bert_preprocessor", BertPreprocessor, {}),
-        ("bert", BertBackbone, {}),
-        ("bert_classifier", BertClassifier, {"num_classes": 2}),
-    )
-    def test_preset_mutability(self, cls, kwargs):
-        preset = "bert_tiny_en_uncased"
-        obj = cls.from_preset(preset, **kwargs)
-        # Cannot overwrite the presets attribute in an object
-        with self.assertRaises(AttributeError):
-            obj.presets = {"my_model": "clowntown"}
-        # Cannot mutate presets in an object
-        config = obj.presets[preset]["config"]
-        config["num_layers"] = 1
-        self.assertEqual(config["num_layers"], 1)
-        self.assertEqual(obj.presets[preset]["config"]["num_layers"], 2)
-        # Cannot mutate presets in the class
-        config = BertBackbone.presets[preset]["config"]
-        config["num_layers"] = 1
-        self.assertEqual(config["num_layers"], 1)
-        self.assertEqual(
-            BertBackbone.presets[preset]["config"]["num_layers"], 2
-        )
-
-    @parameterized.named_parameters(
-        ("bert_tokenizer", BertTokenizer),
-        ("bert_preprocessor", BertPreprocessor),
-        ("bert", BertBackbone),
-        ("bert_classifier", BertClassifier),
-    )
-    def test_preset_docstring(self, cls):
-        """Check we did our docstring formatting correctly."""
-        for name in cls.presets:
-            self.assertRegex(cls.from_preset.__doc__, name)
-
-    @parameterized.named_parameters(
-        ("bert_tokenizer", BertTokenizer, {}),
-        ("bert_preprocessor", BertPreprocessor, {}),
-        ("bert", BertBackbone, {}),
-        ("bert_classifier", BertClassifier, {"num_classes": 2}),
-    )
-    def test_unknown_preset_error(self, cls, kwargs):
-        # Not a preset name
-        with self.assertRaises(ValueError):
-            cls.from_preset("bert_base_uncased_clowntown", **kwargs)
-
-    def test_override_preprocessor_sequence_length(self):
-        """Override sequence length shorter than model's maximum."""
-        preprocessor = BertPreprocessor.from_preset(
-            "bert_base_en_uncased",
-            sequence_length=64,
-        )
-        self.assertEqual(preprocessor.get_config()["sequence_length"], 64)
-        preprocessor("The quick brown fox.")
-
-    def test_override_preprocessor_sequence_length_gt_max(self):
-        """Override sequence length longer than model's maximum."""
-        with self.assertRaises(ValueError):
-            BertPreprocessor.from_preset(
-                "bert_base_en_uncased",
-                sequence_length=1024,
-            )
-
-
-@pytest.mark.extra_large
-class BertPresetFullTest(TestCase):
-    """
-    Test the full enumeration of our presets.
-
-    This tests every preset for BERT and is only run manually.
-    Run with:
-    `pytest keras_nlp/models/bert/bert_presets_test.py --run_extra_large`
-    """
-
-    @parameterized.named_parameters(
-        ("load_weights", True), ("no_load_weights", False)
-    )
-    def test_load_bert(self, load_weights):
-        for preset in BertBackbone.presets:
-            model = BertBackbone.from_preset(preset, load_weights=load_weights)
-            input_data = {
-                "token_ids": random.uniform(
-                    shape=(1, 512), dtype="int64", maxval=model.vocabulary_size
-                ),
-                "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)),
-                "padding_mask": ops.array([1] * 512, shape=(1, 512)),
-            }
-            model(input_data)
-
-    @parameterized.named_parameters(
-        ("load_weights", True), ("no_load_weights", False)
-    )
-    def test_load_bert_classifier(self, load_weights):
-        for preset in BertClassifier.presets:
-            classifier = BertClassifier.from_preset(
-                preset,
-                num_classes=2,
-                load_weights=load_weights,
-            )
-            input_data = ["The quick brown fox."]
-            classifier.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("load_weights", True), ("no_load_weights", False)
-    )
-    def test_load_bert_classifier_without_preprocessing(self, load_weights):
-        for preset in BertClassifier.presets:
-            classifier = BertClassifier.from_preset(
-                preset,
-                num_classes=2,
-                preprocessor=None,
-                load_weights=load_weights,
-            )
-            input_data = {
-                "token_ids": random.uniform(
-                    shape=(1, 512),
-                    dtype="int64",
-                    maxval=classifier.backbone.vocabulary_size,
-                ),
-                "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)),
-                "padding_mask": ops.array([1] * 512, shape=(1, 512)),
-            }
-            classifier.predict(input_data)
-
-    def test_load_tokenizers(self):
-        for preset in BertTokenizer.presets:
-            tokenizer = BertTokenizer.from_preset(preset)
-            tokenizer("The quick brown fox.")
-
-    def test_load_preprocessors(self):
-        for preset in BertPreprocessor.presets:
-            preprocessor = BertPreprocessor.from_preset(preset)
-            preprocessor("The quick brown fox.")
diff --git a/keras_nlp/models/bert/bert_tokenizer_test.py b/keras_nlp/models/bert/bert_tokenizer_test.py
index 2cd1baa490..29ed902a62 100644
--- a/keras_nlp/models/bert/bert_tokenizer_test.py
+++ b/keras_nlp/models/bert/bert_tokenizer_test.py
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from keras_nlp.backend import keras
+import pytest
+
 from keras_nlp.models.bert.bert_tokenizer import BertTokenizer
 from keras_nlp.tests.test_case import TestCase
 
@@ -22,40 +23,40 @@ def setUp(self):
         self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
         self.vocab += ["the", "quick", "brown", "fox"]
-        self.tokenizer = BertTokenizer(vocabulary=self.vocab)
-
-    def test_tokenize(self):
-        input_data = "THE QUICK BROWN FOX."
- output = self.tokenizer(input_data) - self.assertAllEqual(output, [5, 6, 7, 8, 1]) - - def test_tokenize_batch(self): - input_data = ["THE QUICK BROWN FOX.", "THE FOX."] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[5, 6, 7, 8, 1], [5, 8, 1]]) + self.init_kwargs = {"vocabulary": self.vocab} + self.input_data = ["THE QUICK BROWN FOX.", "THE FOX."] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=BertTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 6, 7, 8, 1], [5, 8, 1]], + ) def test_lowercase(self): - input_data = "THE QUICK BROWN FOX." tokenizer = BertTokenizer(vocabulary=self.vocab, lowercase=True) - output = tokenizer(input_data) - self.assertAllEqual(output, [9, 10, 11, 12, 1]) - - def test_detokenize(self): - input_tokens = [[5, 6, 7, 8]] - output = self.tokenizer.detokenize(input_tokens) - self.assertAllEqual(output, ["THE QUICK BROWN FOX"]) - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 13) + output = tokenizer(self.input_data) + self.assertAllEqual(output, [[9, 10, 11, 12, 1], [9, 12, 1]]) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): BertTokenizer(vocabulary=["a", "b", "c"]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=BertTokenizer, + preset="bert_tiny_en_uncased", + input_data=["The quick brown fox."], + expected_output=[[1996, 4248, 2829, 4419, 1012]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in BertTokenizer.presets: + self.run_preset_test( + cls=BertTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py index e37eca7f56..3559002864 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_backbone_test.py @@ -12,13 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
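Context for the conversions in this patch: each migrated test file now declares `init_kwargs`/`input_data` fixtures and delegates its assertions to shared helpers on the base TestCase in keras_nlp/tests/test_case.py (`run_backbone_test`, `run_task_test`, `run_preset_test`, and friends). As a minimal sketch of the pattern, assuming a backbone whose call returns a single sequence tensor; the helper body below is illustrative only, not the actual implementation, which also covers serialization and variable sequence lengths:

    def run_backbone_test(self, cls, init_kwargs, input_data, expected_output_shape):
        # Build the backbone from the declarative kwargs and run one forward pass.
        backbone = cls(**init_kwargs)
        output = backbone(input_data)
        self.assertEqual(tuple(output.shape), expected_output_shape)
        # Round-trip the config to catch get_config()/from_config() drift,
        # replacing the per-file test_serialization methods deleted in this patch.
        revived = cls.from_config(backbone.get_config())
        self.assertEqual(revived.get_config(), backbone.get_config())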
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.tests.test_case import TestCase @@ -26,67 +21,58 @@ class DebertaV3BackboneTest(TestCase): def setUp(self): - self.backbone = DebertaV3Backbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - bucket_size=2, - ) - self.batch_size = 8 - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_deberta(self): - self.backbone(self.input_batch) - - def test_name(self): - self.assertRegexpMatches(self.backbone.name, "deberta_v3_backbone") - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_variable_sequence_length_call_deberta(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - output = self.backbone(input_data) - self.assertAllEqual( - ops.shape(output), - [2, seq_length, self.backbone.hidden_dim], - ) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=DebertaV3Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=DebertaV3Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, DebertaV3Backbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=DebertaV3Backbone, + preset="deberta_v3_extra_small_en", + input_data={ + "token_ids": ops.array([[0, 581, 63773, 2]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape=(1, 4, 384), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [0.418, -0.116, -0.122, -1.847, -0.035] + ), + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DebertaV3Backbone.presets: + self.run_preset_test( + cls=DebertaV3Backbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py index c53b0fd143..046c18dd5e 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py @@ -13,15 +13,10 @@ # limitations under the License. import io -import os -import numpy as np import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.models.deberta_v3.deberta_v3_classifier import ( DebertaV3Classifier, @@ -35,14 +30,13 @@ class DebertaV3ClassifierTest(TestCase): def setUp(self): + # Setup model. + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=10, + vocab_size=12, model_type="WORD", pad_id=0, bos_id=1, @@ -65,85 +59,41 @@ def setUp(self): hidden_dim=2, intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, - bucket_size=2, - ) - self.classifier = DebertaV3Classifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, - hidden_dim=4, ) - - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. - self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) - self.classifier.fit(self.preprocessed_dataset) + self.input_data = self.preprocessor(*self.train_data)[0] - def test_serialization(self): - # Defaults. 
- original = DebertaV3Classifier( - self.backbone, - num_classes=2, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. - original = DebertaV3Classifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - hidden_dim=4, - name="test", - trainable=False, + def test_classifier_basics(self): + self.run_task_test( + cls=DebertaV3Classifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) @pytest.mark.large - def test_saving_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, DebertaV3Classifier) + def test_saved_model(self): + self.run_model_saving_test( + cls=DebertaV3Classifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DebertaV3Classifier.presets: + self.run_preset_test( + cls=DebertaV3Classifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py index 3c1f671297..faf1ee1a8f 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import ( DebertaV3MaskedLMPreprocessor, ) @@ -25,14 +24,12 @@ from keras_nlp.tests.test_case import TestCase -class DebertaV3PreprocessorTest(TestCase): +class DebertaV3MaskedLMPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", @@ -46,100 +43,60 @@ def setUp(self): unk_piece="[UNK]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - self.tokenizer = DebertaV3Tokenizer(proto=self.proto) - self.preprocessor = DebertaV3MaskedLMPreprocessor( - tokenizer=self.tokenizer, + self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. 
- mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=4, - sequence_length=12, - ) - - def test_preprocess_strings(self): - input_data = "the quick brown fox" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = ["the quick brown fox"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant(["the quick brown fox"] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) + "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] - def test_mask_multiple_sentences(self): - sentence_one = tf.constant("the quick") - sentence_two = tf.constant("brown fox") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [1, 4, 4, 2, 4, 4, 2, 0, 0, 0, 0, 0] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=DebertaV3MaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[5, 10, 6, 8]], + [[1.0, 1.0, 1.0, 1.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [1, 2, 4, 5]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) def test_no_masking_zero_rate(self): no_mask_preprocessor = DebertaV3MaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, mask_selection_length=4, sequence_length=12, ) - input_data = "the quick brown fox" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0] + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0]) - 
self.assertAllEqual(y, [0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DebertaV3MaskedLMPreprocessor.presets: + self.run_preset_test( + cls=DebertaV3MaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py index 3dead2c80d..62f84b508c 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,13 +13,10 @@ # limitations under the License. import io -import os import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.models.deberta_v3.deberta_v3_masked_lm import DebertaV3MaskedLM from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import ( @@ -31,14 +28,13 @@ class DebertaV3MaskedLMTest(TestCase): def setUp(self): + # Setup model. + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round", "an eagle flew"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=15, + vocab_size=12, model_type="WORD", pad_id=0, bos_id=1, @@ -50,9 +46,8 @@ def setUp(self): unk_piece="[UNK]", user_defined_symbols="[MASK]", ) - proto = bytes_io.getvalue() self.preprocessor = DebertaV3MaskedLMPreprocessor( - DebertaV3Tokenizer(proto=proto), + DebertaV3Tokenizer(proto=bytes_io.getvalue()), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, @@ -68,60 +63,36 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.masked_lm = DebertaV3MaskedLM( - self.backbone, - preprocessor=self.preprocessor, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. 
) + self.input_data = self.preprocessor(*self.train_data)[0] - self.raw_batch = [ - "the quick brown fox.", - "the eagle flew over fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.masked_lm(self.preprocessed_batch[0]) - - def test_classifier_predict(self): - self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch[0]) - - def test_classifier_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, - ) - self.masked_lm.fit(self.preprocessed_dataset) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.masked_lm) - new_classifier = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_classifier.get_config(), - self.masked_lm.get_config(), + def test_masked_lm_basics(self): + self.run_task_test( + cls=DebertaV3MaskedLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 5, 12), ) @pytest.mark.large def test_saved_model(self): - model_output = self.masked_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.masked_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, DebertaV3MaskedLM) + self.run_model_saving_test( + cls=DebertaV3MaskedLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. 
-        restored_output = restored_model.predict(self.raw_batch)
-        self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01)
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in DebertaV3MaskedLM.presets:
+            self.run_preset_test(
+                cls=DebertaV3MaskedLM,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py
index 1e95e7988c..f6f648ab83 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py
@@ -14,10 +14,9 @@
 
 import io
 
+import pytest
 import sentencepiece
-import tensorflow as tf
 
-from keras_nlp.backend import keras
 from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import (
     DebertaV3Preprocessor,
 )
@@ -27,12 +26,10 @@
 
 class DebertaV3PreprocessorTest(TestCase):
     def setUp(self):
+        vocab_data = ["the quick brown fox", "the earth is round"]
         bytes_io = io.BytesIO()
-        vocab_data = tf.data.Dataset.from_tensor_slices(
-            ["the quick brown fox", "the earth is round"]
-        )
         sentencepiece.SentencePieceTrainer.train(
-            sentence_iterator=vocab_data.as_numpy_iterator(),
+            sentence_iterator=iter(vocab_data),
             model_writer=bytes_io,
             vocab_size=12,
             model_type="WORD",
@@ -46,97 +43,43 @@ def setUp(self):
             unk_piece="[UNK]",
             user_defined_symbols="[MASK]",
         )
-        self.proto = bytes_io.getvalue()
-
-        self.preprocessor = DebertaV3Preprocessor(
-            tokenizer=DebertaV3Tokenizer(proto=self.proto),
-            sequence_length=12,
-        )
-
-    def test_tokenize_strings(self):
-        input_data = "the quick brown fox"
-        output = self.preprocessor(input_data)
-        self.assertAllEqual(
-            output["token_ids"], [1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0]
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
+        self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue())
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            "sequence_length": 8,
+        }
+        self.input_data = (
+            ["the quick brown fox"],
+            [1],  # Pass through labels.
+            [1.0],  # Pass through sample_weights.
        )
 
-    def test_tokenize_list_of_strings(self):
-        # We should handle a list of strings as a batch.
- input_data = ["the quick brown fox"] * 4 - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], [[1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual( - x_out["token_ids"], [[1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x_out["token_ids"], [[1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_multiple_sentences(self): - sentence_one = tf.constant("the quick brown fox") - sentence_two = tf.constant("the earth") - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], [1, 5, 10, 6, 8, 2, 5, 7, 2, 0, 0, 0] - ) - self.assertAllEqual( - output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0] - ) - - def test_tokenize_multiple_batched_sentences(self): - sentence_one = tf.constant(["the quick brown fox"] * 4) - sentence_two = tf.constant(["the earth"] * 4) - # The first tuple or list is always interpreted as an enumeration of - # separate sequences to concatenate. - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], [[1, 5, 10, 6, 8, 2, 5, 7, 2, 0, 0, 0]] * 4 - ) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0]] * 4 + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=DebertaV3Preprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[1, 5, 10, 6, 8, 2, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. 
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = DebertaV3Preprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DebertaV3Preprocessor.presets: + self.run_preset_test( + cls=DebertaV3Preprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py b/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py deleted file mode 100644 index 7b023a0601..0000000000 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets_test.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone -from keras_nlp.models.deberta_v3.deberta_v3_classifier import ( - DebertaV3Classifier, -) -from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import ( - DebertaV3Preprocessor, -) -from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class DebertaV3PresetSmokeTest(TestCase): - """ - A smoke test for DeBERTa presets we run continuously. - - This only tests the smallest weights we have available. 
Run with:
-    `pytest keras_nlp/models/deberta_v3/deberta_v3_presets_test.py --run_large`
-    """
-
-    def test_tokenizer_output(self):
-        tokenizer = DebertaV3Tokenizer.from_preset(
-            "deberta_v3_extra_small_en",
-        )
-        outputs = tokenizer("The quick brown fox.")
-        expected_outputs = [279, 1538, 3258, 16123, 260]
-        self.assertAllEqual(outputs, expected_outputs)
-
-    def test_preprocessor_output(self):
-        preprocessor = DebertaV3Preprocessor.from_preset(
-            "deberta_v3_extra_small_en",
-            sequence_length=4,
-        )
-        outputs = preprocessor("The quick brown fox.")["token_ids"]
-        expected_outputs = [1, 279, 1538, 2]
-        self.assertAllEqual(outputs, expected_outputs)
-
-    def test_preprocessor_mask_token(self):
-        preprocessor = DebertaV3Preprocessor.from_preset(
-            "deberta_v3_extra_small_en",
-            sequence_length=4,
-        )
-        self.assertEqual(preprocessor.tokenizer.id_to_token(128000), "[MASK]")
-        self.assertEqual(preprocessor.tokenizer.token_to_id("[MASK]"), 128000)
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_backbone_output(self, load_weights):
-        input_data = {
-            "token_ids": ops.array([[0, 581, 63773, 2]]),
-            "padding_mask": ops.array([[1, 1, 1, 1]]),
-        }
-        model = DebertaV3Backbone.from_preset(
-            "deberta_v3_extra_small_en", load_weights=load_weights
-        )
-        outputs = model(input_data)
-        if load_weights:
-            outputs = outputs[0, 0, :5]
-            expected = [0.418, -0.116, -0.122, -1.847, -0.035]
-            self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01)
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_classifier_output(self, load_weights):
-        input_data = ["The quick brown fox."]
-        model = DebertaV3Classifier.from_preset(
-            "deberta_v3_extra_small_en",
-            num_classes=2,
-            load_weights=load_weights,
-        )
-        # Never assert output values, as the head weights are random.
-        model.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_classifier_output_without_preprocessing(self, load_weights):
-        input_data = {
-            "token_ids": ops.array([[0, 581, 63773, 2]]),
-            "padding_mask": ops.array([[1, 1, 1, 1]]),
-        }
-        model = DebertaV3Classifier.from_preset(
-            "deberta_v3_extra_small_en",
-            num_classes=2,
-            load_weights=load_weights,
-            preprocessor=None,
-        )
-        # Never assert output values, as the head weights are random.
-        model.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("deberta_tokenizer", DebertaV3Tokenizer),
-        ("deberta_preprocessor", DebertaV3Preprocessor),
-        ("deberta", DebertaV3Backbone),
-        ("deberta_classifier", DebertaV3Classifier),
-    )
-    def test_preset_docstring(self, cls):
-        """Check we did our docstring formatting correctly."""
-        for name in cls.presets:
-            self.assertRegex(cls.from_preset.__doc__, name)
-
-    @parameterized.named_parameters(
-        ("deberta_tokenizer", DebertaV3Tokenizer, {}),
-        ("deberta_preprocessor", DebertaV3Preprocessor, {}),
-        ("deberta", DebertaV3Backbone, {}),
-        ("deberta_classifier", DebertaV3Classifier, {"num_classes": 2}),
    )
-    def test_unknown_preset_error(self, cls, kwargs):
-        # Not a preset name
-        with self.assertRaises(ValueError):
-            cls.from_preset("deberta_v3_extra_small_en_clowntown", **kwargs)
-
-
-@pytest.mark.extra_large
-class DebertaV3PresetFullTest(TestCase):
-    """
-    Test the full enumeration of our presets.
-
-    This tests every DeBERTa preset and is only run manually.
-    Run with:
-    `pytest keras_nlp/models/deberta_v3/deberta_v3_presets_test.py --run_extra_large`
-    """
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_load_deberta(self, load_weights):
-        for preset in DebertaV3Backbone.presets:
-            model = DebertaV3Backbone.from_preset(
-                preset, load_weights=load_weights
-            )
-            input_data = {
-                "token_ids": random.uniform(
-                    shape=(1, 512), dtype="int64", maxval=model.vocabulary_size
-                ),
-                "padding_mask": ops.array([1] * 512, shape=(1, 512)),
-            }
-            model(input_data)
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_load_deberta_classifier(self, load_weights):
-        for preset in DebertaV3Classifier.presets:
-            classifier = DebertaV3Classifier.from_preset(
-                preset,
-                num_classes=4,
-                load_weights=load_weights,
-            )
-            input_data = ["The quick brown fox."]
-            classifier.predict(input_data)
-
-    @parameterized.named_parameters(
-        ("preset_weights", True), ("random_weights", False)
-    )
-    def test_load_deberta_classifier_without_preprocessing(self, load_weights):
-        for preset in DebertaV3Classifier.presets:
-            classifier = DebertaV3Classifier.from_preset(
-                preset,
-                num_classes=4,
-                load_weights=load_weights,
-                preprocessor=None,
-            )
-            input_data = {
-                "token_ids": random.uniform(
-                    shape=(1, 512),
-                    dtype="int64",
-                    maxval=classifier.backbone.vocabulary_size,
-                ),
-                "padding_mask": ops.array([1] * 512, shape=(1, 512)),
-            }
-            classifier.predict(input_data)
-
-    def test_load_tokenizers(self):
-        for preset in DebertaV3Tokenizer.presets:
-            tokenizer = DebertaV3Tokenizer.from_preset(preset)
-            tokenizer("The quick brown fox.")
-
-    def test_load_preprocessors(self):
-        for preset in DebertaV3Preprocessor.presets:
-            preprocessor = DebertaV3Preprocessor.from_preset(preset)
-            preprocessor("The quick brown fox.")
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
index d9a0708b9d..c542de786d 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
@@ -14,24 +14,21 @@
 
 import io
 
+import pytest
 import sentencepiece
-import tensorflow as tf
 
-from keras_nlp.backend import keras
 from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer
 from keras_nlp.tests.test_case import TestCase
 
 
 class DebertaV3TokenizerTest(TestCase):
     def setUp(self):
+        vocab_data = ["the quick brown fox", "the earth is round"]
         bytes_io = io.BytesIO()
-        vocab_data = tf.data.Dataset.from_tensor_slices(
-            ["the quick brown fox", "the earth is round"]
-        )
         sentencepiece.SentencePieceTrainer.train(
-            sentence_iterator=vocab_data.as_numpy_iterator(),
+            sentence_iterator=iter(vocab_data),
             model_writer=bytes_io,
-            vocab_size=10,
+            vocab_size=11,
             model_type="WORD",
             pad_id=0,
             bos_id=1,
@@ -42,41 +39,17 @@ def setUp(self):
             eos_piece="[SEP]",
             unk_piece="[UNK]",
         )
-        self.proto = bytes_io.getvalue()
-
-        self.tokenizer = DebertaV3Tokenizer(proto=self.proto)
-
-    def test_tokenize(self):
-        input_data = "the quick brown fox"
- def test_detokenize_mask_token(self): - input_data = [[4, 9, 5, 7, self.tokenizer.mask_token_id]] - output = self.tokenizer.detokenize(input_data) - self.assertEqual(output, ["the quick brown fox"]) - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 11) - - def test_get_vocabulary_mask_token(self): - self.assertEqual(self.tokenizer.get_vocabulary()[10], "[MASK]") - - def test_id_to_token_mask_token(self): - self.assertEqual(self.tokenizer.id_to_token(10), "[MASK]") - - def test_token_to_id_mask_token(self): - self.assertEqual(self.tokenizer.token_to_id("[MASK]"), 10) + self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = {"proto": bytes_io.getvalue()} + self.input_data = ["the quick brown fox.", "the earth is round."] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=DebertaV3Tokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[4, 9, 5, 3], [4, 6, 8, 3]], + ) def test_errors_missing_special_tokens(self): bytes_io = io.BytesIO() @@ -91,10 +64,29 @@ def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): DebertaV3Tokenizer(proto=bytes_io.getvalue()) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + def test_mask_token_handling(self): + tokenizer = DebertaV3Tokenizer(**self.init_kwargs) + self.assertEqual(tokenizer.get_vocabulary()[11], "[MASK]") + self.assertEqual(tokenizer.id_to_token(11), "[MASK]") + self.assertEqual(tokenizer.token_to_id("[MASK]"), 11) + input_data = [[4, 9, 5, 7, self.tokenizer.mask_token_id]] + output = tokenizer.detokenize(input_data) + self.assertEqual(output, ["the quick brown fox"]) + + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=DebertaV3Tokenizer, + preset="deberta_v3_extra_small_en", + input_data=["The quick brown fox."], + expected_output=[[279, 1538, 3258, 16123, 260]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DebertaV3Tokenizer.presets: + self.run_preset_test( + cls=DebertaV3Tokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py index cc7f765231..8790f87e93 100644 --- a/keras_nlp/models/distil_bert/distil_bert_backbone_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_backbone_test.py @@ -12,74 +12,67 @@ # See the License for the specific language governing permissions and # limitations under the License. 
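The backbone preset tests in this patch pin the first few output values via `expected_partial_output` so that unintentional changes to the forward pass fail loudly. A minimal sketch of that kind of golden-value check, assuming the output converts cleanly to NumPy (illustrative only; the actual logic lives in the shared `run_preset_test` helper):

    import numpy as np

    def check_forward_pass_is_stable(backbone, input_data, expected_first_values):
        # Compare the first few activations of the first token against golden
        # values, with a loose tolerance to stay robust across hardware.
        output = backbone(input_data)
        partial = np.asarray(output)[0, 0, : len(expected_first_values)]
        np.testing.assert_allclose(
            partial, expected_first_values, atol=0.01, rtol=0.01
        )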
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone from keras_nlp.tests.test_case import TestCase -class DistilBertTest(TestCase): +class DistilBertBackboneTest(TestCase): def setUp(self): - self.backbone = DistilBertBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - name="encoder", - ) - - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_distilbert(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_variable_sequence_length_call_distilbert(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "mask_positions": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=DistilBertBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=DistilBertBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, DistilBertBackbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=DistilBertBackbone, + preset="distil_bert_base_en_uncased", + input_data={ + "token_ids": ops.array([[101, 1996, 4248, 102]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape=(1, 4, 768), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [-0.2381, -0.1965, 0.1053, -0.0847, -0.145], + ), + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertBackbone.presets: + self.run_preset_test( + cls=DistilBertBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py index 6ca36d1692..782cf76574 100644 --- a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py @@ -12,14 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone from keras_nlp.models.distil_bert.distil_bert_classifier import ( DistilBertClassifier, @@ -35,13 +29,12 @@ class DistilBertClassifierTest(TestCase): def setUp(self): - # Setup model - + # Setup model. self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["the", "quick", "brown", "fox", "."] self.preprocessor = DistilBertPreprocessor( DistilBertTokenizer(vocabulary=self.vocab), - sequence_length=8, + sequence_length=5, ) self.backbone = DistilBertBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), @@ -51,83 +44,40 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.classifier = DistilBertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, - hidden_dim=4, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) + self.input_data = self.preprocessor(*self.train_data)[0] - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. - self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss="sparse_categorical_crossentropy", - jit_compile=False, + def test_classifier_basics(self): + self.run_task_test( + cls=DistilBertClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - self.classifier.fit(self.preprocessed_dataset) - - def test_serialization(self): - # Defaults. 
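`run_task_test` above folds the old `test_valid_call_classifier`, `test_classifier_predict`, and `test_classifier_fit` cases into a single helper. A rough sketch of the pattern, assuming the helper fits on raw strings and verifies the predicted shape (the real body lives in `keras_nlp/tests/test_case.py` and may differ):

```python
import tensorflow as tf

# Assumed sketch of a task-level smoke test, not the actual helper.
def run_task_test(self, cls, init_kwargs, train_data, expected_output_shape):
    task = cls(**init_kwargs)
    ds = tf.data.Dataset.from_tensor_slices(train_data).batch(2)
    task.fit(ds)  # Tasks ship with compile() defaults, so fit runs directly.
    output = task.predict(train_data[0])
    self.assertEqual(tuple(output.shape), expected_output_shape)
```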
- original = DistilBertClassifier( - self.backbone, - num_classes=2, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. - original = DistilBertClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - hidden_dim=4, - name="test", - trainable=False, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) @pytest.mark.large - def test_saving_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, DistilBertClassifier) + def test_saved_model(self): + self.run_model_saving_test( + cls=DistilBertClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertClassifier.presets: + self.run_preset_test( + cls=DistilBertClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py index 091ae77262..b01b1da8ac 100644 --- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import tensorflow as tf +import pytest -from keras_nlp.backend import keras from keras_nlp.models.distil_bert.distil_bert_masked_lm_preprocessor import ( DistilBertMaskedLMPreprocessor, ) @@ -29,81 +28,60 @@ def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] self.vocab += ["the", "quick", "brown", "fox"] + self.tokenizer = DistilBertTokenizer(vocabulary=self.vocab) + self.init_kwargs = { + "tokenizer": self.tokenizer, + # Simplify our testing by masking every available token. 
+ "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] - self.preprocessor = DistilBertMaskedLMPreprocessor( - tokenizer=DistilBertTokenizer( - vocabulary=self.vocab, + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=DistilBertMaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[9, 10, 11, 12]], + [[1.0, 1.0, 1.0, 1.0]], ), - # Simplify our testing by masking every available token. - mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=5, - sequence_length=8, ) - def test_preprocess_strings(self): - input_data = " THE QUICK BROWN FOX." - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [2, 4, 4, 4, 4, 4, 3, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4, 5]) - self.assertAllEqual(y, [5, 6, 7, 8, 1]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = [" THE QUICK BROWN FOX."] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[2, 4, 4, 4, 4, 4, 3, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4, 5]] * 4) - self.assertAllEqual(y, [[5, 6, 7, 8, 1]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant([" THE QUICK BROWN FOX."] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[2, 4, 4, 4, 4, 4, 3, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4, 5]] * 4) - self.assertAllEqual(y, [[5, 6, 7, 8, 1]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0, 1.0]] * 4) - - def test_mask_multiple_sentences(self): - sentence_one = tf.constant(" THE QUICK") - sentence_two = tf.constant(" BROWN FOX.") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual(x["token_ids"], [2, 4, 4, 3, 4, 4, 4, 3]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1]) - self.assertAllEqual(x["mask_positions"], [1, 2, 4, 5, 6]) - self.assertAllEqual(y, [5, 6, 7, 8, 1]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0, 1.0]) - def test_no_masking_zero_rate(self): no_mask_preprocessor = DistilBertMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, - mask_selection_length=5, - sequence_length=8, + mask_selection_length=4, + sequence_length=12, ) - input_data = " THE QUICK BROWN FOX." 
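Pinning `mask_selection_rate=1.0`, `mask_token_rate=1.0`, and `random_token_rate=0.0` in the kwargs above is what makes hard-coded expected outputs possible: masking becomes deterministic, every non-special token turns into `[MASK]`, and the original ids become the labels. Spelled out with the WordPiece vocabulary from `setUp` (where `[MASK]` is id 4 and the lowercase words are ids 9 through 12):

```python
from keras_nlp.models.distil_bert.distil_bert_masked_lm_preprocessor import (
    DistilBertMaskedLMPreprocessor,
)

preprocessor = DistilBertMaskedLMPreprocessor(
    tokenizer=tokenizer,      # Tokenizer built in setUp above.
    mask_selection_rate=1.0,  # Select every candidate token...
    mask_token_rate=1.0,      # ...and always replace it with [MASK].
    random_token_rate=0.0,    # Never substitute a random token instead.
    mask_selection_length=4,
    sequence_length=12,
)
x, y, sw = preprocessor(["the quick brown fox"])
# x["token_ids"][0] -> [2, 4, 4, 4, 4, 3, 0, ...]: [CLS], four [MASK]s, [SEP], pads.
# y[0]  -> [9, 10, 11, 12]: the ids that were masked out.
# sw[0] -> [1.0, 1.0, 1.0, 1.0]: every masked position contributes to the loss.
```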
- - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [2, 5, 6, 7, 8, 1, 3, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0, 0.0]) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[2, 9, 10, 11, 12, 3, 0, 0, 0, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=DistilBertMaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py index e43feb32f1..52e846f4fd 100644 --- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,12 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone from keras_nlp.models.distil_bert.distil_bert_masked_lm import ( DistilBertMaskedLM, @@ -49,59 +45,40 @@ def setUp(self): vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, num_heads=2, - hidden_dim=4, + hidden_dim=2, intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.masked_lm = DistilBertMaskedLM( - self.backbone, - preprocessor=self.preprocessor, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. 
) + self.input_data = self.preprocessor(*self.train_data)[0] - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.masked_lm(self.preprocessed_batch[0]) - - def test_distil_bert_masked_lm_fit_default_compile(self): - self.masked_lm.fit(self.raw_dataset) - - def test_classifier_predict(self): - self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch[0]) - - def test_classifier_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + def test_masked_lm_basics(self): + self.run_task_test( + cls=DistilBertMaskedLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 5, 10), ) - self.masked_lm.fit(self.preprocessed_dataset) @pytest.mark.large def test_saved_model(self): - model_output = self.masked_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.masked_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, DistilBertMaskedLM) + self.run_model_saving_test( + cls=DistilBertMaskedLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertMaskedLM.presets: + self.run_preset_test( + cls=DistilBertMaskedLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py b/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py index 77176e4c15..22d69c88dc 100644 --- a/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_preprocessor_test.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import tensorflow as tf +import pytest -from keras_nlp.backend import keras from keras_nlp.models.distil_bert.distil_bert_preprocessor import ( DistilBertPreprocessor, ) @@ -29,79 +28,43 @@ def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] self.vocab += ["the", "quick", "brown", "fox"] - self.preprocessor = DistilBertPreprocessor( - DistilBertTokenizer(vocabulary=self.vocab), - sequence_length=8, + self.tokenizer = DistilBertTokenizer(vocabulary=self.vocab) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ( + ["THE QUICK BROWN FOX."], + [1], # Pass through labels. + [1.0], # Pass through sample_weights. ) - def test_tokenize_strings(self): - input_data = "THE QUICK BROWN FOX." 
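Note the one-element `train_data` tuple above: masked language modeling needs no hand-written labels, because the preprocessor manufactures both the targets and the sample weights from the features. That is also why `setUp` keeps only the first element of the preprocessor output as model-ready `input_data`; a sketch reusing the `preprocessor` built there:

```python
features = ["the quick brown fox.", "the slow brown fox."]
x, y, sw = preprocessor(features)  # preprocessor from the setUp above.
# x  -> dict with "token_ids", "padding_mask", and "mask_positions".
# y  -> ids of the masked-out tokens, i.e. the prediction targets.
# sw -> per-position weights marking which predictions count.
input_data = x  # Matches self.input_data = self.preprocessor(*self.train_data)[0].
```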
- output = self.preprocessor(input_data) - self.assertAllEqual(output["token_ids"], [2, 5, 6, 7, 8, 1, 3, 0]) - self.assertAllEqual(output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - - def test_tokenize_list_of_strings(self): - # We should handle a list of strings as as batch. - input_data = ["THE QUICK BROWN FOX."] * 4 - output = self.preprocessor(input_data) - self.assertAllEqual(output["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = tf.constant(["THE QUICK BROWN FOX."] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual(x_out["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["THE QUICK BROWN FOX."] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x_out["token_ids"], [[2, 5, 6, 7, 8, 1, 3, 0]] * 4) - self.assertAllEqual( - x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_multiple_sentences(self): - sentence_one = tf.constant("THE QUICK") - sentence_two = tf.constant("BROWN FOX.") - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual(output["token_ids"], [2, 5, 6, 3, 7, 8, 1, 3]) - self.assertAllEqual(output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1]) - - def test_tokenize_multiple_batched_sentences(self): - sentence_one = tf.constant(["THE QUICK"] * 4) - sentence_two = tf.constant(["BROWN FOX."] * 4) - # The first tuple or list is always interpreted as an enumeration of - # separate sequences to concatenate. - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual(output["token_ids"], [[2, 5, 6, 3, 7, 8, 1, 3]] * 4) - self.assertAllEqual( - output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1]] * 4 + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=DistilBertPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 5, 6, 7, 8, 1, 3, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. 
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = DistilBertPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertPreprocessor.presets: + self.run_preset_test( + cls=DistilBertPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/distil_bert/distil_bert_presets_test.py b/keras_nlp/models/distil_bert/distil_bert_presets_test.py deleted file mode 100644 index 691f3dbbbd..0000000000 --- a/keras_nlp/models/distil_bert/distil_bert_presets_test.py +++ /dev/null @@ -1,194 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.distil_bert.distil_bert_backbone import DistilBertBackbone -from keras_nlp.models.distil_bert.distil_bert_classifier import ( - DistilBertClassifier, -) -from keras_nlp.models.distil_bert.distil_bert_preprocessor import ( - DistilBertPreprocessor, -) -from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( - DistilBertTokenizer, -) -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class DistilBertPresetSmokeTest(TestCase): - """ - A smoke test for DistilBERT presets we run continuously. - - This only tests the smallest weights we have available. 
Run with: - `pytest keras_nlp/models/distilbert/distilbert_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = DistilBertTokenizer.from_preset( - "distil_bert_base_en_uncased", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [1996, 4248, 2829, 4419, 1012] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - tokenizer = DistilBertPreprocessor.from_preset( - "distil_bert_base_en_uncased", - sequence_length=4, - ) - outputs = tokenizer("The quick brown fox.")["token_ids"] - expected_outputs = [101, 1996, 4248, 102] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = DistilBertBackbone.from_preset( - "distil_bert_base_en_uncased", load_weights=load_weights - ) - outputs = model(input_data)[0, 0, :5] - if load_weights: - expected_outputs = [-0.2381, -0.1965, 0.1053, -0.0847, -0.145] - self.assertAllClose(outputs, expected_outputs, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_classifier_output(self, load_weights): - input_data = ["The quick brown fox."] - model = DistilBertClassifier.from_preset( - "distil_bert_base_en_uncased", - num_classes=2, - load_weights=load_weights, - ) - model.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_classifier_output_without_preprocessing(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = DistilBertClassifier.from_preset( - "distil_bert_base_en_uncased", - num_classes=2, - load_weights=load_weights, - preprocessor=None, - ) - model.predict(input_data) - - @parameterized.named_parameters( - ("distilbert_tokenizer", DistilBertTokenizer), - ("distilbert_preprocessor", DistilBertPreprocessor), - ("distilbert", DistilBertBackbone), - ("distilbert_classifier", DistilBertClassifier), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("distilbert_tokenizer", DistilBertTokenizer, {}), - ("distilbert_preprocessor", DistilBertPreprocessor, {}), - ("distilbert", DistilBertBackbone, {}), - ("distilbert_classifier", DistilBertClassifier, {"num_classes": 2}), - ) - def test_unknown_preset_error(self, cls, kwargs): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("distilbert_base_uncased", **kwargs) - - -@pytest.mark.extra_large -class DistilBertPresetFullTest(TestCase): - """ - Tests the full enumeration of our preset. - - This tests every DistilBERT preset and is only run manually. 
- Run with: - `pytest keras_nlp/models/distilbert/distilbert_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_distilbert(self, load_weights): - for preset in DistilBertBackbone.presets: - model = DistilBertBackbone.from_preset( - preset, load_weights=load_weights - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), dtype="int64", maxval=model.vocabulary_size - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - model(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_distilbert_classifier(self, load_weights): - for preset in DistilBertClassifier.presets: - classifier = DistilBertClassifier.from_preset( - preset, - num_classes=2, - load_weights=load_weights, - ) - input_data = ["This quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_distilbert_classifier_no_preprocessing(self, load_weights): - for preset in DistilBertClassifier.presets: - classifier = DistilBertClassifier.from_preset( - preset, - num_classes=2, - load_weights=load_weights, - preprocessor=None, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - classifier.predict(input_data) - - def test_load_tokenizers(self): - for preset in DistilBertTokenizer.presets: - tokenizer = DistilBertTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") - - def test_load_preprocessors(self): - for preset in DistilBertPreprocessor.presets: - preprocessor = DistilBertPreprocessor.from_preset(preset) - preprocessor("The quick brown fox.") diff --git a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py index db85435f1c..b025b4e7fb 100644 --- a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.backend import keras +import pytest + from keras_nlp.models.distil_bert.distil_bert_tokenizer import ( DistilBertTokenizer, ) @@ -24,40 +25,40 @@ def setUp(self): self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] self.vocab += ["THE", "QUICK", "BROWN", "FOX"] self.vocab += ["the", "quick", "brown", "fox"] - self.tokenizer = DistilBertTokenizer(vocabulary=self.vocab) - - def test_tokenize(self): - input_data = "THE QUICK BROWN FOX." - output = self.tokenizer(input_data) - self.assertAllEqual(output, [5, 6, 7, 8, 1]) + self.init_kwargs = {"vocabulary": self.vocab} + self.input_data = ["THE QUICK BROWN FOX.", "THE FOX."] - def test_tokenize_batch(self): - input_data = ["THE QUICK BROWN FOX.", "THE FOX."] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[5, 6, 7, 8, 1], [5, 8, 1]]) + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=DistilBertTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 6, 7, 8, 1], [5, 8, 1]], + ) def test_lowercase(self): - input_data = "THE QUICK BROWN FOX." 
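With the standalone `*_presets_test.py` files deleted above, preset coverage now lives in the regular test files behind the `@pytest.mark.large` and `@pytest.mark.extra_large` markers, gated by the same `--run_large` / `--run_extra_large` flags the old docstrings reference. The flag wiring typically looks like the following `conftest.py` sketch; this illustrates the standard pytest pattern and is not necessarily KerasNLP's exact conftest:

```python
import pytest

def pytest_addoption(parser):
    parser.addoption("--run_large", action="store_true", default=False)
    parser.addoption("--run_extra_large", action="store_true", default=False)

def pytest_collection_modifyitems(config, items):
    skip_large = pytest.mark.skip(reason="need --run_large option to run")
    skip_extra_large = pytest.mark.skip(reason="need --run_extra_large option to run")
    for item in items:
        if "large" in item.keywords and not config.getoption("--run_large"):
            item.add_marker(skip_large)
        if "extra_large" in item.keywords and not config.getoption("--run_extra_large"):
            item.add_marker(skip_extra_large)
```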
tokenizer = DistilBertTokenizer(vocabulary=self.vocab, lowercase=True) - output = tokenizer(input_data) - self.assertAllEqual(output, [9, 10, 11, 12, 1]) - - def test_detokenize(self): - input_tokens = [[5, 6, 7, 8]] - output = self.tokenizer.detokenize(input_tokens) - self.assertAllEqual(output, ["THE QUICK BROWN FOX"]) - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 13) + output = tokenizer(self.input_data) + self.assertAllEqual(output, [[9, 10, 11, 12, 1], [9, 12, 1]]) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): DistilBertTokenizer(vocabulary=["a", "b", "c"]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=DistilBertTokenizer, + preset="distil_bert_base_en_uncased", + input_data=["The quick brown fox."], + expected_output=[[1996, 4248, 2829, 4419, 1012]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in DistilBertTokenizer.presets: + self.run_preset_test( + cls=DistilBertTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/f_net/f_net_backbone_test.py b/keras_nlp/models/f_net/f_net_backbone_test.py index 6ce243f432..25dfaf799a 100644 --- a/keras_nlp/models/f_net/f_net_backbone_test.py +++ b/keras_nlp/models/f_net/f_net_backbone_test.py @@ -12,72 +12,75 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.tests.test_case import TestCase class FNetBackboneTest(TestCase): def setUp(self): - self.backbone = FNetBackbone( - vocabulary_size=10, - num_layers=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - num_segments=4, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "segment_ids": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_f_net(self): - self.backbone(self.input_batch) - - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "f_net_backbone") - - def test_variable_sequence_length_call_f_net(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "segment_ids": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=FNetBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape={ + "sequence_output": (2, 5, 2), + "pooled_output": (2, 2), + }, 
) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, FNetBackbone) + self.run_model_saving_test( + cls=FNetBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model(self.input_batch) - self.assertAllClose( - model_output["pooled_output"], restored_output["pooled_output"] + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=FNetBackbone, + preset="f_net_base_en", + input_data={ + "token_ids": ops.array([[101, 1996, 4248, 102]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + }, + expected_output_shape={ + "sequence_output": (1, 4, 768), + "pooled_output": (1, 768), + }, + # The forward pass from a preset should be stable! + expected_partial_output={ + "sequence_output": ( + ops.array([4.15728, -0.09661, -0.24494, -0.06810, -0.55959]) + ), + "pooled_output": ( + ops.array([-0.04117, -0.03273, -0.02134, 0.99754, -0.09777]) + ), + }, ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetBackbone.presets: + self.run_preset_test( + cls=FNetBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/f_net/f_net_classifier_test.py b/keras_nlp/models/f_net/f_net_classifier_test.py index 66ec838470..b972f64655 100644 --- a/keras_nlp/models/f_net/f_net_classifier_test.py +++ b/keras_nlp/models/f_net/f_net_classifier_test.py @@ -13,15 +13,10 @@ # limitations under the License. import io -import os -import numpy as np import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.models.f_net.f_net_classifier import FNetClassifier from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor @@ -31,33 +26,27 @@ class FNetClassifierTest(TestCase): def setUp(self): - # Setup Model + # Setup model. + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", - pad_id=3, - unk_id=0, - bos_id=4, - eos_id=5, + pad_id=0, + unk_id=1, + bos_id=2, + eos_id=3, pad_piece="", unk_piece="", bos_piece="[CLS]", eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - - self.proto = bytes_io.getvalue() - self.preprocessor = FNetPreprocessor( - tokenizer=FNetTokenizer(proto=self.proto), - sequence_length=8, + FNetTokenizer(proto=bytes_io.getvalue()), + sequence_length=5, ) self.backbone = FNetBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), @@ -66,82 +55,40 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.classifier = FNetClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, - ) - - # Setup data. 
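Unlike the single-tensor backbones, `FNetBackbone` returns a dictionary, which is why the `expected_output_shape` and `expected_partial_output` arguments above are dict-valued. Consuming the outputs looks like this, using the `init_kwargs` and `input_data` from `setUp`:

```python
from keras_nlp.models.f_net.f_net_backbone import FNetBackbone

backbone = FNetBackbone(**init_kwargs)
outputs = backbone(input_data)
sequence_output = outputs["sequence_output"]  # Shape (batch, seq_len, hidden_dim).
pooled_output = outputs["pooled_output"]      # Shape (batch, hidden_dim).
```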
- self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. - self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_fnet_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, - ) - self.classifier.fit(self.preprocessed_dataset) - - def test_serialization(self): - # Defaults. - original = FNetClassifier( - self.backbone, - num_classes=2, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. - original = FNetClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - name="test", - trainable=False, + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_classifier_basics(self): + self.run_task_test( + cls=FNetClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, FNetClassifier) + self.run_model_saving_test( + cls=FNetClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. 
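`run_model_saving_test` above condenses exactly the save-and-restore boilerplate deleted in this hunk. Its core round-trip is approximately the following, modeled directly on the removed code (the shared helper's real body may differ in detail):

```python
import os
from keras_nlp.backend import keras

# Assumed sketch of the shared saving test, mirroring the deleted code.
def run_model_saving_test(self, cls, init_kwargs, input_data):
    model = cls(**init_kwargs)
    model_output = model(input_data)
    path = os.path.join(self.get_temp_dir(), "model.keras")
    model.save(path, save_format="keras_v3")
    restored_model = keras.models.load_model(path)
    # Check we got the real object back.
    self.assertIsInstance(restored_model, cls)
    # Check that output matches.
    self.assertAllClose(model_output, restored_model(input_data))
```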
- restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetClassifier.presets: + self.run_preset_test( + cls=FNetClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py index d926f66566..eb7036005a 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.f_net.f_net_masked_lm_preprocessor import ( FNetMaskedLMPreprocessor, ) @@ -27,103 +26,77 @@ class FNetMaskedLMPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, + unk_id=1, + bos_id=2, + eos_id=3, pad_piece="", unk_piece="", bos_piece="[CLS]", eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.preprocessor = FNetMaskedLMPreprocessor( - tokenizer=FNetTokenizer(proto=self.proto), - mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=4, - sequence_length=12, + self.tokenizer = FNetTokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, + # Simplify our testing by masking every available token. 
+ "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=FNetMaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 4, 4, 4, 4, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[5, 10, 6, 8]], + [[1.0, 1.0, 1.0, 1.0]], + ), ) - def test_preprocess_strings(self): - input_data = "the quick brown fox" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = ["the quick brown fox"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant(["the quick brown fox"] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[1, 4, 4, 4, 4, 2, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4]] * 4) - self.assertAllEqual(y, [[5, 10, 6, 8]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0]] * 4) - - def test_mask_multiple_sentences(self): - sentence_one = tf.constant("the quick") - sentence_two = tf.constant("brown fox") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [1, 4, 4, 2, 4, 4, 2, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 4, 5]) - self.assertAllEqual(y, [5, 10, 6, 8]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0]) - def test_no_masking_zero_rate(self): no_mask_preprocessor = FNetMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, mask_selection_length=4, sequence_length=12, ) - input_data = "the quick brown fox" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [1, 5, 10, 6, 8, 2, 0, 0, 0, 0, 0, 0] + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[2, 5, 10, 6, 8, 3, 0, 0, 0, 0, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=FNetMaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git 
a/keras_nlp/models/f_net/f_net_masked_lm_test.py b/keras_nlp/models/f_net/f_net_masked_lm_test.py index d4e9e548f1..dc8bb8e9b3 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_test.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_test.py @@ -13,13 +13,10 @@ # limitations under the License. import io -import os import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.models.f_net.f_net_masked_lm import FNetMaskedLM from keras_nlp.models.f_net.f_net_masked_lm_preprocessor import ( @@ -31,29 +28,26 @@ class FNetMaskedLMTest(TestCase): def setUp(self): - # Setup Model. + # Setup model. + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the slow brown fox"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=5, + vocab_size=12, model_type="WORD", pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, + unk_id=1, + bos_id=2, + eos_id=3, pad_piece="", unk_piece="", bos_piece="[CLS]", eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() self.preprocessor = FNetMaskedLMPreprocessor( - FNetTokenizer(proto=self.proto), + FNetTokenizer(proto=bytes_io.getvalue()), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, @@ -68,60 +62,36 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.masked_lm = FNetMaskedLM( - self.backbone, - preprocessor=self.preprocessor, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. 
) + self.input_data = self.preprocessor(*self.train_data)[0] - self.raw_batch = [ - "the quick brown fox", - "the slow brown fox", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.masked_lm(self.preprocessed_batch) - - def test_predict(self): - # self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch) - - def test_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, - ) - self.masked_lm.fit(self.preprocessed_dataset) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.masked_lm) - new_classifier = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_classifier.get_config(), - self.masked_lm.get_config(), + def test_masked_lm_basics(self): + self.run_task_test( + cls=FNetMaskedLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 5, 12), ) @pytest.mark.large def test_saved_model(self): - model_output = self.masked_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.masked_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, FNetMaskedLM) + self.run_model_saving_test( + cls=FNetMaskedLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. 
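Each FNet test file now trains its tiny SentencePiece model in memory with one consistent special-token layout, which is what the repeated `pad_id=0, unk_id=1, bos_id=2, eos_id=3` hunks above converge on. The shared recipe, written out once (assuming the conventional `<pad>` and `<unk>` piece strings for the pad and unknown tokens):

```python
import io
import sentencepiece

vocab_data = ["the quick brown fox", "the earth is round"]
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
    sentence_iterator=iter(vocab_data),  # A plain iterator; no tf.data required.
    model_writer=bytes_io,               # Keep the serialized proto in memory.
    vocab_size=12,
    model_type="WORD",
    pad_id=0,
    unk_id=1,
    bos_id=2,
    eos_id=3,
    pad_piece="<pad>",
    unk_piece="<unk>",
    bos_piece="[CLS]",
    eos_piece="[SEP]",
    user_defined_symbols="[MASK]",
)
proto = bytes_io.getvalue()  # Pass as FNetTokenizer(proto=proto).
```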
- restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetMaskedLM.presets: + self.run_preset_test( + cls=FNetMaskedLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/f_net/f_net_preprocessor_test.py b/keras_nlp/models/f_net/f_net_preprocessor_test.py index 638df89123..f5470c700d 100644 --- a/keras_nlp/models/f_net/f_net_preprocessor_test.py +++ b/keras_nlp/models/f_net/f_net_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer from keras_nlp.tests.test_case import TestCase @@ -25,121 +24,60 @@ class FNetPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", - pad_id=3, - unk_id=0, - bos_id=4, - eos_id=5, + pad_id=0, + unk_id=1, + bos_id=2, + eos_id=3, pad_piece="", unk_piece="", bos_piece="[CLS]", eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.preprocessor = FNetPreprocessor( - tokenizer=FNetTokenizer(proto=self.proto), - sequence_length=12, - ) - - def test_tokenize_strings(self): - input_data = "the quick brown fox" - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], [4, 2, 10, 6, 8, 5, 3, 3, 3, 3, 3, 3] - ) - self.assertAllEqual( - output["segment_ids"], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + self.tokenizer = FNetTokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ( + ["the quick brown fox"], + [1], # Pass through labels. + [1.0], # Pass through sample_weights. ) - def test_tokenize_list_of_strings(self): - # We should handle a list of strings as batch. 
- input_data = ["the quick brown fox"] * 4 - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], - [[4, 2, 10, 6, 8, 5, 3, 3, 3, 3, 3, 3]] * 4, - ) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - - def test_tokenize_labeled_batch(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - x_out, y_out, sw_out = self.preprocessor(x, y, sw) - self.assertAllEqual( - x_out["token_ids"], - [[4, 2, 10, 6, 8, 5, 3, 3, 3, 3, 3, 3]] * 4, - ) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["the quick brown fox"] * 4) - y = tf.constant([1] * 4) - sw = tf.constant([1.0] * 4) - ds = tf.data.Dataset.from_tensor_slices((x, y, sw)) - ds = ds.map(self.preprocessor) - x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x_out["token_ids"], - [[4, 2, 10, 6, 8, 5, 3, 3, 3, 3, 3, 3]] * 4, - ) - self.assertAllEqual( - x_out["segment_ids"], [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(y_out, y) - self.assertAllEqual(sw_out, sw) - - def test_tokenize_multiple_sentences(self): - sentence_one = tf.constant("the quick brown fox") - sentence_two = tf.constant("the earth") - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], - [4, 2, 10, 6, 8, 5, 2, 7, 5, 3, 3, 3], - ) - self.assertAllEqual( - output["segment_ids"], [0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0] - ) - - def test_tokenize_multiple_batched_sentences(self): - sentence_one = tf.constant(["the quick brown fox"] * 4) - sentence_two = tf.constant(["the earth"] * 4) - # The first tuple or list is always interpreted as an enumeration of - # separate sequences to concatenate. - output = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - output["token_ids"], - [[4, 2, 10, 6, 8, 5, 2, 7, 5, 3, 3, 3]] * 4, - ) - self.assertAllEqual( - output["segment_ids"], [[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0]] * 4 + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=FNetPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[2, 5, 10, 6, 8, 3, 0, 0]], + "segment_ids": [[0, 0, 0, 0, 0, 0, 0, 0]], + }, + [1], # Pass through labels. + [1.0], # Pass through sample_weights. 
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = FNetPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetPreprocessor.presets: + self.run_preset_test( + cls=FNetPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/f_net/f_net_presets_test.py b/keras_nlp/models/f_net/f_net_presets_test.py deleted file mode 100644 index 9f656b51bf..0000000000 --- a/keras_nlp/models/f_net/f_net_presets_test.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.f_net.f_net_backbone import FNetBackbone -from keras_nlp.models.f_net.f_net_classifier import FNetClassifier -from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor -from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class FNetPresetSmokeTest(TestCase): - """ - A smoke test for FNet presets we run continuously. - - This only tests the smallest weights we have available. Run with: - `pytest keras_nlp/models/f_net/f_net_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = FNetTokenizer.from_preset( - "f_net_base_en", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [97, 1467, 5187, 26, 2521, 16678] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - preprocessor = FNetPreprocessor.from_preset( - "f_net_base_en", - sequence_length=4, - ) - outputs = preprocessor("The quick brown fox.")["token_ids"] - expected_outputs = [4, 97, 1467, 5] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "segment_ids": ops.array([[0, 0, 0, 0]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = FNetBackbone.from_preset( - "f_net_base_en", load_weights=load_weights - ) - outputs = model(input_data)["sequence_output"] - if load_weights: - # The forward pass from a preset should be stable! - # This test should catch cases where we unintentionally change our - # network code in a way that would invalidate our preset weights. 
- # We should only update these numbers if we are updating a weights - # file, or have found a discrepancy with the upstream source. - outputs = outputs[0, 0, :5] - expected = [4.157282, -0.096616, -0.244943, -0.068104, -0.559592] - # Keep a high tolerance, so we are robust to different hardware. - self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_classifier_output(self, load_weights): - input_data = ["The quick brown fox."] - model = FNetClassifier.from_preset( - "f_net_base_en", - num_classes=2, - load_weights=load_weights, - ) - # We don't assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("f_net_tokenizer", FNetTokenizer), - ("f_net_preprocessor", FNetPreprocessor), - ("f_net", FNetBackbone), - ("f_net_classifier", FNetClassifier), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("f_net_tokenizer", FNetTokenizer, {}), - ("f_net_preprocessor", FNetPreprocessor, {}), - ("f_net", FNetBackbone, {}), - ("f_net_classifier", FNetClassifier, {"num_classes": 2}), - ) - def test_unknown_preset_error(self, cls, kwargs): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("f_net_base_en_clowntown", **kwargs) - - -@pytest.mark.extra_large -class FNetPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every FNet preset and is only run manually. - Run with: - `pytest keras_nlp/models/f_net/f_net_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_f_net(self, load_weights): - for preset in FNetBackbone.presets: - model = FNetBackbone.from_preset(preset, load_weights=load_weights) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), dtype="int64", maxval=model.vocabulary_size - ), - "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), - } - model(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_fnet_classifier(self, load_weights): - for preset in FNetClassifier.presets: - classifier = FNetClassifier.from_preset( - preset, - num_classes=2, - load_weights=load_weights, - ) - input_data = ["The quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_fnet_classifier_without_preprocessing(self, load_weights): - for preset in FNetClassifier.presets: - classifier = FNetClassifier.from_preset( - preset, - num_classes=2, - preprocessor=None, - load_weights=load_weights, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "segment_ids": ops.array([0] * 200 + [1] * 312, shape=(1, 512)), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - classifier.predict(input_data) - - def test_load_tokenizers(self): - for preset in FNetTokenizer.presets: - tokenizer = FNetTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") - - def test_load_preprocessors(self): - for preset in FNetPreprocessor.presets: - preprocessor = FNetPreprocessor.from_preset(preset) - preprocessor("The quick brown fox.") diff --git 
a/keras_nlp/models/f_net/f_net_tokenizer_test.py b/keras_nlp/models/f_net/f_net_tokenizer_test.py index 259bba8ef4..80b7f9e037 100644 --- a/keras_nlp/models/f_net/f_net_tokenizer_test.py +++ b/keras_nlp/models/f_net/f_net_tokenizer_test.py @@ -16,57 +16,40 @@ import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer from keras_nlp.tests.test_case import TestCase -@pytest.mark.tf_only class FNetTokenizerTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=12, model_type="WORD", - pad_id=3, - unk_id=0, - bos_id=4, - eos_id=5, + pad_id=0, + unk_id=1, + bos_id=2, + eos_id=3, pad_piece="<pad>", unk_piece="<unk>", bos_piece="[CLS]", eos_piece="[SEP]", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.tokenizer = FNetTokenizer(proto=self.proto) - - def test_tokenize(self): - input_data = "the quick brown fox" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [2, 10, 6, 8]) - - def test_tokenize_batch(self): - input_data = ["the quick brown fox", "the earth is round"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[2, 10, 6, 8], [2, 7, 9, 11]]) + self.init_kwargs = {"proto": bytes_io.getvalue()} + self.input_data = ["the quick brown fox.", "the earth is round."]

- def test_detokenize(self): - input_data = [[2, 10, 6, 8]] - output = self.tokenizer.detokenize(input_data) - self.assertEqual(output, ["the quick brown fox"]) - - def test_vocabulary_size(self): - tokenizer = FNetTokenizer(proto=self.proto) - self.assertEqual(tokenizer.vocabulary_size(), 12) + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=FNetTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 10, 6, 1], [5, 7, 9, 1]], + ) def test_errors_missing_special_tokens(self): bytes_io = io.BytesIO() @@ -81,10 +64,20 @@ def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): FNetTokenizer(proto=bytes_io.getvalue()) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=FNetTokenizer, + preset="f_net_base_en", + input_data=["The quick brown fox."], + expected_output=[[97, 1467, 5187, 26, 2521, 16678]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in FNetTokenizer.presets: + self.run_preset_test( + cls=FNetTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt2/gpt2_backbone_test.py b/keras_nlp/models/gpt2/gpt2_backbone_test.py index c82ec1b06d..d29bc68565 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone_test.py +++ b/keras_nlp/models/gpt2/gpt2_backbone_test.py @@ -12,75 +12,65 @@ # See the License for the specific language governing permissions and # limitations under the License.
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone from keras_nlp.tests.test_case import TestCase -class GPT2Test(TestCase): +class GPT2BackboneTest(TestCase): def setUp(self): - self.backbone = GPT2Backbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "segment_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_call(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "gpt2_backbone") - - def test_variable_sequence_length(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=GPT2Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=GPT2Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, GPT2Backbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=GPT2Backbone, + preset="gpt2_base_en", + input_data={ + "token_ids": ops.array([[1169, 2068, 7586, 21831, 13]]), + "padding_mask": ops.ones((1, 5), dtype="int32"), + }, + expected_output_shape=(1, 5, 768), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [-0.1116, -0.0375, -0.2624, 0.00891, -0.0061] + ), + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in GPT2Backbone.presets: + self.run_preset_test( + cls=GPT2Backbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py index 63ff66b194..b0cdd2e3ee 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.gpt2.gpt2_causal_lm_preprocessor import ( GPT2CausalLMPreprocessor, ) @@ -24,55 +24,42 @@ class GPT2CausalLMPreprocessorTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } - + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] - - self.preprocessor = GPT2CausalLMPreprocessor( - tokenizer=GPT2Tokenizer( - vocabulary=self.vocab, - merges=self.merges, + self.tokenizer = GPT2Tokenizer( + vocabulary=self.vocab, + merges=self.merges, + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ["airplane at airport"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=GPT2CausalLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [[1, 3, 4, 2, 5, 6, 0, 0]], # Pass through labels. + [[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights. ), - sequence_length=8, ) - def test_strings(self): - input_data = "airplane at airport" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 6, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - self.assertAllEqual(y, [1, 3, 4, 2, 5, 6, 0, 0]) - self.assertAllEqual(sw, [1, 1, 1, 1, 1, 1, 0, 0]) - - def test_list_of_strings(self): - input_data = ["airplane at airport"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 preprocessor = GPT2CausalLMPreprocessor( - tokenizer=GPT2Tokenizer( - vocabulary=self.vocab, - merges=self.merges, - ), - sequence_length=8, + **self.init_kwargs, add_start_token=False, add_end_token=False, ) @@ -82,29 +69,10 @@ def test_no_start_end_token(self): self.assertAllEqual(y, [[3, 4, 2, 5, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) - def test_labeled_batch(self): - x = tf.constant(["airplane at airport"] * 4) - y = tf.constant([1] * 4) # Ignored. - sw = tf.constant([1.0] * 4) # Ignored. 
- x, y, sw = self.preprocessor(x, y, sw) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - - def test_dataset(self): - x = tf.constant(["airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_generate_preprocess(self): input_data = "airplane at airport" - x = self.preprocessor.generate_preprocess(input_data) + preprocessor = GPT2CausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_preprocess(input_data) self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0]) @@ -113,13 +81,15 @@ def test_generate_postprocess(self): "token_ids": tf.constant([6, 1, 3, 4, 2, 5, 0, 0]), "padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"), } - x = self.preprocessor.generate_postprocess(input_data) + preprocessor = GPT2CausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_postprocess(input_data) self.assertAllEqual(x, "airplane at airport") - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in GPT2CausalLMPreprocessor.presets: + self.run_preset_test( + cls=GPT2CausalLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py index 412083b275..7ce931505c 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py @@ -12,13 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os from unittest.mock import patch import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone from keras_nlp.models.gpt2.gpt2_causal_lm import GPT2CausalLM @@ -31,15 +28,9 @@ class GPT2CausalLMTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] @@ -55,66 +46,44 @@ def setUp(self): intermediate_dim=8, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.causal_lm = GPT2CausalLM( - backbone=self.backbone, - preprocessor=self.preprocessor, - ) - - self.raw_batch = [ - " airplane at airport", - " airplane at airport", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_causal_lm(self): - self.causal_lm(self.preprocessed_batch) - - def test_predict(self): - self.causal_lm.predict(self.raw_batch) - self.causal_lm.preprocessor = None - self.causal_lm.predict(self.preprocessed_batch) - - def test_fit(self): - self.causal_lm.fit(self.raw_dataset) - self.causal_lm.preprocessor = None - self.causal_lm.fit(self.preprocessed_dataset) - - def test_fit_no_xla(self): - self.causal_lm.preprocessor = None - self.causal_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ([" airplane at airport", " airplane at airport"],) + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_causal_lm_basics(self): + self.run_task_test( + cls=GPT2CausalLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 8, 7), ) - self.causal_lm.fit(self.preprocessed_dataset) def test_generate(self): + causal_lm = GPT2CausalLM(**self.init_kwargs) # String input. prompt = " airplane at airport" - output = self.causal_lm.generate(" airplane at airport") + output = causal_lm.generate(" airplane at airport") self.assertTrue(prompt in output) - # String tensor input. - self.assertIsInstance(self.causal_lm.generate(self.raw_batch)[0], str) - # String dataset input. - self.assertIsInstance(self.causal_lm.generate(self.raw_dataset)[0], str) # Int tensor input. - self.causal_lm.preprocessor = None - outputs = self.causal_lm.generate(self.preprocessed_batch) + prompt_ids = self.preprocessor.generate_preprocess([prompt]) + causal_lm.preprocessor = None + outputs = causal_lm.generate(prompt_ids) # Assert prompt is in output in token id space. 
self.assertAllEqual( outputs["token_ids"][:, :5], - self.preprocessed_batch["token_ids"][:, :5], + prompt_ids["token_ids"][:, :5], ) self.assertAllEqual( outputs["padding_mask"][:, :5], - self.preprocessed_batch["padding_mask"][:, :5], + prompt_ids["padding_mask"][:, :5], ) def test_early_stopping(self): - call_with_cache = self.causal_lm.call_with_cache + causal_lm = GPT2CausalLM(**self.init_kwargs) + call_with_cache = causal_lm.call_with_cache def wrapper(*args, **kwargs): """Modify output logits to always favor end_token_id""" @@ -125,43 +94,37 @@ def wrapper(*args, **kwargs): logits = ops.slice_update(logits, (0, 0, index), update) return logits, hidden_states, cache - with patch.object(self.causal_lm, "call_with_cache", wraps=wrapper): + with patch.object(causal_lm, "call_with_cache", wraps=wrapper): prompt = [" airplane at airport", " airplane"] - output = self.causal_lm.generate(prompt) + output = causal_lm.generate(prompt) # We should immediately abort and output the prompt. self.assertEqual(prompt, output) def test_generate_compilation(self): + causal_lm = GPT2CausalLM(**self.init_kwargs) # Assert we do not recompile with successive calls. - self.causal_lm.generate(self.raw_batch) - first_fn = self.causal_lm.generate_function - self.causal_lm.generate(self.raw_batch) - second_fn = self.causal_lm.generate_function + causal_lm.generate(" airplane at airport") + first_fn = causal_lm.generate_function + causal_lm.generate(" airplane at airport") + second_fn = causal_lm.generate_function self.assertEqual(first_fn, second_fn) # Assert we do recompile after compile is called. - self.causal_lm.compile(sampler="greedy") - self.assertIsNone(self.causal_lm.generate_function) - - def test_serialization(self): - new_causal_lm = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.causal_lm) - ) - self.assertEqual( - new_causal_lm.get_config(), self.causal_lm.get_config() - ) + causal_lm.compile(sampler="greedy") + self.assertIsNone(causal_lm.generate_function) @pytest.mark.large def test_saved_model(self): - keras.utils.set_random_seed(42) - model_output = self.causal_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.causal_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, GPT2CausalLM) + self.run_model_saving_test( + cls=GPT2CausalLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - keras.utils.set_random_seed(42) - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in GPT2CausalLM.presets: + self.run_preset_test( + cls=GPT2CausalLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt2/gpt2_preprocessor_test.py b/keras_nlp/models/gpt2/gpt2_preprocessor_test.py index db221c7279..d7dcd261ed 100644 --- a/keras_nlp/models/gpt2/gpt2_preprocessor_test.py +++ b/keras_nlp/models/gpt2/gpt2_preprocessor_test.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import tensorflow as tf +import pytest -from keras_nlp.backend import keras from keras_nlp.models.gpt2.gpt2_preprocessor import GPT2Preprocessor from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer from keras_nlp.tests.test_case import TestCase @@ -22,41 +21,32 @@ class GPT2PreprocessorTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } - + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] - - self.preprocessor = GPT2Preprocessor( - tokenizer=GPT2Tokenizer( - vocabulary=self.vocab, - merges=self.merges, - ), - sequence_length=8, + self.tokenizer = GPT2Tokenizer( + vocabulary=self.vocab, + merges=self.merges, + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ["airplane at airport"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=GPT2Preprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output={ + "token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, ) - - def test_tokenize_strings(self): - input_data = "airplane at airport" - - x = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 6, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - - def test_tokenize_list_of_strings(self): - input_data = ["airplane at airport"] * 4 - - x = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 @@ -74,33 +64,17 @@ def test_no_start_end_token(self): self.assertAllEqual(x["token_ids"], [[1, 3, 4, 2, 5, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) - def test_tokenize_labeled_batch(self): - x = tf.constant(["airplane at airport"] * 4) - y_in = tf.constant([1] * 4) - sw_in = tf.constant([1.0] * 4) - x, y, sw = self.preprocessor(x, y_in, sw_in) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, y_in) - self.assertAllEqual(sw, sw_in) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - def test_sequence_length_override(self): input_data = "airplane at airport" - x = self.preprocessor(input_data, sequence_length=4) + preprocessor = GPT2Preprocessor(**self.init_kwargs) + x = preprocessor(input_data, sequence_length=4) self.assertAllEqual(x["token_ids"], [6, 1, 3, 6]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def 
test_all_presets(self): + for preset in GPT2Preprocessor.presets: + self.run_preset_test( + cls=GPT2Preprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt2/gpt2_presets_test.py b/keras_nlp/models/gpt2/gpt2_presets_test.py deleted file mode 100644 index a1b645553c..0000000000 --- a/keras_nlp/models/gpt2/gpt2_presets_test.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.gpt2.gpt2_backbone import GPT2Backbone -from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class GPT2PresetSmokeTest(TestCase): - """ - A smoke test for GPT-2 presets we run continuously. - - This only tests the smallest weights we have available. Run with: - `pytest keras_nlp/models/gpt2/gpt2_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = GPT2Tokenizer.from_preset("gpt2_base_en") - outputs = tokenizer("The quick brown fox.") - expected_outputs = [464, 2068, 7586, 21831, 13] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[1169, 2068, 7586, 21831, 13]]), - "padding_mask": ops.array([[1, 1, 1, 1, 1]]), - } - model = GPT2Backbone.from_preset( - "gpt2_base_en", load_weights=load_weights - ) - outputs = model(input_data)[0, 0, :5] - if load_weights: - # The forward pass from a preset should be stable! - # This test should catch cases where we unintentionally change our - # network code in a way that would invalidate our preset weights. - # We should only update these numbers if we are updating a weights - # file, or have found a discrepancy with the upstream source. - expected_outputs = [-0.1116, -0.0375, -0.2624, 0.00891, -0.0061] - # Keep a high tolerance, so we are robust to different hardware. - self.assertAllClose(outputs, expected_outputs, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("gpt2_tokenizer", GPT2Tokenizer), - ("gpt2", GPT2Backbone), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("gpt2_tokenizer", GPT2Tokenizer), - ("gpt2", GPT2Backbone), - ) - def test_unknown_preset_error(self, cls): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("gpt2_base_en_clowntown") - - -@pytest.mark.extra_large -class GPT2PresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every GPT-2 preset and is only run manually. 
- Run with: - `pytest keras_nlp/models/gpt2/gpt2_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_gpt2(self, load_weights): - for preset in GPT2Backbone.presets: - model = GPT2Backbone.from_preset(preset, load_weights=load_weights) - input_data = { - "token_ids": random.uniform( - shape=(1, 1024), - dtype="int64", - maxval=model.vocabulary_size, - ), - "padding_mask": ops.array([1] * 1024, shape=(1, 1024)), - } - model(input_data) - - def test_load_tokenizers(self): - for preset in GPT2Tokenizer.presets: - tokenizer = GPT2Tokenizer.from_preset(preset) - tokenizer("The quick brown fox.") diff --git a/keras_nlp/models/gpt2/gpt2_tokenizer_test.py b/keras_nlp/models/gpt2/gpt2_tokenizer_test.py index 38ed89f121..026392fd25 100644 --- a/keras_nlp/models/gpt2/gpt2_tokenizer_test.py +++ b/keras_nlp/models/gpt2/gpt2_tokenizer_test.py @@ -12,82 +12,52 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.backend import keras +import pytest + from keras_nlp.models.gpt2.gpt2_tokenizer import GPT2Tokenizer from keras_nlp.tests.test_case import TestCase class GPT2TokenizerTest(TestCase): def setUp(self): - self.vocab = { - "<|endoftext|>": 0, - "Ġair": 1, - "plane": 2, - "Ġat": 3, - "port": 4, - "Ġkoh": 5, - "li": 6, - "Ġis": 7, - "Ġthe": 8, - "Ġbest": 9, - } - self.merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - self.merges += [ - "Ġa t", - "p o", - "r t", - "o h", - "l i", - "Ġi s", - "Ġb e", - "s t", - ] - self.merges += [ - "Ġt h", - "Ġai r", - "pl a", - "Ġk oh", - "Ġth e", - "Ġbe st", - "po rt", + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges} + self.input_data = [ + " airplane at airport<|endoftext|>", + " airplane airport", ] - self.merges += ["pla ne"] - self.tokenizer = GPT2Tokenizer( - vocabulary=self.vocab, merges=self.merges + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=GPT2Tokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[2, 3, 4, 2, 5, 6], [2, 3, 2, 5]], ) - def test_tokenize(self): - input_data = " airplane at airport" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [1, 2, 3, 1, 4]) - - def test_tokenize_end_token(self): - input_data = " airplane at airport<|endoftext|>" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [1, 2, 3, 1, 4, 0]) - - def test_tokenize_batch(self): - input_data = [" airplane at airport", " kohli is the best"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[1, 2, 3, 1, 4], [5, 6, 7, 8, 9]]) - - def test_detokenize(self): - input_tokens = [1, 2, 3, 1, 4] - output = self.tokenizer.detokenize(input_tokens) - self.assertEqual(output, " airplane at airport") - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 10) - def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): GPT2Tokenizer(vocabulary=["a", "b", "c"], merges=[]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - 
new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=GPT2Tokenizer, + preset="gpt2_base_en", + input_data=["The quick brown fox."], + expected_output=[[464, 2068, 7586, 21831, 13]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in GPT2Tokenizer.presets: + self.run_preset_test( + cls=GPT2Tokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py index c885a2682c..f207f4f19e 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone_test.py @@ -12,74 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.models import GPTNeoXBackbone +from keras_nlp.backend import ops +from keras_nlp.models.gpt_neo_x.gpt_neo_x_backbone import GPTNeoXBackbone from keras_nlp.tests.test_case import TestCase -class GPTNeoXTest(TestCase): +class GPTNeoXBackboneTest(TestCase): def setUp(self): - self.backbone = GPTNeoXBackbone( - vocabulary_size=10, - num_layers=4, - num_heads=4, - hidden_dim=64, - intermediate_dim=64, - max_sequence_length=10, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_call(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 64)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "gpt_neo_x_backbone") - - def test_variable_sequence_length(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=GPTNeoXBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, GPTNeoXBackbone) - - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + self.run_model_saving_test( + cls=GPTNeoXBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py index e494b12b63..f5a7c57421 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor_test.py @@ -14,7 +14,6 @@ import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm_preprocessor import ( GPTNeoXCausalLMPreprocessor, ) @@ -24,55 +23,42 @@ class GPTNeoXCausalLMPreprocessorTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } - + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] - - self.preprocessor = GPTNeoXCausalLMPreprocessor( - tokenizer=GPTNeoXTokenizer( - vocabulary=self.vocab, - merges=self.merges, + self.tokenizer = GPTNeoXTokenizer( + vocabulary=self.vocab, + merges=self.merges, + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ["airplane at airport"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=GPTNeoXCausalLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [[1, 3, 4, 2, 5, 6, 0, 0]], # Pass through labels. + [[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights. ), - sequence_length=8, ) - def test_strings(self): - input_data = "airplane at airport" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 6, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - self.assertAllEqual(y, [1, 3, 4, 2, 5, 6, 0, 0]) - self.assertAllEqual(sw, [1, 1, 1, 1, 1, 1, 0, 0]) - - def test_list_of_strings(self): - input_data = ["airplane at airport"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 preprocessor = GPTNeoXCausalLMPreprocessor( - tokenizer=GPTNeoXTokenizer( - vocabulary=self.vocab, - merges=self.merges, - ), - sequence_length=8, + **self.init_kwargs, add_start_token=False, add_end_token=False, ) @@ -82,29 +68,10 @@ def test_no_start_end_token(self): self.assertAllEqual(y, [[3, 4, 2, 5, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) - def test_labeled_batch(self): - x = tf.constant(["airplane at airport"] * 4) - y = tf.constant([1] * 4) # Ignored. - sw = tf.constant([1.0] * 4) # Ignored. 
- x, y, sw = self.preprocessor(x, y, sw) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - - def test_dataset(self): - x = tf.constant(["airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[1, 3, 4, 2, 5, 6, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_generate_preprocess(self): input_data = "airplane at airport" - x = self.preprocessor.generate_preprocess(input_data) + preprocessor = GPTNeoXCausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_preprocess(input_data) self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0]) @@ -113,13 +80,6 @@ def test_generate_postprocess(self): "token_ids": tf.constant([6, 1, 3, 4, 2, 5, 0, 0]), "padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"), } - x = self.preprocessor.generate_postprocess(input_data) + preprocessor = GPTNeoXCausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_postprocess(input_data) self.assertAllEqual(x, "airplane at airport") - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py index dda9f51bdd..6857d5f40e 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py @@ -12,13 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os from unittest.mock import patch import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.gpt_neo_x.gpt_neo_x_backbone import GPTNeoXBackbone from keras_nlp.models.gpt_neo_x.gpt_neo_x_causal_lm import GPTNeoXCausalLM @@ -31,15 +28,9 @@ class GPTNeoXCausalLMTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] @@ -50,72 +41,49 @@ def setUp(self): self.backbone = GPTNeoXBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, - num_heads=4, - hidden_dim=32, - intermediate_dim=32, + num_heads=2, + hidden_dim=4, + intermediate_dim=8, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.causal_lm = GPTNeoXCausalLM( - backbone=self.backbone, - preprocessor=self.preprocessor, - ) - - self.raw_batch = [ - " airplane at airport", - " airplane at airport", - ] - - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_causal_lm(self): - self.causal_lm(self.preprocessed_batch) - - def test_predict(self): - self.causal_lm.predict(self.raw_batch) - self.causal_lm.preprocessor = None - self.causal_lm.predict(self.preprocessed_batch) - - def test_fit(self): - self.causal_lm.fit(self.raw_dataset) - self.causal_lm.preprocessor = None - self.causal_lm.fit(self.preprocessed_dataset) - - def test_fit_no_xla(self): - self.causal_lm.preprocessor = None - self.causal_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ([" airplane at airport", " airplane at airport"],) + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_causal_lm_basics(self): + self.run_task_test( + cls=GPTNeoXCausalLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 8, 7), ) - self.causal_lm.fit(self.preprocessed_dataset) def test_generate(self): + causal_lm = GPTNeoXCausalLM(**self.init_kwargs) # String input. prompt = " airplane at airport" - output = self.causal_lm.generate(" airplane at airport") + output = causal_lm.generate(" airplane at airport") self.assertTrue(prompt in output) - # String tensor input. - self.assertIsInstance(self.causal_lm.generate(self.raw_batch)[0], str) - # String dataset input. - self.assertIsInstance(self.causal_lm.generate(self.raw_dataset)[0], str) # Int tensor input. - self.causal_lm.preprocessor = None - outputs = self.causal_lm.generate(self.preprocessed_batch) + prompt_ids = self.preprocessor.generate_preprocess([prompt]) + causal_lm.preprocessor = None + outputs = causal_lm.generate(prompt_ids) # Assert prompt is in output in token id space. 
self.assertAllEqual( outputs["token_ids"][:, :5], - self.preprocessed_batch["token_ids"][:, :5], + prompt_ids["token_ids"][:, :5], ) self.assertAllEqual( outputs["padding_mask"][:, :5], - self.preprocessed_batch["padding_mask"][:, :5], + prompt_ids["padding_mask"][:, :5], ) def test_early_stopping(self): - call_with_cache = self.causal_lm.call_with_cache + causal_lm = GPTNeoXCausalLM(**self.init_kwargs) + call_with_cache = causal_lm.call_with_cache def wrapper(*args, **kwargs): """Modify output logits to always favor end_token_id""" @@ -126,43 +94,28 @@ def wrapper(*args, **kwargs): logits = ops.slice_update(logits, (0, 0, index), update) return logits, hidden_states, cache - with patch.object(self.causal_lm, "call_with_cache", wraps=wrapper): + with patch.object(causal_lm, "call_with_cache", wraps=wrapper): prompt = [" airplane at airport", " airplane"] - output = self.causal_lm.generate(prompt) + output = causal_lm.generate(prompt) # We should immediately abort and output the prompt. self.assertEqual(prompt, output) def test_generate_compilation(self): + causal_lm = GPTNeoXCausalLM(**self.init_kwargs) # Assert we do not recompile with successive calls. - self.causal_lm.generate(self.raw_batch) - first_fn = self.causal_lm.generate_function - self.causal_lm.generate(self.raw_batch) - second_fn = self.causal_lm.generate_function + causal_lm.generate(" airplane at airport") + first_fn = causal_lm.generate_function + causal_lm.generate(" airplane at airport") + second_fn = causal_lm.generate_function self.assertEqual(first_fn, second_fn) # Assert we do recompile after compile is called. - self.causal_lm.compile(sampler="greedy") - self.assertIsNone(self.causal_lm.generate_function) - - def test_serialization(self): - new_causal_lm = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.causal_lm) - ) - self.assertEqual( - new_causal_lm.get_config(), self.causal_lm.get_config() - ) + causal_lm.compile(sampler="greedy") + self.assertIsNone(causal_lm.generate_function) @pytest.mark.large def test_saved_model(self): - keras.utils.set_random_seed(42) - model_output = self.causal_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.causal_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, GPTNeoXCausalLM) - - # Check that output matches. - keras.utils.set_random_seed(42) - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + self.run_model_saving_test( + cls=GPTNeoXCausalLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py index 53655822a0..c87329af4a 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor_test.py @@ -12,9 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import tensorflow as tf - -from keras_nlp.backend import keras from keras_nlp.models.gpt_neo_x.gpt_neo_x_preprocessor import ( GPTNeoXPreprocessor, ) @@ -24,41 +21,32 @@ class GPTNeoXPreprocessorTest(TestCase): def setUp(self): - self.vocab = { - "!": 0, - "air": 1, - "Ġair": 2, - "plane": 3, - "Ġat": 4, - "port": 5, - "<|endoftext|>": 6, - } - + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] self.merges += ["Ġai r", "Ġa i", "pla ne"] - - self.preprocessor = GPTNeoXPreprocessor( - tokenizer=GPTNeoXTokenizer( - vocabulary=self.vocab, - merges=self.merges, - ), - sequence_length=8, + self.tokenizer = GPTNeoXTokenizer( + vocabulary=self.vocab, + merges=self.merges, + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ["airplane at airport"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=GPTNeoXPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output={ + "token_ids": [[6, 1, 3, 4, 2, 5, 6, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, ) - - def test_tokenize_strings(self): - input_data = "airplane at airport" - - x = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [6, 1, 3, 4, 2, 5, 6, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - - def test_tokenize_list_of_strings(self): - input_data = ["airplane at airport"] * 4 - - x = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) def test_no_start_end_token(self): input_data = ["airplane at airport"] * 4 @@ -76,33 +64,8 @@ def test_no_start_end_token(self): self.assertAllEqual(x["token_ids"], [[1, 3, 4, 2, 5, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) - def test_tokenize_labeled_batch(self): - x = tf.constant(["airplane at airport"] * 4) - y_in = tf.constant([1] * 4) - sw_in = tf.constant([1.0] * 4) - x, y, sw = self.preprocessor(x, y_in, sw_in) - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, y_in) - self.assertAllEqual(sw, sw_in) - - def test_tokenize_labeled_dataset(self): - x = tf.constant(["airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[6, 1, 3, 4, 2, 5, 6, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - def test_sequence_length_override(self): input_data = "airplane at airport" - x = self.preprocessor(input_data, sequence_length=4) + preprocessor = GPTNeoXPreprocessor(**self.init_kwargs) + x = preprocessor(input_data, sequence_length=4) self.assertAllEqual(x["token_ids"], [6, 1, 3, 6]) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py 
index da6daa2c24..c23b7dd44d 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer_test.py @@ -12,82 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.backend import keras from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer from keras_nlp.tests.test_case import TestCase class GPTNeoXTokenizerTest(TestCase): def setUp(self): - self.vocab = { - "<|endoftext|>": 0, - "Ġair": 1, - "plane": 2, - "Ġat": 3, - "port": 4, - "Ġkoh": 5, - "li": 6, - "Ġis": 7, - "Ġthe": 8, - "Ġbest": 9, - } - self.merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - self.merges += [ - "Ġa t", - "p o", - "r t", - "o h", - "l i", - "Ġi s", - "Ġb e", - "s t", + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges} + self.input_data = [ + " airplane at airport<|endoftext|>", + " airplane airport", ] - self.merges += [ - "Ġt h", - "Ġai r", - "pl a", - "Ġk oh", - "Ġth e", - "Ġbe st", - "po rt", - ] - self.merges += ["pla ne"] - self.tokenizer = GPTNeoXTokenizer( - vocabulary=self.vocab, merges=self.merges + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=GPTNeoXTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[2, 3, 4, 2, 5, 6], [2, 3, 2, 5]], ) - def test_tokenize(self): - input_data = " airplane at airport" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [1, 2, 3, 1, 4]) - - def test_tokenize_end_token(self): - input_data = " airplane at airport<|endoftext|>" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [1, 2, 3, 1, 4, 0]) - - def test_tokenize_batch(self): - input_data = [" airplane at airport", " kohli is the best"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[1, 2, 3, 1, 4], [5, 6, 7, 8, 9]]) - - def test_detokenize(self): - input_tokens = [1, 2, 3, 1, 4] - output = self.tokenizer.detokenize(input_tokens) - self.assertEqual(output, " airplane at airport") - - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 10) - def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): GPTNeoXTokenizer(vocabulary=["a", "b", "c"], merges=[]) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), - ) diff --git a/keras_nlp/models/opt/opt_backbone_test.py b/keras_nlp/models/opt/opt_backbone_test.py index c887001040..445bdaebad 100644 --- a/keras_nlp/models/opt/opt_backbone_test.py +++ b/keras_nlp/models/opt/opt_backbone_test.py @@ -12,75 +12,65 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.opt.opt_backbone import OPTBackbone from keras_nlp.tests.test_case import TestCase class OPTBackboneTest(TestCase): def setUp(self): - self.backbone = OPTBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_opt(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "opt_backbone") - - def test_variable_sequence_length_call_opt(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=OPTBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) - @pytest.mark.large # Saving is slow, so mark these large. + @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=OPTBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, OPTBackbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=OPTBackbone, + preset="opt_125m_en", + input_data={ + "token_ids": ops.array([[133, 2119, 6219, 23602, 4]]), + "padding_mask": ops.ones((1, 5), dtype="int32"), + }, + expected_output_shape=(1, 5, 768), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [-0.246, -1.004, -0.072, 0.097, 0.533] + ), + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in OPTBackbone.presets: + self.run_preset_test( + cls=OPTBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py index eb54a94196..2f225612d4 100644 --- a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. +import pytest import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.opt.opt_causal_lm_preprocessor import ( OPTCausalLMPreprocessor, ) @@ -24,104 +24,71 @@ class OPTCausalLMPreprocessorTest(TestCase): def setUp(self): - self.vocab = { - "<pad>": 0, - "</s>": 1, - "air": 2, - "Ġair": 3, - "plane": 4, - "Ġat": 5, - "port": 6, + self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] + self.tokenizer = OPTTokenizer( + vocabulary=self.vocab, + merges=self.merges, + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - self.merges = merges - - self.preprocessor = OPTCausalLMPreprocessor( - tokenizer=OPTTokenizer( - vocabulary=self.vocab, - merges=self.merges, + self.input_data = ["airplane at airport"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=OPTCausalLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[1, 2, 4, 5, 3, 6, 1, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]], + }, + [[2, 4, 5, 3, 6, 1, 0, 0]], # Pass through labels. + [[1, 1, 1, 1, 1, 1, 0, 0]], # Pass through sample_weights.
), - sequence_length=8, ) - def test_strings(self): - input_data = " airplane at airport" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [1, 3, 4, 5, 3, 6, 1, 0]) - self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0]) - self.assertAllEqual(y, [3, 4, 5, 3, 6, 1, 0, 0]) - self.assertAllEqual(sw, [1, 1, 1, 1, 1, 1, 0, 0]) - - def test_list_of_strings(self): - input_data = [" airplane at airport"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[3, 4, 5, 3, 6, 1, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_no_start_end_token(self): - input_data = [" airplane at airport"] * 4 + input_data = ["airplane at airport"] * 4 preprocessor = OPTCausalLMPreprocessor( - tokenizer=OPTTokenizer( - vocabulary=self.vocab, - merges=self.merges, - ), - sequence_length=8, + **self.init_kwargs, add_start_token=False, add_end_token=False, ) x, y, sw = preprocessor(input_data) - self.assertAllEqual(x["token_ids"], [[3, 4, 5, 3, 6, 0, 0, 0]] * 4) + self.assertAllEqual(x["token_ids"], [[2, 4, 5, 3, 6, 0, 0, 0]] * 4) self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4) self.assertAllEqual(y, [[4, 5, 3, 6, 0, 0, 0, 0]] * 4) self.assertAllEqual(sw, [[1, 1, 1, 1, 0, 0, 0, 0]] * 4) - def test_labeled_batch(self): - x = tf.constant([" airplane at airport"] * 4) - y = tf.constant([1] * 4) # Ignored. - sw = tf.constant([1.0] * 4) # Ignored. - x, y, sw = self.preprocessor(x, y, sw) - self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[3, 4, 5, 3, 6, 1, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - - def test_dataset(self): - x = tf.constant([" airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, [[3, 4, 5, 3, 6, 1, 0, 0]] * 4) - self.assertAllEqual(sw, [[1, 1, 1, 1, 1, 1, 0, 0]] * 4) - def test_generate_preprocess(self): - input_data = " airplane at airport" - x = self.preprocessor.generate_preprocess(input_data) - self.assertAllEqual(x["token_ids"], [1, 3, 4, 5, 3, 6, 0, 0]) + input_data = "airplane at airport" + preprocessor = OPTCausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_preprocess(input_data) + self.assertAllEqual(x["token_ids"], [1, 2, 4, 5, 3, 6, 0, 0]) self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0]) def test_generate_postprocess(self): input_data = { - "token_ids": tf.constant([1, 3, 4, 5, 3, 6, 0, 0]), + "token_ids": tf.constant([1, 2, 4, 5, 3, 6, 0, 0]), "padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"), } - x = self.preprocessor.generate_postprocess(input_data) - self.assertAllEqual(x, " airplane at airport") - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + preprocessor = OPTCausalLMPreprocessor(**self.init_kwargs) + x = preprocessor.generate_postprocess(input_data) + 
self.assertAllEqual(x, "airplane at airport")
+
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in OPTCausalLMPreprocessor.presets:
+            self.run_preset_test(
+                cls=OPTCausalLMPreprocessor,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/opt/opt_causal_lm_test.py b/keras_nlp/models/opt/opt_causal_lm_test.py
index 1e8fcb8785..e6e707e72a 100644
--- a/keras_nlp/models/opt/opt_causal_lm_test.py
+++ b/keras_nlp/models/opt/opt_causal_lm_test.py
@@ -12,13 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
 from unittest.mock import patch
 
 import pytest
-import tensorflow as tf
 
-from keras_nlp.backend import keras
 from keras_nlp.backend import ops
 from keras_nlp.models.opt.opt_backbone import OPTBackbone
 from keras_nlp.models.opt.opt_causal_lm import OPTCausalLM
@@ -31,24 +28,11 @@
 
 class OPTCausalLMTest(TestCase):
     def setUp(self):
-        self.vocab = {
-            "<pad>": 0,
-            "</s>": 1,
-            "Ġair": 2,
-            "plane": 3,
-            "Ġat": 4,
-            "port": 5,
-            "Ġkoh": 6,
-            "li": 7,
-            "Ġis": 8,
-            "Ġthe": 9,
-            "Ġbest": 10,
-        }
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-        self.merges = merges
+        self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
         self.preprocessor = OPTCausalLMPreprocessor(
             OPTTokenizer(vocabulary=self.vocab, merges=self.merges),
             sequence_length=8,
@@ -61,66 +45,44 @@ def setUp(self):
             intermediate_dim=8,
             max_sequence_length=self.preprocessor.packer.sequence_length,
         )
-        self.causal_lm = OPTCausalLM(
-            backbone=self.backbone,
-            preprocessor=self.preprocessor,
-        )
-
-        self.raw_batch = [
-            " airplane at airport",
-            " airplane at airport",
-        ]
-        self.preprocessed_batch = self.preprocessor(self.raw_batch)[0]
-        self.raw_dataset = tf.data.Dataset.from_tensor_slices(
-            self.raw_batch
-        ).batch(2)
-        self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor)
-
-    def test_valid_call_causal_lm(self):
-        self.causal_lm(self.preprocessed_batch)
-
-    def test_predict(self):
-        self.causal_lm.predict(self.raw_batch)
-        self.causal_lm.preprocessor = None
-        self.causal_lm.predict(self.preprocessed_batch)
-
-    def test_fit(self):
-        self.causal_lm.fit(self.raw_dataset)
-        self.causal_lm.preprocessor = None
-        self.causal_lm.fit(self.preprocessed_dataset)
-
-    def test_fit_no_xla(self):
-        self.causal_lm.preprocessor = None
-        self.causal_lm.compile(
-            loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
-            jit_compile=False,
+        self.init_kwargs = {
+            "preprocessor": self.preprocessor,
+            "backbone": self.backbone,
+        }
+        self.train_data = ([" airplane at airport", " airplane at airport"],)
+        self.input_data = self.preprocessor(*self.train_data)[0]
+
+    def test_causal_lm_basics(self):
+        self.run_task_test(
+            cls=OPTCausalLM,
+            init_kwargs=self.init_kwargs,
+            train_data=self.train_data,
+            expected_output_shape=(2, 8, 7),
         )
-        self.causal_lm.fit(self.preprocessed_dataset)
 
     def test_generate(self):
+        causal_lm = OPTCausalLM(**self.init_kwargs)
         # String input.
prompt = " airplane at airport" - output = self.causal_lm.generate(" airplane at airport") + output = causal_lm.generate(" airplane at airport") self.assertTrue(prompt in output) - # String tensor input. - self.assertIsInstance(self.causal_lm.generate(self.raw_batch)[0], str) - # String dataset input. - self.assertIsInstance(self.causal_lm.generate(self.raw_dataset)[0], str) # Int tensor input. - self.causal_lm.preprocessor = None - outputs = self.causal_lm.generate(self.preprocessed_batch) + prompt_ids = self.preprocessor.generate_preprocess([prompt]) + causal_lm.preprocessor = None + outputs = causal_lm.generate(prompt_ids) # Assert prompt is in output in token id space. self.assertAllEqual( outputs["token_ids"][:, :5], - self.preprocessed_batch["token_ids"][:, :5], + prompt_ids["token_ids"][:, :5], ) self.assertAllEqual( outputs["padding_mask"][:, :5], - self.preprocessed_batch["padding_mask"][:, :5], + prompt_ids["padding_mask"][:, :5], ) def test_early_stopping(self): - call_with_cache = self.causal_lm.call_with_cache + causal_lm = OPTCausalLM(**self.init_kwargs) + call_with_cache = causal_lm.call_with_cache def wrapper(*args, **kwargs): """Modify output logits to always favor end_token_id""" @@ -131,43 +93,37 @@ def wrapper(*args, **kwargs): logits = ops.slice_update(logits, (0, 0, index), update) return logits, hidden_states, cache - with patch.object(self.causal_lm, "call_with_cache", wraps=wrapper): + with patch.object(causal_lm, "call_with_cache", wraps=wrapper): prompt = [" airplane at airport", " airplane"] - output = self.causal_lm.generate(prompt) + output = causal_lm.generate(prompt) # We should immediately abort and output the prompt. self.assertEqual(prompt, output) def test_generate_compilation(self): + causal_lm = OPTCausalLM(**self.init_kwargs) # Assert we do not recompile with successive calls. - self.causal_lm.generate(self.raw_batch) - first_fn = self.causal_lm.generate_function - self.causal_lm.generate(self.raw_batch) - second_fn = self.causal_lm.generate_function + causal_lm.generate(" airplane at airport") + first_fn = causal_lm.generate_function + causal_lm.generate(" airplane at airport") + second_fn = causal_lm.generate_function self.assertEqual(first_fn, second_fn) # Assert we do recompile after compile is called. - self.causal_lm.compile(sampler="greedy") - self.assertIsNone(self.causal_lm.generate_function) - - def test_serialization(self): - new_causal_lm = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.causal_lm) - ) - self.assertEqual( - new_causal_lm.get_config(), self.causal_lm.get_config() - ) + causal_lm.compile(sampler="greedy") + self.assertIsNone(causal_lm.generate_function) @pytest.mark.large def test_saved_model(self): - keras.utils.set_random_seed(42) - model_output = self.causal_lm.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.causal_lm.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, OPTCausalLM) + self.run_model_saving_test( + cls=OPTCausalLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. 
-        keras.utils.set_random_seed(42)
-        restored_output = restored_model.predict(self.raw_batch)
-        self.assertAllClose(model_output, restored_output)
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in OPTCausalLM.presets:
+            self.run_preset_test(
+                cls=OPTCausalLM,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/opt/opt_preprocessor_test.py b/keras_nlp/models/opt/opt_preprocessor_test.py
index ae68cf8089..b80c409b92 100644
--- a/keras_nlp/models/opt/opt_preprocessor_test.py
+++ b/keras_nlp/models/opt/opt_preprocessor_test.py
@@ -12,9 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import tensorflow as tf
+import pytest
 
-from keras_nlp.backend import keras
 from keras_nlp.models.opt.opt_preprocessor import OPTPreprocessor
 from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer
 from keras_nlp.tests.test_case import TestCase
@@ -22,46 +21,34 @@
 
 class OPTPreprocessorTest(TestCase):
    def setUp(self):
-        self.vocab = {
-            "<pad>": 0,
-            "</s>": 1,
-            "air": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
+        self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
+        self.tokenizer = OPTTokenizer(
+            vocabulary=self.vocab,
+            merges=self.merges,
+        )
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            "sequence_length": 8,
         }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-        self.merges = merges
-
-        self.preprocessor = OPTPreprocessor(
-            tokenizer=OPTTokenizer(
-                vocabulary=self.vocab,
-                merges=self.merges,
-            ),
-            sequence_length=8,
+        self.input_data = ["airplane at airport"]
+
+    def test_preprocessor_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=OPTPreprocessor,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output={
+                "token_ids": [[1, 2, 4, 5, 3, 6, 1, 0]],
+                "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
+            },
        )
 
-    def test_tokenize_strings(self):
-        input_data = " airplane at airport"
-
-        x = self.preprocessor(input_data)
-        self.assertAllEqual(x["token_ids"], [1, 3, 4, 5, 3, 6, 1, 0])
-        self.assertAllEqual(x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0])
-
-    def test_tokenize_list_of_strings(self):
-        input_data = [" airplane at airport"] * 4
-
-        x = self.preprocessor(input_data)
-        self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4)
-        self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4)
-
     def test_no_start_end_token(self):
-        input_data = [" airplane at airport"] * 4
+        input_data = ["airplane at airport"] * 4
 
         preprocessor = OPTPreprocessor(
             tokenizer=OPTTokenizer(
@@ -73,36 +60,20 @@ def test_no_start_end_token(self):
             add_end_token=False,
         )
         x = preprocessor(input_data)
-        self.assertAllEqual(x["token_ids"], [[3, 4, 5, 3, 6, 0, 0, 0]] * 4)
+        self.assertAllEqual(x["token_ids"], [[2, 4, 5, 3, 6, 0, 0, 0]] * 4)
        self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0]] * 4)
 
-    def test_tokenize_labeled_batch(self):
-        x = tf.constant([" airplane at airport"] * 4)
-        y_in = tf.constant([1] * 4)
-        sw_in = tf.constant([1.0] * 4)
-        x, y, sw = self.preprocessor(x, y_in, sw_in)
-        
self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - self.assertAllEqual(y, y_in) - self.assertAllEqual(sw, sw_in) - - def test_tokenize_labeled_dataset(self): - x = tf.constant([" airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x = ds.batch(4).take(1).get_single_element() - self.assertAllEqual(x["token_ids"], [[1, 3, 4, 5, 3, 6, 1, 0]] * 4) - self.assertAllEqual(x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0]] * 4) - def test_sequence_length_override(self): - input_data = " airplane at airport" - x = self.preprocessor(input_data, sequence_length=4) - self.assertAllEqual(x["token_ids"], [1, 3, 4, 1]) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + input_data = "airplane at airport" + preprocessor = OPTPreprocessor(**self.init_kwargs) + x = preprocessor(input_data, sequence_length=4) + self.assertAllEqual(x["token_ids"], [1, 2, 4, 1]) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in OPTPreprocessor.presets: + self.run_preset_test( + cls=OPTPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/opt/opt_presets_test.py b/keras_nlp/models/opt/opt_presets_test.py deleted file mode 100644 index 2484e5f3d8..0000000000 --- a/keras_nlp/models/opt/opt_presets_test.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.opt.opt_backbone import OPTBackbone -from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class OPTPresetSmokeTest(TestCase): - """ - A smoke test for GPT-2 presets we run continuously. - - This only tests the smallest weights we have available. Run with: - `pytest keras_nlp/models/opt/opt_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = OPTTokenizer.from_preset("opt_125m_en") - outputs = tokenizer("The quick brown fox.") - expected_outputs = [133, 2119, 6219, 23602, 4] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[133, 2119, 6219, 23602, 4]]), - "padding_mask": ops.array([[1, 1, 1, 1, 1]]), - } - model = OPTBackbone.from_preset( - "opt_125m_en", load_weights=load_weights - ) - outputs = model(input_data)[0, 0, :5] - if load_weights: - # The forward pass from a preset should be stable! 
- # This test should catch cases where we unintentionally change our - # network code in a way that would invalidate our preset weights. - # We should only update these numbers if we are updating a weights - # file, or have found a discrepancy with the upstream source. - expected_outputs = [-0.246, -1.004, -0.072, 0.097, 0.533] - # Keep a high tolerance, so we are robust to different hardware. - self.assertAllClose(outputs, expected_outputs, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("opt_tokenizer", OPTTokenizer), - ("opt_backbone", OPTBackbone), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("opt_tokenizer", OPTTokenizer), - ("opt_backbone", OPTBackbone), - ) - def test_unknown_preset_error(self, cls): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("opt_clowntown") - - -@pytest.mark.extra_large -class OPTPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every GPT-2 preset and is only run manually. - Run with: - `pytest keras_nlp/models/opt/opt_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_opt(self, load_weights): - for preset in OPTBackbone.presets: - model = OPTBackbone.from_preset(preset, load_weights=load_weights) - input_data = { - "token_ids": random.uniform( - shape=(1, 1024), - dtype="int64", - maxval=model.vocabulary_size, - ), - "padding_mask": ops.array([1] * 1024, shape=(1, 1024)), - } - model(input_data) - - def test_load_tokenizers(self): - for preset in OPTTokenizer.presets: - tokenizer = OPTTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") diff --git a/keras_nlp/models/opt/opt_tokenizer_test.py b/keras_nlp/models/opt/opt_tokenizer_test.py index af460db409..4b52ef1aed 100644 --- a/keras_nlp/models/opt/opt_tokenizer_test.py +++ b/keras_nlp/models/opt/opt_tokenizer_test.py @@ -12,66 +12,51 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from keras_nlp.backend import keras
+import pytest
+
 from keras_nlp.models.opt.opt_tokenizer import OPTTokenizer
 from keras_nlp.tests.test_case import TestCase
 
 
 class OPTTokenizerTest(TestCase):
     def setUp(self):
-        self.vocab = {
-            "<pad>": 0,
-            "</s>": 1,
-            "Ġair": 2,
-            "plane": 3,
-            "Ġat": 4,
-            "port": 5,
-            "Ġkoh": 6,
-            "li": 7,
-            "Ġis": 8,
-            "Ġthe": 9,
-            "Ġbest": 10,
-        }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-        self.merges = merges
-
-        self.tokenizer = OPTTokenizer(vocabulary=self.vocab, merges=self.merges)
-
-    def test_tokenize(self):
-        input_data = " airplane at airport"
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [2, 3, 4, 2, 5])
-
-    def test_tokenize_special_tokens(self):
-        input_data = "</s> airplane at airport</s><pad>"
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [1, 2, 3, 4, 2, 5, 1, 0])
-
-    def test_tokenize_batch(self):
-        input_data = [" airplane at airport", " kohli is the best"]
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [[2, 3, 4, 2, 5], [6, 7, 8, 9, 10]])
-
-    def test_detokenize(self):
-        input_tokens = [2, 3, 4, 2, 5]
-        output = self.tokenizer.detokenize(input_tokens)
-        self.assertEqual(output, " airplane at airport")
-
-    def test_vocabulary_size(self):
-        self.assertEqual(self.tokenizer.vocabulary_size(), 11)
+        self.vocab = ["<pad>", "</s>", "air", "Ġair", "plane", "Ġat", "port"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
+        self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges}
+        self.input_data = [
+            " airplane at airport</s>",
+            " airplane airport",
+        ]
+
+    def test_tokenizer_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=OPTTokenizer,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=[[3, 4, 5, 3, 6, 1], [3, 4, 3, 6]],
+        )
 
     def test_errors_missing_special_tokens(self):
         with self.assertRaises(ValueError):
             OPTTokenizer(vocabulary=["a", "b", "c"], merges=[])
 
-    def test_serialization(self):
-        config = keras.saving.serialize_keras_object(self.tokenizer)
-        new_tokenizer = keras.saving.deserialize_keras_object(config)
-        self.assertEqual(
-            new_tokenizer.get_config(),
-            self.tokenizer.get_config(),
+    @pytest.mark.large
+    def test_smallest_preset(self):
+        self.run_preset_test(
+            cls=OPTTokenizer,
+            preset="opt_125m_en",
+            input_data=["The quick brown fox."],
+            expected_output=[[133, 2119, 6219, 23602, 4]],
         )
+
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in OPTTokenizer.presets:
+            self.run_preset_test(
+                cls=OPTTokenizer,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/roberta/roberta_backbone_test.py b/keras_nlp/models/roberta/roberta_backbone_test.py
index b90847bd5f..fe85e183a8 100644
--- a/keras_nlp/models/roberta/roberta_backbone_test.py
+++ b/keras_nlp/models/roberta/roberta_backbone_test.py
@@ -12,13 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone from keras_nlp.tests.test_case import TestCase @@ -26,66 +21,58 @@ class RobertaBackboneTest(TestCase): def setUp(self): - self.backbone = RobertaBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.batch_size = 8 - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_roberta(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - self.assertRegexpMatches(self.backbone.name, "roberta_backbone") - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=RobertaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) - - def test_variable_sequence_length_call_roberta(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - output = self.backbone(input_data) - self.assertAllEqual( - ops.shape(output), - (2, seq_length, self.backbone.hidden_dim), - ) - @pytest.mark.large # Saving is slow, so mark these large. + @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=RobertaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, RobertaBackbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=RobertaBackbone, + preset="roberta_base_en", + input_data={ + "token_ids": ops.array([[0, 133, 2119, 2]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape=(1, 4, 768), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [-0.051, 0.100, -0.010, -0.097, 0.059], + ), + ) - # Check that output matches. 
-        restored_output = restored_model(self.input_batch)
-        self.assertAllClose(model_output, restored_output)
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in RobertaBackbone.presets:
+            self.run_preset_test(
+                cls=RobertaBackbone,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/roberta/roberta_classifier_test.py b/keras_nlp/models/roberta/roberta_classifier_test.py
index 6636768bdc..04c054f4bc 100644
--- a/keras_nlp/models/roberta/roberta_classifier_test.py
+++ b/keras_nlp/models/roberta/roberta_classifier_test.py
@@ -12,14 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
-
-import numpy as np
 import pytest
-import tensorflow as tf
 
-from keras_nlp.backend import keras
-from keras_nlp.backend import ops
 from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone
 from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier
 from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor
@@ -29,27 +23,13 @@
 
 class RobertaClassifierTest(TestCase):
     def setUp(self):
-        self.vocab = {
-            "<s>": 0,
-            "<pad>": 1,
-            "</s>": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
-            "Ġkoh": 7,
-            "li": 8,
-            "Ġis": 9,
-            "Ġthe": 10,
-            "Ġbest": 11,
-            "<mask>": 12,
-        }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-        self.merges = merges
+        # Setup model.
+        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
+        self.vocab += ["port", "<mask>"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
         self.preprocessor = RobertaPreprocessor(
             RobertaTokenizer(vocabulary=self.vocab, merges=self.merges),
             sequence_length=5,
@@ -62,84 +42,40 @@ def setUp(self):
             intermediate_dim=4,
             max_sequence_length=self.preprocessor.packer.sequence_length,
         )
-        self.classifier = RobertaClassifier(
-            self.backbone,
-            num_classes=4,
-            preprocessor=self.preprocessor,
-            # Check we handle serialization correctly.
-            activation=keras.activations.softmax,
-            hidden_dim=4,
-        )
-
-        # Setup data.
-        self.raw_batch = [
-            " airplane at airport",
-            " the airplane is the best",
-        ]
-        self.preprocessed_batch = self.preprocessor(self.raw_batch)
-        self.raw_dataset = tf.data.Dataset.from_tensor_slices(
-            (self.raw_batch, np.ones((2,)))
-        ).batch(2)
-        self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor)
-
-    def test_valid_call_classifier(self):
-        self.classifier(self.preprocessed_batch)
-
-    def test_classifier_predict(self):
-        preds1 = self.classifier.predict(self.raw_batch)
-        self.classifier.preprocessor = None
-        preds2 = self.classifier.predict(self.preprocessed_batch)
-        # Assert predictions match.
-        self.assertAllClose(preds1, preds2)
-        # Assert valid softmax output.
- self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + [" airplane at airport", " airplane airport"], # Features. + [1, 0], # Labels. ) - self.classifier.fit(self.preprocessed_dataset) + self.input_data = self.preprocessor(*self.train_data)[0] - def test_serialization(self): - # Defaults. - original = RobertaClassifier( - self.backbone, - num_classes=2, + def test_classifier_basics(self): + self.run_task_test( + cls=RobertaClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. - original = RobertaClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - hidden_dim=4, - name="test", - trainable=False, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - @pytest.mark.large # Saving is slow, so mark these large. + @pytest.mark.large def test_saved_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, RobertaClassifier) + self.run_model_saving_test( + cls=RobertaClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in RobertaClassifier.presets: + self.run_preset_test( + cls=RobertaClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py index 92abcf3011..ae762079e2 100644 --- a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import tensorflow as tf
+import pytest
 
-from keras_nlp.backend import keras
 from keras_nlp.models.roberta.roberta_masked_lm_preprocessor import (
     RobertaMaskedLMPreprocessor,
 )
@@ -24,122 +23,68 @@
 
 class RobertaMaskedLMPreprocessorTest(TestCase):
     def setUp(self):
-        vocab = {
-            "<s>": 0,
-            "<pad>": 1,
-            "</s>": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
-            "Ġkoh": 7,
-            "li": 8,
-            "Ġis": 9,
-            "Ġthe": 10,
-            "Ġbest": 11,
-            "<mask>": 12,
+        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
+        self.vocab += ["port", "<mask>"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
+        self.tokenizer = RobertaTokenizer(
+            vocabulary=self.vocab, merges=self.merges
+        )
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            # Simplify our testing by masking every available token.
+            "mask_selection_rate": 1.0,
+            "mask_token_rate": 1.0,
+            "random_token_rate": 0.0,
+            "mask_selection_length": 4,
+            "sequence_length": 12,
         }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-
-        self.preprocessor = RobertaMaskedLMPreprocessor(
-            tokenizer=RobertaTokenizer(
-                vocabulary=vocab,
-                merges=merges,
+        self.input_data = [" airplane airport"]
+
+    def test_preprocessor_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=RobertaMaskedLMPreprocessor,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=(
+                {
+                    "token_ids": [[0, 8, 8, 8, 8, 2, 1, 1, 1, 1, 1, 1]],
+                    "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]],
+                    "mask_positions": [[1, 2, 3, 4]],
+                },
+                [[4, 5, 4, 7]],
+                [[1.0, 1.0, 1.0, 1.0]],
             ),
-            # Simplify our testing by masking every available token.
- mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=5, - sequence_length=12, ) - def test_preprocess_strings(self): - input_data = " airplane at airport" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [0, 12, 12, 12, 12, 12, 2, 1, 1, 1, 1, 1] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 4, 5]) - self.assertAllEqual(y, [3, 4, 5, 3, 6]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0, 1.0]) - - def test_preprocess_list_of_strings(self): - input_data = [" airplane at airport"] * 4 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[0, 12, 12, 12, 12, 12, 2, 1, 1, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4, 5]] * 4) - self.assertAllEqual(y, [[3, 4, 5, 3, 6]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0, 1.0]] * 4) - - def test_preprocess_dataset(self): - sentences = tf.constant([" airplane at airport"] * 4) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[0, 12, 12, 12, 12, 12, 2, 1, 1, 1, 1, 1]] * 4 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]] * 4 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 4, 5]] * 4) - self.assertAllEqual(y, [[3, 4, 5, 3, 6]] * 4) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 1.0, 1.0]] * 4) - - def test_mask_multiple_sentences(self): - sentence_one = tf.constant(" airplane") - sentence_two = tf.constant(" kohli") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [0, 12, 12, 2, 2, 12, 12, 2, 1, 1, 1, 1] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 5, 6, 0]) - self.assertAllEqual(y, [3, 4, 7, 8, 0]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 1.0, 0.0]) - def test_no_masking_zero_rate(self): no_mask_preprocessor = RobertaMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, - mask_selection_length=5, + mask_selection_length=4, sequence_length=12, ) - input_data = " airplane at airport" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [0, 3, 4, 5, 3, 6, 2, 1, 1, 1, 1, 1] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0] + input_data = [" airplane airport"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[0, 4, 5, 4, 7, 2, 1, 1, 1, 1, 1, 1]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in RobertaMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=RobertaMaskedLMPreprocessor, + 
preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/roberta/roberta_masked_lm_test.py b/keras_nlp/models/roberta/roberta_masked_lm_test.py
index d2a5a27011..663b5a5b0c 100644
--- a/keras_nlp/models/roberta/roberta_masked_lm_test.py
+++ b/keras_nlp/models/roberta/roberta_masked_lm_test.py
@@ -1,4 +1,4 @@
-# Copyright 2022 The KerasNLP Authors
+# Copyright 2023 The KerasNLP Authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -12,12 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import os
-
 import pytest
-import tensorflow as tf
 
-from keras_nlp.backend import keras
 from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone
 from keras_nlp.models.roberta.roberta_masked_lm import RobertaMaskedLM
 from keras_nlp.models.roberta.roberta_masked_lm_preprocessor import (
@@ -29,27 +25,13 @@
 
 class RobertaMaskedLMTest(TestCase):
     def setUp(self):
-        self.vocab = {
-            "<s>": 0,
-            "<pad>": 1,
-            "</s>": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
-            "Ġkoh": 7,
-            "li": 8,
-            "Ġis": 9,
-            "Ġthe": 10,
-            "Ġbest": 11,
-            "<mask>": 12,
-        }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-        self.merges = merges
+        # Setup model.
+        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
+        self.vocab += ["port", "<mask>"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
         self.preprocessor = RobertaMaskedLMPreprocessor(
             RobertaTokenizer(vocabulary=self.vocab, merges=self.merges),
             # Simplify our testing by masking every available token.
@@ -67,64 +49,36 @@ def setUp(self):
             intermediate_dim=4,
             max_sequence_length=self.preprocessor.packer.sequence_length,
         )
-        self.masked_lm = RobertaMaskedLM(
-            self.backbone,
-            preprocessor=self.preprocessor,
-        )
-        self.masked_lm_no_preprocessing = RobertaMaskedLM(
-            self.backbone,
-            preprocessor=None,
-        )
-
-        self.raw_batch = [
-            " airplane at airport",
-            " the airplane is the best",
-        ]
-        self.preprocessed_batch = self.preprocessor(self.raw_batch)
-        self.raw_dataset = tf.data.Dataset.from_tensor_slices(
-            self.raw_batch
-        ).batch(2)
-        self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor)
-
-    def test_valid_call_classifier(self):
-        self.masked_lm(self.preprocessed_batch[0])
-
-    def test_classifier_predict(self):
-        self.masked_lm.predict(self.raw_batch)
-        self.masked_lm.preprocessor = None
-        self.masked_lm.predict(self.preprocessed_batch[0])
-
-    def test_classifier_fit(self):
-        self.masked_lm.fit(self.raw_dataset)
-        self.masked_lm.preprocessor = None
-        self.masked_lm.fit(self.preprocessed_dataset)
-
-    def test_serialization(self):
-        config = keras.saving.serialize_keras_object(self.masked_lm)
-        new_classifier = keras.saving.deserialize_keras_object(config)
-        self.assertEqual(
-            new_classifier.get_config(),
-            self.masked_lm.get_config(),
+        self.init_kwargs = {
+            "preprocessor": self.preprocessor,
+            "backbone": self.backbone,
+        }
+        self.train_data = (
+            [" airplane at airport", " airplane airport"],  # Features.
+        
)
+        self.input_data = self.preprocessor(*self.train_data)[0]
 
-    def test_classifier_fit_no_xla(self):
-        self.masked_lm.preprocessor = None
-        self.masked_lm.compile(
-            loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
-            jit_compile=False,
+    def test_masked_lm_basics(self):
+        self.run_task_test(
+            cls=RobertaMaskedLM,
+            init_kwargs=self.init_kwargs,
+            train_data=self.train_data,
+            expected_output_shape=(2, 5, 9),
         )
-        self.masked_lm.fit(self.preprocessed_dataset)
 
     @pytest.mark.large
     def test_saved_model(self):
-        model_output = self.masked_lm.predict(self.raw_batch)
-        path = os.path.join(self.get_temp_dir(), "model.keras")
-        self.masked_lm.save(path, save_format="keras_v3")
-        restored_model = keras.models.load_model(path)
-
-        # Check we got the real object back.
-        self.assertIsInstance(restored_model, RobertaMaskedLM)
+        self.run_model_saving_test(
+            cls=RobertaMaskedLM,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+        )
 
-        # Check that output matches.
-        restored_output = restored_model.predict(self.raw_batch)
-        self.assertAllClose(model_output, restored_output, atol=0.01, rtol=0.01)
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in RobertaMaskedLM.presets:
+            self.run_preset_test(
+                cls=RobertaMaskedLM,
+                preset=preset,
+                input_data=self.input_data,
+            )
diff --git a/keras_nlp/models/roberta/roberta_preprocessor_test.py b/keras_nlp/models/roberta/roberta_preprocessor_test.py
index 3471fd372d..5e7ad77514 100644
--- a/keras_nlp/models/roberta/roberta_preprocessor_test.py
+++ b/keras_nlp/models/roberta/roberta_preprocessor_test.py
@@ -12,9 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import tensorflow as tf
+import pytest
 
-from keras_nlp.backend import keras
 from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor
 from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer
 from keras_nlp.tests.test_case import TestCase
@@ -22,123 +21,51 @@
 
 class RobertaPreprocessorTest(TestCase):
     def setUp(self):
-        vocab = {
-            "<s>": 0,
-            "<pad>": 1,
-            "</s>": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
-            "Ġkoh": 7,
-            "li": 8,
-            "Ġis": 9,
-            "Ġthe": 10,
-            "Ġbest": 11,
-            "<mask>": 12,
-        }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-
-        self.preprocessor = RobertaPreprocessor(
-            tokenizer=RobertaTokenizer(
-                vocabulary=vocab,
-                merges=merges,
-            ),
-            sequence_length=12,
-        )
-
-    def test_tokenize_strings(self):
-        input_data = " airplane at airport"
-
-        output = self.preprocessor(input_data)
-        self.assertAllEqual(
-            output["token_ids"], [0, 3, 4, 5, 3, 6, 2, 1, 1, 1, 1, 1]
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]
-        )
-
-    def test_tokenize_list_of_strings(self):
-        input_data = [" airplane at airport"] * 4
-
-        output = self.preprocessor(input_data)
-        self.assertAllEqual(
-            output["token_ids"],
-            [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1, 1, 1]] * 4,
-        )
-
-        self.assertAllEqual(
-            output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]] * 4
-        )
-
-    def test_tokenize_labeled_batch(self):
-        x = tf.constant([" airplane at airport"] * 4)
-        y = tf.constant([1] * 4)
-        sw = tf.constant([1.0] * 4)
-        x_out, y_out, sw_out = self.preprocessor(x, y, sw)
-        self.assertAllEqual(
-            x_out["token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1, 1, 1]] * 4
-        )
-        
self.assertAllEqual(
-            x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]] * 4
-        )
-        self.assertAllEqual(y_out, y)
-        self.assertAllEqual(sw_out, sw)
-
-    def test_tokenize_labeled_dataset(self):
-        x = tf.constant([" airplane at airport"] * 4)
-        y = tf.constant([1] * 4)
-        sw = tf.constant([1.0] * 4)
-        ds = tf.data.Dataset.from_tensor_slices((x, y, sw))
-        ds = ds.map(self.preprocessor)
-        x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element()
-        self.assertAllEqual(
-            x_out["token_ids"], [[0, 3, 4, 5, 3, 6, 2, 1, 1, 1, 1, 1]] * 4
-        )
-        self.assertAllEqual(
-            x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]] * 4
-        )
-        self.assertAllEqual(y_out, y)
-        self.assertAllEqual(sw_out, sw)
-
-    def test_tokenize_multiple_sentences(self):
-        sentence_one = tf.constant(" airplane at airport")
-        sentence_two = tf.constant(" kohli is the best")
-
-        output = self.preprocessor((sentence_one, sentence_two))
-        self.assertAllEqual(
-            output["token_ids"], [0, 3, 4, 5, 3, 2, 2, 7, 8, 9, 10, 2]
+        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
+        self.vocab += ["port", "<mask>"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
+        self.tokenizer = RobertaTokenizer(
+            vocabulary=self.vocab, merges=self.merges
        )
-        self.assertAllEqual(
-            output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            "sequence_length": 8,
+        }
+        self.input_data = (
+            [" airplane at airport"],
+            [1],  # Pass through labels.
+            [1.0],  # Pass through sample_weights.
        )
 
-    def test_tokenize_multiple_batched_sentences(self):
-        sentence_one = tf.constant([" airplane at airport"] * 4)
-        sentence_two = tf.constant([" kohli is the best"] * 4)
-
-        output = self.preprocessor((sentence_one, sentence_two))
-        self.assertAllEqual(
-            output["token_ids"],
-            [[0, 3, 4, 5, 3, 2, 2, 7, 8, 9, 10, 2]] * 4,
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] * 4
+    def test_preprocessor_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=RobertaPreprocessor,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=(
+                {
+                    "token_ids": [[0, 4, 5, 6, 4, 7, 2, 1]],
+                    "padding_mask": [[1, 1, 1, 1, 1, 1, 1, 0]],
+                },
+                [1],  # Pass through labels.
+                [1.0],  # Pass through sample_weights.
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = RobertaPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + preprocessor(ambiguous_input) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in RobertaPreprocessor.presets: + self.run_preset_test( + cls=RobertaPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/roberta/roberta_presets_test.py b/keras_nlp/models/roberta/roberta_presets_test.py deleted file mode 100644 index 657c43507c..0000000000 --- a/keras_nlp/models/roberta/roberta_presets_test.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.roberta.roberta_backbone import RobertaBackbone -from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier -from keras_nlp.models.roberta.roberta_masked_lm import RobertaMaskedLM -from keras_nlp.models.roberta.roberta_preprocessor import RobertaPreprocessor -from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -class RobertaPresetSmokeTest(TestCase): - """ - A smoke test for RoBERTa presets we run continuously. - - This only tests the smallest weights we have available. 
Run with: - `pytest keras_nlp/models/roberta/roberta_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = RobertaTokenizer.from_preset( - "roberta_base_en", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [133, 2119, 6219, 23602, 4] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - preprocessor = RobertaPreprocessor.from_preset( - "roberta_base_en", - sequence_length=4, - ) - outputs = preprocessor("The quick brown fox.")["token_ids"] - expected_outputs = [0, 133, 2119, 2] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[0, 133, 2119, 2]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = RobertaBackbone.from_preset( - "roberta_base_en", load_weights=load_weights - ) - outputs = model(input_data) - if load_weights: - outputs = outputs[0, 0, :5] - expected = [-0.051, 0.100, -0.010, -0.097, 0.059] - self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_classifier_output(self, load_weights): - input_data = ["Let's rock!"] - model = RobertaClassifier.from_preset( - "roberta_base_en", num_classes=2, load_weights=load_weights - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_classifier_output_without_preprocessing(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = RobertaClassifier.from_preset( - "roberta_base_en", - num_classes=2, - load_weights=load_weights, - preprocessor=None, - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_masked_lm_output(self, load_weights): - input_data = ["Let's rock!"] - model = RobertaMaskedLM.from_preset( - "roberta_base_en", load_weights=load_weights - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_masked_lm_output_without_preprocessing(self, load_weights): - input_data = { - "token_ids": ops.array([[101, 1996, 4248, 102]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - "mask_positions": ops.array([[0, 0]]), - } - model = RobertaMaskedLM.from_preset( - "roberta_base_en", - load_weights=load_weights, - preprocessor=None, - ) - # Never assert output values, as the head weights are random. 
- model.predict(input_data) - - @parameterized.named_parameters( - ("roberta_tokenizer", RobertaTokenizer), - ("roberta_preprocessor", RobertaPreprocessor), - ("roberta", RobertaBackbone), - ("roberta_classifier", RobertaClassifier), - ("roberta_masked_lm", RobertaMaskedLM), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("roberta_tokenizer", RobertaTokenizer, {}), - ("roberta_preprocessor", RobertaPreprocessor, {}), - ("roberta", RobertaBackbone, {}), - ("roberta_classifier", RobertaClassifier, {"num_classes": 2}), - ("roberta_masked_lm", RobertaMaskedLM, {}), - ) - def test_unknown_preset_error(self, cls, kwargs): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("roberta_base_en_clowntown", **kwargs) - - -@pytest.mark.extra_large -class RobertaPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every RoBERTa preset and is only run manually. - Run with: - `pytest keras_nlp/models/roberta/roberta_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_roberta(self, load_weights): - for preset in RobertaBackbone.presets: - model = RobertaBackbone.from_preset( - preset, load_weights=load_weights - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), dtype="int64", maxval=model.vocabulary_size - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - model(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_roberta_classifier(self, load_weights): - for preset in RobertaClassifier.presets: - classifier = RobertaClassifier.from_preset( - preset, num_classes=4, load_weights=load_weights - ) - input_data = ["The quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_roberta_classifier_without_preprocessing(self, load_weights): - for preset in RobertaClassifier.presets: - classifier = RobertaClassifier.from_preset( - preset, - num_classes=2, - preprocessor=None, - load_weights=load_weights, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - classifier.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_roberta_masked_lm(self, load_weights): - for preset in RobertaMaskedLM.presets: - classifier = RobertaMaskedLM.from_preset( - preset, load_weights=load_weights - ) - input_data = ["The quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("load_weights", True), ("no_load_weights", False) - ) - def test_load_roberta_masked_lm_without_preprocessing(self, load_weights): - for preset in RobertaMaskedLM.presets: - classifier = RobertaMaskedLM.from_preset( - preset, - preprocessor=None, - load_weights=load_weights, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - "mask_positions": ops.array([1] * 128, shape=(1, 128)), - } - classifier.predict(input_data) - - def 
test_load_tokenizers(self):
-        for preset in RobertaTokenizer.presets:
-            tokenizer = RobertaTokenizer.from_preset(preset)
-            tokenizer("The quick brown fox.")
-
-    def test_load_preprocessors(self):
-        for preset in RobertaPreprocessor.presets:
-            preprocessor = RobertaPreprocessor.from_preset(preset)
-            preprocessor("The quick brown fox.")
diff --git a/keras_nlp/models/roberta/roberta_tokenizer_test.py b/keras_nlp/models/roberta/roberta_tokenizer_test.py
index ef47f204eb..e5fcb1867d 100644
--- a/keras_nlp/models/roberta/roberta_tokenizer_test.py
+++ b/keras_nlp/models/roberta/roberta_tokenizer_test.py
@@ -12,67 +12,52 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from keras_nlp.backend import keras
+import pytest
+
 from keras_nlp.models.roberta.roberta_tokenizer import RobertaTokenizer
 from keras_nlp.tests.test_case import TestCase
 
 
 class RobertaTokenizerTest(TestCase):
     def setUp(self):
-        vocab = {
-            "<s>": 0,
-            "<pad>": 1,
-            "</s>": 2,
-            "Ġair": 3,
-            "plane": 4,
-            "Ġat": 5,
-            "port": 6,
-            "Ġkoh": 7,
-            "li": 8,
-            "Ġis": 9,
-            "Ġthe": 10,
-            "Ġbest": 11,
-            "<mask>": 12,
-        }
-
-        merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"]
-        merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"]
-        merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"]
-        merges += ["pla ne"]
-
-        self.tokenizer = RobertaTokenizer(vocabulary=vocab, merges=merges)
-
-    def test_tokenize(self):
-        input_data = " airplane at airport"
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [3, 4, 5, 3, 6])
-
-    def test_tokenize_special_tokens(self):
-        input_data = "<s> airplane at airport<s><pad>"
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [0, 3, 4, 5, 3, 6, 0, 1])
-
-    def test_tokenize_batch(self):
-        input_data = [" airplane at airport", " kohli is the best"]
-        output = self.tokenizer(input_data)
-        self.assertAllEqual(output, [[3, 4, 5, 3, 6], [7, 8, 9, 10, 11]])
-
-    def test_detokenize(self):
-        input_tokens = [[3, 4, 5, 3, 6]]
-        output = self.tokenizer.detokenize(input_tokens)
-        self.assertAllEqual(output, [" airplane at airport"])
-
-    def test_vocabulary_size(self):
-        self.assertEqual(self.tokenizer.vocabulary_size(), 13)
+        self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
+        self.vocab += ["port", "<mask>"]
+        self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)])
+        self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"]
+        self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"]
+        self.merges += ["Ġai r", "Ġa i", "pla ne"]
+        self.init_kwargs = {"vocabulary": self.vocab, "merges": self.merges}
+        self.input_data = [
+            "<s> airplane at airport<s><pad>",
+            " airplane airport",
+        ]
+
+    def test_tokenizer_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=RobertaTokenizer,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=[[0, 4, 5, 6, 4, 7, 0, 1], [4, 5, 4, 7]],
+        )
 
     def test_errors_missing_special_tokens(self):
         with self.assertRaises(ValueError):
             RobertaTokenizer(vocabulary=["a", "b", "c"], merges=[])
 
-    def test_serialization(self):
-        config = keras.saving.serialize_keras_object(self.tokenizer)
-        new_tokenizer = keras.saving.deserialize_keras_object(config)
-        self.assertEqual(
-            new_tokenizer.get_config(),
-            self.tokenizer.get_config(),
+    @pytest.mark.large
+    def test_smallest_preset(self):
+        self.run_preset_test(
+            cls=RobertaTokenizer,
+            preset="roberta_base_en",
+            input_data=["The quick brown fox."],
+            expected_output=[[133, 2119, 6219, 23602, 4]],
        )
+
+    
@pytest.mark.extra_large + def test_all_presets(self): + for preset in RobertaTokenizer.presets: + self.run_preset_test( + cls=RobertaTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index 476304c566..e5e147705e 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -12,101 +12,45 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.t5.t5_backbone import T5Backbone from keras_nlp.tests.test_case import TestCase @pytest.mark.tf_only -class T5Test(TestCase): +class T5BackboneTest(TestCase): def setUp(self): - self.backbone = T5Backbone( - vocabulary_size=4, - num_layers=2, - num_heads=2, - hidden_dim=4, - intermediate_dim=4, - ) - self.batch_size = 2 - seq_length = 3 - self.input_batch = { - "encoder_token_ids": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "encoder_padding_mask": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "decoder_token_ids": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "decoder_padding_mask": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + } + self.input_data = { + "encoder_token_ids": ops.ones((2, 3), dtype="int32"), + "encoder_padding_mask": ops.zeros((2, 3), dtype="int32"), + "decoder_token_ids": ops.ones((2, 3), dtype="int32"), + "decoder_padding_mask": ops.zeros((2, 3), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_t5(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding( - self.input_batch["encoder_token_ids"] - ) - self.assertEqual(output.shape, (2, 3, 4)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "t5_backbone") - - def test_variable_sequence_length_call_t5(self): - for seq_length in (2, 3, 4): - input_data = { - "encoder_token_ids": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "encoder_padding_mask": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "decoder_token_ids": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - "decoder_padding_mask": np.ones( - (self.batch_size, seq_length), dtype="int32" - ), - } - outputs = self.backbone(input_data) - self.assertIn("encoder_sequence_output", outputs) - self.assertIn("decoder_sequence_output", outputs) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=T5Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape={ + "encoder_sequence_output": (2, 3, 2), + "decoder_sequence_output": (2, 3, 2), + }, ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) - @pytest.mark.large # Saving is slow, so mark these large. 
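Hunks like the one below swap hand-rolled save/reload checks for the shared `TestCase.run_model_saving_test` helper. As a rough sketch only, reconstructed from the removed test bodies in this series rather than from `keras_nlp/tests/test_case.py` itself, the helper amounts to:

import os

from keras_nlp.backend import keras


def run_model_saving_test(self, cls, init_kwargs, input_data):
    # Build the model, save it in the Keras v3 format, and reload it.
    model = cls(**init_kwargs)
    model_output = model(input_data)
    path = os.path.join(self.get_temp_dir(), "model.keras")
    model.save(path, save_format="keras_v3")
    restored_model = keras.models.load_model(path)
    # Check we got the real object back.
    self.assertIsInstance(restored_model, cls)
    # Check that the forward pass still matches.
    self.assertAllClose(model_output, restored_model(input_data))

Centralizing this means each model test only supplies `init_kwargs` and `input_data`, which is exactly the shape of the new call sites.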
+ @pytest.mark.large def test_saved_model(self): - outputs = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, T5Backbone) - - # Check that output matches. - restored_outputs = restored_model(self.input_batch) - for key in ["encoder_sequence_output", "decoder_sequence_output"]: - self.assertAllClose(outputs[key], restored_outputs[key]) + self.run_model_saving_test( + cls=T5Backbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py index ce16492acd..f8cef35c30 100644 --- a/keras_nlp/models/t5/t5_tokenizer_test.py +++ b/keras_nlp/models/t5/t5_tokenizer_test.py @@ -17,7 +17,6 @@ import sentencepiece import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer from keras_nlp.tests.test_case import TestCase @@ -42,28 +41,16 @@ def setUp(self): unk_piece="", user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() + self.init_kwargs = {"proto": bytes_io.getvalue()} + self.input_data = ["the quick brown fox.", "the earth is round."] - self.tokenizer = T5Tokenizer(proto=self.proto) - - def test_tokenize(self): - input_data = "the quick brown fox" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [4, 9, 5, 7]) - - def test_tokenize_batch(self): - input_data = ["the quick brown fox", "the earth is round"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[4, 9, 5, 7], [4, 6, 8, 10]]) - - def test_detokenize(self): - input_data = [[4, 9, 5, 7]] - output = self.tokenizer.detokenize(input_data) - self.assertEqual(output, ["the quick brown fox"]) - - def test_vocabulary_size(self): - tokenizer = T5Tokenizer(proto=self.proto) - self.assertEqual(tokenizer.vocabulary_size(), 11) + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=T5Tokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[4, 9, 5, 2], [4, 6, 8, 2]], + ) def test_errors_missing_special_tokens(self): bytes_io = io.BytesIO() @@ -77,11 +64,3 @@ def test_errors_missing_special_tokens(self): ) with self.assertRaises(ValueError): T5Tokenizer(proto=bytes_io.getvalue()) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), - ) diff --git a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py index 1d282f76d2..ff3178950e 100644 --- a/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py +++ b/keras_nlp/models/whisper/whisper_audio_feature_extractor_test.py @@ -14,7 +14,6 @@ import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.whisper.whisper_audio_feature_extractor import ( WhisperAudioFeatureExtractor, ) @@ -23,53 +22,33 @@ class WhisperAudioFeatureExtractorTest(TestCase): def setUp(self): - self.num_mels = 80 - self.num_fft_bins = 400 - self.stride = 100 - self.sampling_rate = 100 - self.max_audio_length = 5 - self.audio_feature_extractor = WhisperAudioFeatureExtractor( - num_mels=self.num_mels, - num_fft_bins=self.num_fft_bins, - stride=self.stride, - 
sampling_rate=self.sampling_rate, - max_audio_length=self.max_audio_length, + self.init_kwargs = { + "num_mels": 80, + "num_fft_bins": 400, + "stride": 100, + "sampling_rate": 100, + "max_audio_length": 5, + } + audio_tensor_1 = tf.ones((2,), dtype="float32") + audio_tensor_2 = tf.ones((25,), dtype="float32") + self.input_data = tf.ragged.stack( + [audio_tensor_1, audio_tensor_2], + axis=0, ) - def test_unbatched_inputs(self): - audio_tensor = tf.ones((2,), dtype="float32") + def test_feature_extractor_basics(self): + self.run_preprocessing_layer_test( + cls=WhisperAudioFeatureExtractor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - outputs = self.audio_feature_extractor(audio_tensor) + def test_correctness(self): + audio_tensor = tf.ones((2,), dtype="float32") + outputs = WhisperAudioFeatureExtractor(**self.init_kwargs)(audio_tensor) # Verify shape. - self.assertEqual(outputs.shape, (5, self.num_mels)) + self.assertEqual(outputs.shape, (5, 80)) # Verify output. expected = [1.1656, 1.0151, -0.8343, -0.8343, -0.8343] self.assertAllClose(outputs[:, 0], expected, atol=0.01, rtol=0.01) - - def test_batched_inputs(self): - audio_tensor_1 = tf.ones((2,), dtype="float32") - audio_tensor_2 = tf.ones((25,), dtype="float32") - audio_tensor = tf.ragged.stack([audio_tensor_1, audio_tensor_2], axis=0) - - outputs = self.audio_feature_extractor(audio_tensor) - - # Verify shape. - self.assertEqual(outputs.shape, (2, 5, self.num_mels)) - # Verify output. - expected_1 = [1.1656, 1.0151, -0.8343, -0.8343, -0.8343] - self.assertAllClose(outputs[0, :, 0], expected_1, atol=0.01, rtol=0.01) - expected_2 = [1.2299, 1.0970, 0.3997, -0.7700, -0.7700] - self.assertAllClose(outputs[1, :, 0], expected_2, atol=0.01, rtol=0.01) - - def test_serialization(self): - config = keras.saving.serialize_keras_object( - self.audio_feature_extractor - ) - new_audio_feature_extractor = keras.saving.deserialize_keras_object( - config - ) - self.assertEqual( - new_audio_feature_extractor.get_config(), - self.audio_feature_extractor.get_config(), - ) diff --git a/keras_nlp/models/whisper/whisper_backbone_test.py b/keras_nlp/models/whisper/whisper_backbone_test.py index 4f0ac0897c..41fc6df33c 100644 --- a/keras_nlp/models/whisper/whisper_backbone_test.py +++ b/keras_nlp/models/whisper/whisper_backbone_test.py @@ -12,13 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
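The backbone tests below all funnel through `TestCase.run_backbone_test`. Judging only from the call sites in this patch (the real helper likely checks more, such as serialization), its core contract is a forward pass plus an output-shape assertion, with dict outputs supported for encoder-decoder models:

def run_backbone_test(self, cls, init_kwargs, input_data, expected_output_shape):
    # Illustrative sketch, not the actual implementation.
    backbone = cls(**init_kwargs)
    output = backbone(input_data)
    if isinstance(expected_output_shape, dict):
        # Encoder-decoder backbones return a dict of sequence outputs.
        for key, shape in expected_output_shape.items():
            self.assertEqual(tuple(output[key].shape), shape)
    else:
        self.assertEqual(tuple(output.shape), expected_output_shape)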
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.whisper.whisper_backbone import WhisperBackbone from keras_nlp.tests.test_case import TestCase @@ -26,94 +22,95 @@ @pytest.mark.tf_only class WhisperBackboneTest(TestCase): def setUp(self): - self.backbone = WhisperBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_encoder_sequence_length=6, - max_decoder_sequence_length=6, - ) - self.input_batch = { - "encoder_features": np.ones((2, 5, 80), dtype="float32"), - "decoder_token_ids": np.ones((2, 5), dtype="int32"), - "decoder_padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_encoder_sequence_length": 6, + "max_decoder_sequence_length": 6, + } + self.input_data = { + "encoder_features": ops.ones((2, 5, 80), dtype="float32"), + "decoder_token_ids": ops.ones((2, 5), dtype="int32"), + "decoder_padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_whisper(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding( - self.input_batch["decoder_token_ids"] - ) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "whisper_backbone") - - def test_variable_sequence_length_call_whisper(self): - for seq_length in (2, 3, 4): - input_data = { - "encoder_features": np.ones( - (2, seq_length, 80), dtype="float32" - ), - "decoder_token_ids": np.ones((2, seq_length), dtype="int32"), - "decoder_padding_mask": np.ones((2, seq_length), dtype="int32"), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=WhisperBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape={ + "encoder_sequence_output": (2, 3, 2), + "decoder_sequence_output": (2, 5, 2), + }, ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) def test_key_projection_bias_absence(self): + backbone = WhisperBackbone(**self.init_kwargs) # Check only for the first encoder layer and first decoder layer. self.assertIsNone( - self.backbone.get_layer( + backbone.get_layer( "transformer_encoder_layer_0" )._self_attention_layer._key_dense.bias ) self.assertIsNone( - self.backbone.get_layer( + backbone.get_layer( "transformer_decoder_layer_0" )._self_attention_layer._key_dense.bias ) self.assertIsNone( - self.backbone.get_layer( + backbone.get_layer( "transformer_decoder_layer_0" )._cross_attention_layer._key_dense.bias ) - @pytest.mark.large # Saving is slow, so mark these large. + @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. 
- self.assertIsInstance(restored_model, WhisperBackbone) - - # Check that output matches. - restored_output = restored_model(self.input_batch) - self.assertAllClose( - model_output["encoder_sequence_output"], - restored_output["encoder_sequence_output"], + self.run_model_saving_test( + cls=WhisperBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, ) - self.assertAllClose( - model_output["decoder_sequence_output"], - restored_output["decoder_sequence_output"], + + @pytest.mark.skip # TODO: fix weight mismatch error. + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=WhisperBackbone, + preset="whisper_tiny_en", + input_data={ + "encoder_features": ops.ones((1, 3000, 80)), + "decoder_token_ids": ops.array( + [[50257, 50362, 464, 2068, 7586, 21831, 13, 50256, 50256]] + ), + "decoder_padding_mask": ops.array( + [[1, 1, 1, 1, 1, 1, 1, 1, 0]] + ), + }, + expected_output_shape={ + "encoder_sequence_output": (1, 1500, 384), + "decoder_sequence_output": (1, 9, 384), + }, + # The forward pass from a preset should be stable! + expected_partial_output={ + "encoder_sequence_output": ops.array( + [-0.21382, -0.48528, 0.42348, -1.33874, -0.14191] + ), + "decoder_sequence_output": ops.array( + [13.238, 1.051, 8.348, -20.012, -5.022] + ), + }, ) + + @pytest.mark.skip # TODO: fix weight mismatch error. + @pytest.mark.extra_large + def test_all_presets(self): + for preset in WhisperBackbone.presets: + self.run_preset_test( + cls=WhisperBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/whisper/whisper_preprocessor_test.py b/keras_nlp/models/whisper/whisper_preprocessor_test.py index 9f8f54e494..6837dc8bfa 100644 --- a/keras_nlp/models/whisper/whisper_preprocessor_test.py +++ b/keras_nlp/models/whisper/whisper_preprocessor_test.py @@ -13,9 +13,7 @@ # limitations under the License. 
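Preprocessor, tokenizer, and feature-extractor tests in this series converge on `TestCase.run_preprocessing_layer_test`. A minimal sketch of what it plausibly covers, based on the per-model tests it replaces (a direct call plus a `tf.data` map); the single-argument call here is a simplification, and the real helper presumably also handles labeled `(x, y, sample_weight)` inputs:

import tensorflow as tf


def run_preprocessing_layer_test(self, cls, init_kwargs, input_data, expected_output=None):
    layer = cls(**init_kwargs)
    output = layer(input_data)
    if expected_output is not None:
        # assertAllClose can compare nested structures, as the new
        # masked-LM preprocessor tests below rely on.
        self.assertAllClose(output, expected_output)
    # The removed per-model tests also exercised each layer inside a
    # tf.data pipeline, so the shared helper presumably keeps that coverage.
    ds = tf.data.Dataset.from_tensor_slices(input_data)
    ds.map(layer)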
import numpy as np -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.whisper.whisper_audio_feature_extractor import ( WhisperAudioFeatureExtractor, ) @@ -26,40 +24,18 @@ class WhisperPreprocessorTest(TestCase): def setUp(self): - self.num_mels = 80 - self.num_fft_bins = 400 - self.stride = 100 - self.sampling_rate = 100 - self.max_audio_length = 5 - self.output_length = ( - self.max_audio_length * self.sampling_rate - ) // self.stride self.audio_feature_extractor = WhisperAudioFeatureExtractor( - num_mels=self.num_mels, - num_fft_bins=self.num_fft_bins, - stride=self.stride, - sampling_rate=self.sampling_rate, - max_audio_length=self.max_audio_length, - ) - - self.vocab = { - "Ġair": 0, - "plane": 1, - "Ġat": 2, - "port": 3, - "Ġkoh": 4, - "li": 5, - "Ġis": 6, - "Ġthe": 7, - "Ġbest": 8, - } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - self.merges = merges - + num_mels=80, + num_fft_bins=400, + stride=100, + sampling_rate=100, + max_audio_length=5, + ) + self.vocab = ["air", "Ġair", "plane", "Ġat", "port"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] self.special_tokens = { "<|startoftranscript|>": 9, "<|endoftext|>": 10, @@ -67,103 +43,33 @@ def setUp(self): "<|transcribe|>": 12, "<|translate|>": 13, } - self.language_tokens = { "<|en|>": 14, "<|fr|>": 15, } - self.tokenizer = WhisperTokenizer( vocabulary=self.vocab, merges=self.merges, special_tokens=self.special_tokens, language_tokens=self.language_tokens, ) - - self.preprocessor = WhisperPreprocessor( - audio_feature_extractor=self.audio_feature_extractor, - tokenizer=self.tokenizer, - decoder_sequence_length=12, - language="<|en|>", - task="translate", - ) - - def test_unbatched_preprocess(self): - input_data = { - "encoder_audio": np.ones((200,)), - "decoder_text": " airplane at airport", + self.init_kwargs = { + "audio_feature_extractor": self.audio_feature_extractor, + "tokenizer": self.tokenizer, + "decoder_sequence_length": 12, + "language": "<|en|>", + "task": "translate", } - - x = self.preprocessor(input_data) - self.assertAllEqual( - x["encoder_features"].shape, [self.output_length, self.num_mels] - ) - self.assertAllEqual( - x["decoder_token_ids"], [9, 14, 13, 11, 0, 1, 2, 0, 3, 10, 10, 10] - ) - self.assertAllEqual( - x["decoder_padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0] - ) - - def test_preprocess_batch(self): - input_data = { - "encoder_audio": np.ones((4, 200)), - "decoder_text": [" airplane at airport"] * 4, - } - - x = self.preprocessor(input_data) - self.assertAllEqual( - x["encoder_features"].shape, [4, self.output_length, self.num_mels] - ) - self.assertAllEqual( - x["decoder_token_ids"], - [[9, 14, 13, 11, 0, 1, 2, 0, 3, 10, 10, 10]] * 4, - ) - self.assertAllEqual( - x["decoder_padding_mask"], - [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 4, - ) - - def test_preprocess_labeled_batch(self): - x = { - "encoder_audio": np.ones((4, 200)), - "decoder_text": [" airplane at airport"] * 4, + self.input_data = { + "encoder_audio": np.ones((2, 200)), + "decoder_text": [" airplane at airport", " airplane at"], } - y_in = np.ones((4,)) - sw_in = np.ones((4,)) - x, y, sw = 
self.preprocessor(x, y_in, sw_in) - self.assertAllEqual( - x["encoder_features"].shape, [4, self.output_length, self.num_mels] - ) - self.assertAllEqual( - x["decoder_token_ids"], - [[9, 14, 13, 11, 0, 1, 2, 0, 3, 10, 10, 10]] * 4, - ) - self.assertAllEqual( - x["decoder_padding_mask"], - [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 4, - ) - self.assertAllEqual(y, y_in) - self.assertAllEqual(sw, sw_in) - def test_preprocess_dataset(self): - x = { - "encoder_audio": np.ones((4, 200)), - "decoder_text": [" airplane at airport"] * 4, - } - ds = tf.data.Dataset.from_tensor_slices(x) - ds = ds.map(self.preprocessor) - x = ds.batch(4).take(1).get_single_element() - self.assertAllEqual( - x["encoder_features"].shape, [4, self.output_length, self.num_mels] - ) - self.assertAllEqual( - x["decoder_token_ids"], - [[9, 14, 13, 11, 0, 1, 2, 0, 3, 10, 10, 10]] * 4, - ) - self.assertAllEqual( - x["decoder_padding_mask"], - [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 4, + def test_feature_extractor_basics(self): + self.run_preprocessing_layer_test( + cls=WhisperPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, ) def test_sequence_length_override(self): @@ -171,13 +77,6 @@ def test_sequence_length_override(self): "encoder_audio": np.ones((200,)), "decoder_text": " airplane at airport", } - x = self.preprocessor(input_data, decoder_sequence_length=6) - self.assertAllEqual(x["decoder_token_ids"], [9, 14, 13, 11, 0, 10]) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + preprocessor = WhisperPreprocessor(**self.init_kwargs) + x = preprocessor(input_data, decoder_sequence_length=6) + self.assertAllEqual(x["decoder_token_ids"], [9, 14, 13, 11, 1, 10]) diff --git a/keras_nlp/models/whisper/whisper_presets_test.py b/keras_nlp/models/whisper/whisper_presets_test.py deleted file mode 100644 index 09529c66f3..0000000000 --- a/keras_nlp/models/whisper/whisper_presets_test.py +++ /dev/null @@ -1,133 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -import tensorflow as tf -from absl.testing import parameterized - -from keras_nlp.models.whisper.whisper_audio_feature_extractor import ( - WhisperAudioFeatureExtractor, -) -from keras_nlp.models.whisper.whisper_backbone import WhisperBackbone -from keras_nlp.models.whisper.whisper_tokenizer import WhisperTokenizer - - -@pytest.mark.tf_only -@pytest.mark.large -class WhisperPresetSmokeTest(tf.test.TestCase, parameterized.TestCase): - """ - A smoke test for Whisper presets we run continuously. - - This only tests the smallest weights we have available. 
Run with: - `pytest keras_nlp/models/whisper/whisper_presets_test.py --run_large` - """ - - def test_audio_feature_extractor_output(self): - audio_feature_extractor = WhisperAudioFeatureExtractor.from_preset( - "whisper_tiny_en" - ) - # Don't really need to check for output here. - audio_feature_extractor(tf.ones((200,))) - - def test_tokenizer_output(self): - tokenizer = WhisperTokenizer.from_preset("whisper_tiny_en") - outputs = tokenizer("The quick brown fox.") - expected_outputs = [464, 2068, 7586, 21831, 13] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - @pytest.mark.skip # TODO: fix weight mismatch error. - def test_backbone_output(self, load_weights): - input_data = { - "encoder_features": tf.ones((1, 3000, 80)), - "decoder_token_ids": tf.constant( - [[50257, 50362, 464, 2068, 7586, 21831, 13, 50256, 50256]] - ), - "decoder_padding_mask": tf.constant([[1, 1, 1, 1, 1, 1, 1, 1, 0]]), - } - model = WhisperBackbone.from_preset( - "whisper_tiny_en", load_weights=load_weights - ) - outputs = model(input_data)["decoder_sequence_output"][0, 0, :5] - if load_weights: - # The forward pass from a preset should be stable! - # This test should catch cases where we unintentionally change our - # network code in a way that would invalidate our preset weights. - # We should only update these numbers if we are updating a weights - # file, or have found a discrepancy with the upstream source. - expected_outputs = [13.238, 1.051, 8.348, -20.012, -5.022] - # Keep a high tolerance, so we are robust to different hardware. - self.assertAllClose(outputs, expected_outputs, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("whisper_tokenizer", WhisperTokenizer), - ("whisper", WhisperBackbone), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("whisper_tokenizer", WhisperTokenizer), - ("whisper", WhisperBackbone), - ) - def test_unknown_preset_error(self, cls): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("whisper_tiny_en_clowntown") - - -@pytest.mark.extra_large -class WhisperPresetFullTest(tf.test.TestCase, parameterized.TestCase): - """ - Test the full enumeration of our preset. - - This tests every Whisper preset and is only run manually. 
- Run with: - `pytest keras_nlp/models/whisper/whisper_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_whisper(self, load_weights): - for preset in WhisperBackbone.presets: - model = WhisperBackbone.from_preset( - preset, load_weights=load_weights - ) - input_data = { - "encoder_features": tf.ones((1, 3000, 80)), - "decoder_token_ids": tf.random.uniform( - shape=(1, 446), - dtype="int64", - maxval=model.vocabulary_size, - ), - "decoder_padding_mask": tf.constant([1] * 446, shape=(1, 446)), - } - model(input_data) - - def test_load_tokenizers(self): - for preset in WhisperTokenizer.presets: - tokenizer = WhisperTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") - - def test_load_audio_feature_extractors(self): - for preset in WhisperAudioFeatureExtractor.presets: - audio_feature_extractor = WhisperAudioFeatureExtractor.from_preset( - preset - ) - audio_feature_extractor(tf.ones((200,))) diff --git a/keras_nlp/models/whisper/whisper_tokenizer_test.py b/keras_nlp/models/whisper/whisper_tokenizer_test.py index 5ebcb12e55..16fab2e34a 100644 --- a/keras_nlp/models/whisper/whisper_tokenizer_test.py +++ b/keras_nlp/models/whisper/whisper_tokenizer_test.py @@ -18,27 +18,14 @@ from keras_nlp.tests.test_case import TestCase -@pytest.mark.tf_only class WhisperTokenizerTest(TestCase): def setUp(self): - self.vocab = { - "Ġair": 0, - "plane": 1, - "Ġat": 2, - "port": 3, - "Ġkoh": 4, - "li": 5, - "Ġis": 6, - "Ġthe": 7, - "Ġbest": 8, - } - - merges = ["Ġ a", "Ġ t", "Ġ k", "Ġ i", "Ġ b", "Ġa i", "p l", "n e"] - merges += ["Ġa t", "p o", "r t", "o h", "l i", "Ġi s", "Ġb e", "s t"] - merges += ["Ġt h", "Ġai r", "pl a", "Ġk oh", "Ġth e", "Ġbe st", "po rt"] - merges += ["pla ne"] - self.merges = merges - + self.vocab = ["!", "air", "Ġair", "plane", "Ġat", "port"] + self.vocab += ["<|endoftext|>"] + self.vocab = dict([(token, i) for i, token in enumerate(self.vocab)]) + self.merges = ["Ġ a", "Ġ t", "Ġ i", "Ġ b", "a i", "p l", "n e"] + self.merges += ["Ġa t", "p o", "r t", "Ġt h", "ai r", "pl a", "po rt"] + self.merges += ["Ġai r", "Ġa i", "pla ne"] self.special_tokens = { "<|startoftranscript|>": 9, "<|endoftext|>": 10, @@ -46,56 +33,58 @@ def setUp(self): "<|transcribe|>": 12, "<|translate|>": 13, } - self.language_tokens = { "<|en|>": 14, "<|fr|>": 15, } + self.init_kwargs = { + "vocabulary": self.vocab, + "merges": self.merges, + "special_tokens": self.special_tokens, + "language_tokens": self.language_tokens, + } + self.input_data = [ + " airplane at airport<|endoftext|>", + " airplane airport", + ] - self.tokenizer = WhisperTokenizer( - vocabulary=self.vocab, - merges=self.merges, - special_tokens=self.special_tokens, - language_tokens=self.language_tokens, - ) - - def test_tokenize(self): - input_data = " airplane at airport" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [0, 1, 2, 0, 3]) - - def test_tokenize_batch(self): - input_data = [" airplane at airport", " kohli is the best"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[0, 1, 2, 0, 3], [4, 5, 6, 7, 8]]) - - def test_detokenize(self): - input_tokens = [0, 1, 2, 0, 3] - output = self.tokenizer.detokenize(input_tokens) - self.assertEqual(output, " airplane at airport") - - def test_detokenize_with_special_tokens(self): - input_tokens = [9, 14, 12, 11, 0, 1, 2, 0, 3, 10] - output = self.tokenizer.detokenize(input_tokens) - print(output) - self.assertEqual( - output, - 
"<|startoftranscript|><|en|><|transcribe|><|notimestamps|> airplane at airport<|endoftext|>", + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=WhisperTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[2, 3, 4, 2, 5, 10], [2, 3, 2, 5]], ) - def test_vocabulary_size(self): - self.assertEqual(self.tokenizer.vocabulary_size(), 16) - def test_special_tokens(self): - self.assertEqual(self.tokenizer.bos_token_id, 9) - self.assertEqual(self.tokenizer.eos_token_id, 10) - self.assertEqual(self.tokenizer.pad_token_id, 10) - self.assertEqual(self.tokenizer.no_timestamps_token_id, 11) - self.assertEqual(self.tokenizer.translate_token_id, 13) - self.assertEqual(self.tokenizer.transcribe_token_id, 12) + tokenizer = WhisperTokenizer(**self.init_kwargs) + self.assertEqual(tokenizer.bos_token_id, 9) + self.assertEqual(tokenizer.eos_token_id, 10) + self.assertEqual(tokenizer.pad_token_id, 10) + self.assertEqual(tokenizer.no_timestamps_token_id, 11) + self.assertEqual(tokenizer.translate_token_id, 13) + self.assertEqual(tokenizer.transcribe_token_id, 12) def test_errors_missing_special_tokens(self): with self.assertRaises(ValueError): WhisperTokenizer( vocabulary=["a", "b", "c"], merges=[], special_tokens={} ) + + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=WhisperTokenizer, + preset="whisper_tiny_en", + input_data=["The quick brown fox."], + expected_output=[[464, 2068, 7586, 21831, 13]], + ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in WhisperTokenizer.presets: + self.run_preset_test( + cls=WhisperTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py index 426cbe30e7..e92aaea0ef 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_backbone_test.py @@ -12,13 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os - -import numpy as np import pytest -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.tests.test_case import TestCase @@ -26,65 +21,58 @@ class XLMRobertaBackboneTest(TestCase): def setUp(self): - self.backbone = XLMRobertaBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - max_sequence_length=5, - ) - self.input_batch = { - "token_ids": np.ones((2, 5), dtype="int32"), - "padding_mask": np.ones((2, 5), dtype="int32"), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + "max_sequence_length": 5, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_valid_call_xlm_roberta(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 5, 2)) - - def test_name(self): - # Check default name passed through - self.assertRegexpMatches(self.backbone.name, "xlm_roberta_backbone") - - def test_variable_sequence_length_call_xlm_roberta(self): - for seq_length in (2, 3, 4): - input_data = { - "token_ids": np.ones((2, seq_length), dtype="int32"), - "padding_mask": np.ones((2, seq_length), dtype="int32"), - } - output = self.backbone(input_data) - self.assertAllEqual( - ops.shape(output), - (2, seq_length, self.backbone.hidden_dim), - ) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=XLMRobertaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) - @pytest.mark.large # Saving is slow, so mark these large. + @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) + self.run_model_saving_test( + cls=XLMRobertaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - # Check we got the real object back. - self.assertIsInstance(restored_model, XLMRobertaBackbone) + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=XLMRobertaBackbone, + preset="xlm_roberta_base_multi", + input_data={ + "token_ids": ops.array([[0, 581, 63773, 2]], dtype="int32"), + "segment_ids": ops.zeros((1, 4), dtype="int32"), + "padding_mask": ops.ones((1, 4), dtype="int32"), + }, + expected_output_shape=(1, 4, 768), + # The forward pass from a preset should be stable! + expected_partial_output=ops.array( + [0.084763, 0.097018, 0.051329, -0.000805, 0.028415], + ), + ) - # Check that output matches. 
- restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaBackbone.presets: + self.run_preset_test( + cls=XLMRobertaBackbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py index f96e40b5b4..c123cc6bc0 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py @@ -13,15 +13,10 @@ # limitations under the License. import io -import os -import numpy as np import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras -from keras_nlp.backend import ops from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.models.xlm_roberta.xlm_roberta_classifier import ( XLMRobertaClassifier, @@ -37,12 +32,11 @@ class XLMRobertaClassifierTest(TestCase): def setUp(self): + # Setup model. + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, vocab_size=10, model_type="WORD", @@ -51,94 +45,51 @@ def setUp(self): eos_id=2, ) self.preprocessor = XLMRobertaPreprocessor( - tokenizer=XLMRobertaTokenizer(proto=bytes_io.getvalue()), + XLMRobertaTokenizer(proto=bytes_io.getvalue()), sequence_length=5, ) self.backbone = XLMRobertaBackbone( - vocabulary_size=10, + vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, num_heads=2, hidden_dim=2, intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - self.classifier = XLMRobertaClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - # Check we handle serialization correctly. - activation=keras.activations.softmax, - hidden_dim=4, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + "num_classes": 2, + } + self.train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + [1, 0], # Labels. ) + self.input_data = self.preprocessor(*self.train_data)[0] - self.raw_batch = [ - "the quick brown fox.", - "the slow brown fox.", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch) - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - (self.raw_batch, np.ones((2,))) - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_classifier(self): - self.classifier(self.preprocessed_batch) - - def test_classifier_predict(self): - preds1 = self.classifier.predict(self.raw_batch) - self.classifier.preprocessor = None - preds2 = self.classifier.predict(self.preprocessed_batch) - # Assert predictions match. - self.assertAllClose(preds1, preds2) - # Assert valid softmax output. 
- self.assertAllClose(ops.sum(preds2, axis=-1), [1.0, 1.0]) - - def test_classifier_fit(self): - self.classifier.fit(self.raw_dataset) - self.classifier.preprocessor = None - self.classifier.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.classifier.preprocessor = None - self.classifier.compile( - loss="sparse_categorical_crossentropy", - jit_compile=False, + def test_classifier_basics(self): + self.run_task_test( + cls=XLMRobertaClassifier, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 2), ) - self.classifier.fit(self.preprocessed_dataset) - def test_serialization(self): - # Defaults. - original = XLMRobertaClassifier( - self.backbone, - num_classes=2, - ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - # With options. - original = XLMRobertaClassifier( - self.backbone, - num_classes=4, - preprocessor=self.preprocessor, - activation=keras.activations.softmax, - hidden_dim=4, - name="test", - trainable=False, + @pytest.mark.large + def test_saved_model(self): + self.run_model_saving_test( + cls=XLMRobertaClassifier, + init_kwargs=self.init_kwargs, + input_data=self.input_data, ) - config = keras.saving.serialize_keras_object(original) - restored = keras.saving.deserialize_keras_object(config) - self.assertEqual(restored.get_config(), original.get_config()) - - @pytest.mark.large # Saving is slow, so mark these large. - def test_saving_model(self): - model_output = self.classifier.predict(self.raw_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.classifier.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, XLMRobertaClassifier) - # Check that output matches. - restored_output = restored_model.predict(self.raw_batch) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaClassifier.presets: + self.run_preset_test( + cls=XLMRobertaClassifier, + preset=preset, + init_kwargs={"num_classes": 2}, + input_data=self.input_data, + expected_output_shape=(2, 2), + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py index cbb74c7722..6dd0bc0f71 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
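Classifier and masked-LM tests now route through `TestCase.run_task_test`, as in the classifier hunk above. Going by the removed per-model tests (fit on raw strings through the attached preprocessor, then a predict-shape check), a minimal sketch would be:

import tensorflow as tf


def run_task_test(self, cls, init_kwargs, train_data, expected_output_shape=None):
    task = cls(**init_kwargs)
    # Raw strings work here because the task owns a preprocessor.
    ds = tf.data.Dataset.from_tensor_slices(train_data).batch(2)
    task.fit(ds)
    output = task.predict(train_data[0])
    if expected_output_shape is not None:
        self.assertEqual(tuple(output.shape), expected_output_shape)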
@@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm_preprocessor import ( XLMRobertaMaskedLMPreprocessor, ) @@ -29,120 +28,72 @@ class XLMRobertaMaskedLMPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=12, + vocab_size=11, model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="", - eos_piece="", + unk_id=0, + bos_id=1, + eos_id=2, user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.tokenizer = XLMRobertaTokenizer(proto=self.proto) - self.preprocessor = XLMRobertaMaskedLMPreprocessor( - tokenizer=self.tokenizer, + self.tokenizer = XLMRobertaTokenizer(proto=bytes_io.getvalue()) + self.init_kwargs = { + "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. - mask_selection_rate=1.0, - mask_token_rate=1.0, - random_token_rate=0.0, - mask_selection_length=5, - sequence_length=12, - ) - - def test_preprocess_strings(self): - input_data = " brown fox quick" - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [0, 13, 13, 13, 2, 1, 1, 1, 1, 1, 1, 1] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [1, 2, 3, 0, 0]) - self.assertAllEqual(y, [7, 9, 11, 0, 0]) - self.assertAllEqual(sw, [1.0, 1.0, 1.0, 0.0, 0.0]) - - def test_preprocess_list_of_strings(self): - input_data = [" brown fox quick"] * 13 - - x, y, sw = self.preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [[0, 13, 13, 13, 2, 1, 1, 1, 1, 1, 1, 1]] * 13 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]] * 13 + "mask_selection_rate": 1.0, + "mask_token_rate": 1.0, + "random_token_rate": 0.0, + "mask_selection_length": 4, + "sequence_length": 12, + } + self.input_data = ["the quick brown fox"] + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=XLMRobertaMaskedLMPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[0, 12, 12, 12, 12, 2, 1, 1, 1, 1, 1, 1]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[1, 2, 3, 4]], + }, + [[5, 10, 6, 8]], + [[1.0, 1.0, 1.0, 1.0]], + ), ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 0, 0]] * 13) - self.assertAllEqual(y, [[7, 9, 11, 0, 0]] * 13) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 0.0, 0.0]] * 13) - - def test_preprocess_dataset(self): - sentences = tf.constant([" brown fox quick"] * 13) - ds = tf.data.Dataset.from_tensor_slices(sentences) - ds = ds.map(self.preprocessor) - x, y, sw = ds.batch(13).take(1).get_single_element() - self.assertAllEqual( - x["token_ids"], [[0, 13, 13, 13, 2, 1, 1, 1, 1, 1, 1, 1]] * 13 - ) - self.assertAllEqual( - x["padding_mask"], [[1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0]] * 13 - ) - self.assertAllEqual(x["mask_positions"], [[1, 2, 3, 0, 0]] * 13) - self.assertAllEqual(y, [[7, 9, 11, 0, 0]] * 13) - self.assertAllEqual(sw, [[1.0, 1.0, 1.0, 0.0, 0.0]] * 13) - - def 
test_mask_multiple_sentences(self): - sentence_one = tf.constant(" airplane") - sentence_two = tf.constant(" round") - - x, y, sw = self.preprocessor((sentence_one, sentence_two)) - self.assertAllEqual( - x["token_ids"], [0, 2, 2, 2, 13, 2, 1, 1, 1, 1, 1, 1] - ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [4, 0, 0, 0, 0]) - self.assertAllEqual(y, [12, 0, 0, 0, 0]) - self.assertAllEqual(sw, [1.0, 0.0, 0.0, 0.0, 0.0]) def test_no_masking_zero_rate(self): no_mask_preprocessor = XLMRobertaMaskedLMPreprocessor( - self.preprocessor.tokenizer, + self.tokenizer, mask_selection_rate=0.0, - mask_selection_length=5, + mask_selection_length=4, sequence_length=12, ) - input_data = " quick brown fox" - - x, y, sw = no_mask_preprocessor(input_data) - self.assertAllEqual( - x["token_ids"], [0, 11, 7, 9, 2, 1, 1, 1, 1, 1, 1, 1] + input_data = ["the quick brown fox"] + self.assertAllClose( + no_mask_preprocessor(input_data), + ( + { + "token_ids": [[0, 5, 10, 6, 8, 2, 1, 1, 1, 1, 1, 1]], + "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], + "mask_positions": [[0, 0, 0, 0]], + }, + [[0, 0, 0, 0]], + [[0.0, 0.0, 0.0, 0.0]], + ), ) - self.assertAllEqual( - x["padding_mask"], [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0] - ) - self.assertAllEqual(x["mask_positions"], [0, 0, 0, 0, 0]) - self.assertAllEqual(y, [0, 0, 0, 0, 0]) - self.assertAllEqual(sw, [0.0, 0.0, 0.0, 0.0, 0.0]) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaMaskedLMPreprocessor.presets: + self.run_preset_test( + cls=XLMRobertaMaskedLMPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py index 94c06ea7be..81fafbe4dc 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py @@ -1,4 +1,4 @@ -# Copyright 2022 The KerasNLP Authors +# Copyright 2023 The KerasNLP Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,13 +13,10 @@ # limitations under the License. import io -import os import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm import ( XLMRobertaMaskedLM, @@ -35,29 +32,21 @@ class XLMRobertaMaskedLMTest(TestCase): def setUp(self): + # Setup model. 
+ vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the slow brown fox"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=5, + vocab_size=11, model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="", - eos_piece="", + unk_id=0, + bos_id=1, + eos_id=2, user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - self.preprocessor = XLMRobertaMaskedLMPreprocessor( - XLMRobertaTokenizer(proto=self.proto), + XLMRobertaTokenizer(proto=bytes_io.getvalue()), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, @@ -65,7 +54,6 @@ def setUp(self): mask_selection_length=5, sequence_length=5, ) - self.backbone = XLMRobertaBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), num_layers=2, @@ -74,61 +62,36 @@ def setUp(self): intermediate_dim=4, max_sequence_length=self.preprocessor.packer.sequence_length, ) - - self.masked_lm = XLMRobertaMaskedLM( - self.backbone, - preprocessor=self.preprocessor, - ) - - self.raw_batch = [ - "the quick brown fox", - "the slow brown fox", - ] - self.preprocessed_batch = self.preprocessor(self.raw_batch)[0] - self.raw_dataset = tf.data.Dataset.from_tensor_slices( - self.raw_batch - ).batch(2) - self.preprocessed_dataset = self.raw_dataset.map(self.preprocessor) - - def test_valid_call_masked_lm(self): - self.masked_lm(self.preprocessed_batch) - - def test_classifier_predict(self): - self.masked_lm.predict(self.raw_batch) - self.masked_lm.preprocessor = None - self.masked_lm.predict(self.preprocessed_batch) - - def test_classifier_fit(self): - self.masked_lm.fit(self.raw_dataset) - self.masked_lm.preprocessor = None - self.masked_lm.fit(self.preprocessed_dataset) - - def test_classifier_fit_no_xla(self): - self.masked_lm.preprocessor = None - self.masked_lm.compile( - loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False), - jit_compile=False, + self.init_kwargs = { + "preprocessor": self.preprocessor, + "backbone": self.backbone, + } + self.train_data = ( + ["the quick brown fox.", "the earth is round"], # Features. ) - self.masked_lm.fit(self.preprocessed_dataset) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.masked_lm) - new_classifier = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_classifier.get_config(), - self.masked_lm.get_config(), + self.input_data = self.preprocessor(*self.train_data)[0] + + def test_masked_lm_basics(self): + self.run_task_test( + cls=XLMRobertaMaskedLM, + init_kwargs=self.init_kwargs, + train_data=self.train_data, + expected_output_shape=(2, 5, 13), ) @pytest.mark.large def test_saved_model(self): - save_path = os.path.join(self.get_temp_dir(), "model.keras") - self.masked_lm.save(save_path, save_format="keras_v3") - restored_model = keras.models.load_model(save_path) - - # Check we got the real object back. 
- self.assertIsInstance(restored_model, XLMRobertaMaskedLM) - - model_output = self.masked_lm(self.preprocessed_batch) - restored_output = restored_model(self.preprocessed_batch) + self.run_model_saving_test( + cls=XLMRobertaMaskedLM, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) - self.assertAllClose(model_output, restored_output) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaMaskedLM.presets: + self.run_preset_test( + cls=XLMRobertaMaskedLM, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py index 711b52e264..38eb4882f3 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor import ( XLMRobertaPreprocessor, ) @@ -29,110 +28,60 @@ class XLMRobertaPreprocessorTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=10, + vocab_size=12, model_type="WORD", - unk_id=0, - bos_id=1, - eos_id=2, - ) - self.proto = bytes_io.getvalue() - - self.preprocessor = XLMRobertaPreprocessor( - tokenizer=XLMRobertaTokenizer(proto=self.proto), - sequence_length=12, - ) - - def test_tokenize_strings(self): - input_data = "the quick brown fox" - output = self.preprocessor(input_data) - self.assertAllEqual( - output["token_ids"], [0, 4, 9, 5, 7, 2, 1, 1, 1, 1, 1, 1] - ) - self.assertAllEqual( - output["padding_mask"], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0] - ) - - def test_tokenize_list_of_strings(self): - # We should handle a list of strings as as batch. 
-        input_data = ["the quick brown fox"] * 4
-        output = self.preprocessor(input_data)
-        self.assertAllEqual(
-            output["token_ids"], [[0, 4, 9, 5, 7, 2, 1, 1, 1, 1, 1, 1]] * 4
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4
-        )
-
-    def test_tokenize_labeled_batch(self):
-        x = tf.constant(["the quick brown fox"] * 4)
-        y = tf.constant([1] * 4)
-        sw = tf.constant([1.0] * 4)
-        x_out, y_out, sw_out = self.preprocessor(x, y, sw)
-        self.assertAllEqual(
-            x_out["token_ids"], [[0, 4, 9, 5, 7, 2, 1, 1, 1, 1, 1, 1]] * 4
-        )
-        self.assertAllEqual(
-            x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4
-        )
-        self.assertAllEqual(y_out, y)
-        self.assertAllEqual(sw_out, sw)
-
-    def test_tokenize_labeled_dataset(self):
-        x = tf.constant(["the quick brown fox"] * 4)
-        y = tf.constant([1] * 4)
-        sw = tf.constant([1.0] * 4)
-        ds = tf.data.Dataset.from_tensor_slices((x, y, sw))
-        ds = ds.map(self.preprocessor)
-        x_out, y_out, sw_out = ds.batch(4).take(1).get_single_element()
-        self.assertAllEqual(
-            x_out["token_ids"], [[0, 4, 9, 5, 7, 2, 1, 1, 1, 1, 1, 1]] * 4
-        )
-        self.assertAllEqual(
-            x_out["padding_mask"], [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]] * 4
+            pad_id=0,
+            unk_id=1,
+            bos_id=2,
+            eos_id=3,
+            pad_piece="<pad>",
+            unk_piece="<unk>",
+            bos_piece="[CLS]",
+            eos_piece="[SEP]",
+            user_defined_symbols="[MASK]",
+        )
+        self.tokenizer = XLMRobertaTokenizer(proto=bytes_io.getvalue())
+        self.init_kwargs = {
+            "tokenizer": self.tokenizer,
+            "sequence_length": 8,
+        }
+        self.input_data = (
+            ["the quick brown fox"],
+            [1],  # Pass through labels.
+            [1.0],  # Pass through sample_weights.
         )
-        self.assertAllEqual(y_out, y)
-        self.assertAllEqual(sw_out, sw)
 
-    def test_tokenize_multiple_sentences(self):
-        sentence_one = tf.constant("the quick brown fox")
-        sentence_two = tf.constant("the earth")
-        output = self.preprocessor((sentence_one, sentence_two))
-        self.assertAllEqual(
-            output["token_ids"], [0, 4, 9, 5, 7, 2, 2, 4, 6, 2, 1, 1]
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
-        )
-
-    def test_tokenize_multiple_batched_sentences(self):
-        sentence_one = tf.constant(["the quick brown fox"] * 4)
-        sentence_two = tf.constant(["the earth"] * 4)
-        # The first tuple or list is always interpreted as an enumeration of
-        # separate sequences to concatenate.
-        output = self.preprocessor((sentence_one, sentence_two))
-        self.assertAllEqual(
-            output["token_ids"], [[0, 4, 9, 5, 7, 2, 2, 4, 6, 2, 1, 1]] * 4
-        )
-        self.assertAllEqual(
-            output["padding_mask"], [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]] * 4
+    def test_preprocessor_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=XLMRobertaPreprocessor,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=(
+                {
+                    "token_ids": [[0, 6, 11, 7, 9, 2, 1, 1]],
+                    "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0]],
+                },
+                [1],  # Pass through labels.
+                [1.0],  # Pass through sample_weights.
+ ), ) def test_errors_for_2d_list_input(self): + preprocessor = XLMRobertaPreprocessor(**self.init_kwargs) ambiguous_input = [["one", "two"], ["three", "four"]] with self.assertRaises(ValueError): - self.preprocessor(ambiguous_input) + preprocessor(ambiguous_input) - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.preprocessor) - new_preprocessor = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_preprocessor.get_config(), - self.preprocessor.get_config(), - ) + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaPreprocessor.presets: + self.run_preset_test( + cls=XLMRobertaPreprocessor, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py deleted file mode 100644 index a1824c86c5..0000000000 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py +++ /dev/null @@ -1,198 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest -from absl.testing import parameterized - -from keras_nlp.backend import ops -from keras_nlp.backend import random -from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone -from keras_nlp.models.xlm_roberta.xlm_roberta_classifier import ( - XLMRobertaClassifier, -) -from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor import ( - XLMRobertaPreprocessor, -) -from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( - XLMRobertaTokenizer, -) -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.large -@pytest.mark.tf_only # TODO: jax OOM. -class XLMRobertaPresetSmokeTest(TestCase): - """ - A smoke test for XLM-RoBERTa presets we run continuously. - - This only tests the smallest weights we have available. 
Run with: - `pytest keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py --run_large` - """ - - def test_tokenizer_output(self): - tokenizer = XLMRobertaTokenizer.from_preset( - "xlm_roberta_base_multi", - ) - outputs = tokenizer("The quick brown fox.") - expected_outputs = [581, 63773, 119455, 6, 147797, 5] - self.assertAllEqual(outputs, expected_outputs) - - def test_preprocessor_output(self): - preprocessor = XLMRobertaPreprocessor.from_preset( - "xlm_roberta_base_multi", - sequence_length=4, - ) - outputs = preprocessor("The quick brown fox.")["token_ids"] - expected_outputs = [0, 581, 63773, 2] - self.assertAllEqual(outputs, expected_outputs) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_backbone_output(self, load_weights): - input_data = { - "token_ids": ops.array([[0, 581, 63773, 2]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = XLMRobertaBackbone.from_preset( - "xlm_roberta_base_multi", load_weights=load_weights - ) - outputs = model(input_data) - if load_weights: - outputs = outputs[0, 0, :5] - expected = [0.084763, 0.097018, 0.051329, -0.000805, 0.028415] - self.assertAllClose(outputs, expected, atol=0.01, rtol=0.01) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_classifier_output(self, load_weights): - input_data = ["The quick brown fox."] - model = XLMRobertaClassifier.from_preset( - "xlm_roberta_base_multi", num_classes=2, load_weights=load_weights - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_classifier_output_without_preprocessing(self, load_weights): - input_data = { - "token_ids": ops.array([[0, 581, 63773, 2]]), - "padding_mask": ops.array([[1, 1, 1, 1]]), - } - model = XLMRobertaClassifier.from_preset( - "xlm_roberta_base_multi", - num_classes=2, - load_weights=load_weights, - preprocessor=None, - ) - # Never assert output values, as the head weights are random. - model.predict(input_data) - - @parameterized.named_parameters( - ("xlm_roberta_tokenizer", XLMRobertaTokenizer), - ("xlm_roberta_preprocessor", XLMRobertaPreprocessor), - ("xlm_roberta", XLMRobertaBackbone), - ("xlm_roberta_classifier", XLMRobertaClassifier), - ) - def test_preset_docstring(self, cls): - """Check we did our docstring formatting correctly.""" - for name in cls.presets: - self.assertRegex(cls.from_preset.__doc__, name) - - @parameterized.named_parameters( - ("xlm_roberta_tokenizer", XLMRobertaTokenizer, {}), - ("xlm_roberta_preprocessor", XLMRobertaPreprocessor, {}), - ("xlm_roberta", XLMRobertaBackbone, {}), - ("xlm_roberta_classifier", XLMRobertaClassifier, {"num_classes": 2}), - ) - def test_unknown_preset_error(self, cls, kwargs): - # Not a preset name - with self.assertRaises(ValueError): - cls.from_preset("xlm_roberta_base_clowntown", **kwargs) - - -@pytest.mark.extra_large -class XLMRobertaPresetFullTest(TestCase): - """ - Test the full enumeration of our preset. - - This tests every XLM-RoBERTa preset and is only run manually. 
- Run with: - `pytest keras_nlp/models/xlm_roberta/xlm_roberta_presets_test.py --run_extra_large` - """ - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_xlm_roberta(self, load_weights): - for preset in XLMRobertaBackbone.presets: - model = XLMRobertaBackbone.from_preset( - preset, load_weights=load_weights - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), dtype="int64", maxval=model.vocabulary_size - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - model(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_xlm_roberta_classifier(self, load_weights): - for preset in XLMRobertaClassifier.presets: - classifier = XLMRobertaClassifier.from_preset( - preset, - num_classes=4, - load_weights=load_weights, - ) - input_data = ["The quick brown fox."] - classifier.predict(input_data) - - @parameterized.named_parameters( - ("preset_weights", True), ("random_weights", False) - ) - def test_load_xlm_roberta_classifier_without_preprocessing( - self, load_weights - ): - for preset in XLMRobertaClassifier.presets: - classifier = XLMRobertaClassifier.from_preset( - preset, - num_classes=4, - load_weights=load_weights, - preprocessor=None, - ) - input_data = { - "token_ids": random.uniform( - shape=(1, 512), - dtype="int64", - maxval=classifier.backbone.vocabulary_size, - ), - "padding_mask": ops.array([1] * 512, shape=(1, 512)), - } - classifier.predict(input_data) - - def test_load_tokenizers(self): - for preset in XLMRobertaTokenizer.presets: - tokenizer = XLMRobertaTokenizer.from_preset(preset) - tokenizer("The quick brown fox.") - - def test_load_preprocessors(self): - for preset in XLMRobertaPreprocessor.presets: - preprocessor = XLMRobertaPreprocessor.from_preset(preset) - preprocessor("The quick brown fox.") diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py index 7800e3913c..a58ee4c74b 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py @@ -14,10 +14,9 @@ import io +import pytest import sentencepiece -import tensorflow as tf -from keras_nlp.backend import keras from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, ) @@ -26,88 +25,43 @@ class XLMRobertaTokenizerTest(TestCase): def setUp(self): + vocab_data = ["the quick brown fox", "the earth is round"] bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), + sentence_iterator=iter(vocab_data), model_writer=bytes_io, - vocab_size=10, + vocab_size=11, model_type="WORD", unk_id=0, bos_id=1, eos_id=2, + user_defined_symbols="[MASK]", ) - self.proto = bytes_io.getvalue() - - self.tokenizer = XLMRobertaTokenizer(proto=self.proto) - - def test_tokenize(self): - input_data = "the quick brown fox" - output = self.tokenizer(input_data) - self.assertAllEqual(output, [4, 9, 5, 7]) - - def test_tokenize_batch(self): - input_data = ["the quick brown fox", "the earth is round"] - output = self.tokenizer(input_data) - self.assertAllEqual(output, [[4, 9, 5, 7], [4, 6, 8, 10]]) - - def test_unk_token(self): - input_data = "the quick brown fox running" - - output = self.tokenizer(input_data) - self.assertAllEqual(output, [4, 9, 5, 7, 3]) - - def 
test_detokenize(self): - input_data = [[4, 9, 5, 7]] - output = self.tokenizer.detokenize(input_data) - self.assertEqual(output, ["brown round earth is"]) - - def test_vocabulary(self): - vocabulary = self.tokenizer.get_vocabulary() - self.assertAllEqual( - vocabulary, - [ - "", - "", - "", - "", - "▁the", - "▁brown", - "▁earth", - "▁fox", - "▁is", - "▁quick", - "▁round", - "", - ], + self.init_kwargs = {"proto": bytes_io.getvalue()} + self.input_data = ["the quick brown fox.", "the earth is round."] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=XLMRobertaTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 10, 6, 3], [5, 7, 9, 3]], ) - self.assertEqual(self.tokenizer.vocabulary_size(), 12) - - def test_id_to_token(self): - print(self.tokenizer.id_to_token(9)) - self.assertEqual(self.tokenizer.id_to_token(9), "▁quick") - self.assertEqual(self.tokenizer.id_to_token(5), "▁brown") - def test_error_id_out_of_vocabulary(self): - with self.assertRaises(ValueError): - self.tokenizer.id_to_token(self.tokenizer.vocabulary_size()) - with self.assertRaises(ValueError): - self.tokenizer.id_to_token(-1) - - def test_token_to_id(self): - self.assertEqual(self.tokenizer.token_to_id("▁the"), 4) - self.assertEqual(self.tokenizer.token_to_id("▁round"), 10) - # Test any random OOV token. - self.assertEqual(self.tokenizer.token_to_id(""), 3) - # Test a special token. - self.assertEqual(self.tokenizer.token_to_id(""), 1) - - def test_serialization(self): - config = keras.saving.serialize_keras_object(self.tokenizer) - new_tokenizer = keras.saving.deserialize_keras_object(config) - self.assertEqual( - new_tokenizer.get_config(), - self.tokenizer.get_config(), + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=XLMRobertaTokenizer, + preset="xlm_roberta_base_multi", + input_data=["The quick brown fox."], + expected_output=[[581, 63773, 119455, 6, 147797, 5]], ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in XLMRobertaTokenizer.presets: + self.run_preset_test( + cls=XLMRobertaTokenizer, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/xlnet/xlnet_backbone_test.py b/keras_nlp/models/xlnet/xlnet_backbone_test.py index 3ce0e62c89..f2faf4cdd9 100644 --- a/keras_nlp/models/xlnet/xlnet_backbone_test.py +++ b/keras_nlp/models/xlnet/xlnet_backbone_test.py @@ -12,71 +12,40 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import os +import pytest -import numpy as np -import tensorflow as tf - -from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.models.xlnet.xlnet_backbone import XLNetBackbone from keras_nlp.tests.test_case import TestCase class XLNetTest(TestCase): def setUp(self): - self.backbone = XLNetBackbone( - vocabulary_size=10, - num_layers=2, - num_heads=2, - hidden_dim=2, - intermediate_dim=4, - name="xlnet_backbone", - ) - - self.input_batch = { - "token_ids": np.ones((2, 7), dtype=np.int32), - "segment_ids": np.ones((2, 7), dtype=np.int32), - "padding_mask": np.ones((2, 7), dtype=np.int32), + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_heads": 2, + "hidden_dim": 2, + "intermediate_dim": 4, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "segment_ids": ops.zeros((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), } - self.input_dataset = tf.data.Dataset.from_tensor_slices( - self.input_batch - ).batch(2) - - def test_call(self): - self.backbone(self.input_batch) - - def test_token_embedding(self): - output = self.backbone.token_embedding(self.input_batch["token_ids"]) - self.assertEqual(output.shape, (2, 7, 2)) - - def test_variable_sequence_length(self): - for seq_length in (20, 30, 40): - input_data = { - "token_ids": np.ones((2, seq_length), dtype=np.int32), - "padding_mask": np.ones((2, seq_length), dtype=np.int32), - "segment_ids": np.ones((2, seq_length), dtype=np.int32), - } - self.backbone(input_data) - - def test_predict(self): - self.backbone.predict(self.input_batch) - self.backbone.predict(self.input_dataset) - def test_serialization(self): - new_backbone = keras.saving.deserialize_keras_object( - keras.saving.serialize_keras_object(self.backbone) + def test_backbone_basics(self): + self.run_backbone_test( + cls=XLNetBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 2), ) - self.assertEqual(new_backbone.get_config(), self.backbone.get_config()) + @pytest.mark.large def test_saved_model(self): - model_output = self.backbone(self.input_batch) - path = os.path.join(self.get_temp_dir(), "model.keras") - self.backbone.save(path, save_format="keras_v3") - restored_model = keras.models.load_model(path) - - # Check we got the real object back. - self.assertIsInstance(restored_model, XLNetBackbone) - - # Check that output matches. - restored_output = restored_model(self.input_batch) - self.assertAllClose(model_output, restored_output) + self.run_model_saving_test( + cls=XLNetBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py index 976f4eb37c..2025f3ad13 100644 --- a/keras_nlp/tests/test_case.py +++ b/keras_nlp/tests/test_case.py @@ -13,6 +13,8 @@ # limitations under the License. import json +import os +import re import tensorflow as tf import tree @@ -74,7 +76,7 @@ def assertDTypeEqual(self, x, expected_dtype, msg=None): def run_layer_test( self, - layer_cls, + cls, init_kwargs, input_data, expected_output_shape, @@ -85,9 +87,10 @@ def run_layer_test( run_training_check=True, run_mixed_precision_check=True, ): + """Run basic tests for a modeling layer.""" # Serialization test. 
-        layer = layer_cls(**init_kwargs)
-        self.run_class_serialization_test(layer)
+        layer = cls(**init_kwargs)
+        self.run_serialization_test(layer)

         def run_build_asserts(layer):
             self.assertTrue(layer.built)
@@ -143,7 +146,7 @@ def call(self, x):

         if config.multi_backend():
             # Build test.
-            layer = layer_cls(**init_kwargs)
+            layer = cls(**init_kwargs)
             if isinstance(input_data, dict):
                 shapes = {k + "_shape": v.shape for k, v in input_data.items()}
                 layer.build(**shapes)
@@ -155,7 +158,7 @@
             keras_tensor_inputs = tree.map_structure(
                 lambda x: keras.KerasTensor(x.shape, x.dtype), input_data
             )
-            layer = layer_cls(**init_kwargs)
+            layer = cls(**init_kwargs)
             if isinstance(keras_tensor_inputs, dict):
                 keras_tensor_outputs = layer(**keras_tensor_inputs)
             else:
@@ -164,7 +167,7 @@
             run_output_asserts(layer, keras_tensor_outputs)

         # Eager call test and compiled training test.
-        layer = layer_cls(**init_kwargs)
+        layer = cls(**init_kwargs)
         if isinstance(input_data, dict):
             output_data = layer(**input_data)
         else:
@@ -181,7 +184,7 @@
                 run_mixed_precision_check = torch.cuda.is_available()

         if run_mixed_precision_check:
-            layer = layer_cls(**{**init_kwargs, "dtype": "mixed_float16"})
+            layer = cls(**{**init_kwargs, "dtype": "mixed_float16"})
             if isinstance(input_data, dict):
                 output_data = layer(**input_data)
             else:
@@ -193,7 +196,43 @@
             if is_float_dtype(weight.dtype):
                 self.assertDTypeEqual(weight, "float32")

-    def run_class_serialization_test(self, instance):
+    def run_preprocessing_layer_test(
+        self,
+        cls,
+        init_kwargs,
+        input_data,
+        expected_output=None,
+        batch_size=2,
+    ):
+        """Run basic tests for a preprocessing layer."""
+        layer = cls(**init_kwargs)
+        # Check serialization (without a full save).
+        self.run_serialization_test(layer)
+
+        ds = tf.data.Dataset.from_tensor_slices(input_data)
+
+        # Run with direct call.
+        if isinstance(input_data, tuple):
+            # Mimic tf.data unpacking behavior for preprocessing layers.
+            output = layer(*input_data)
+        else:
+            output = layer(input_data)
+
+        # Run with an unbatched dataset.
+        output_ds = ds.map(layer).ragged_batch(1_000)
+        self.assertAllClose(output, output_ds.get_single_element())
+
+        # Run with a batched dataset.
+        output_ds = ds.batch(1_000).map(layer)
+        self.assertAllClose(output, output_ds.get_single_element())
+
+        if expected_output:
+            self.assertAllClose(output, expected_output)
+
+    def run_serialization_test(self, instance):
+        """Check idempotency of serialize/deserialize.
+
+        Note this is a much faster test than saving."""
        # get_config roundtrip
         cls = instance.__class__
         cfg = instance.get_config()
@@ -223,3 +262,158 @@
             if "__annotations__" in lst:
                 lst.remove("__annotations__")
         self.assertEqual(ref_dir, new_dir)
+
+    def run_model_saving_test(
+        self,
+        cls,
+        init_kwargs,
+        input_data,
+    ):
+        """Save and load a model from disk and assert output is unchanged."""
+        model = cls(**init_kwargs)
+        model_output = model(input_data)
+        path = os.path.join(self.get_temp_dir(), "model.keras")
+        model.save(path, save_format="keras_v3")
+        restored_model = keras.models.load_model(path)
+
+        # Check we got the real object back.
+        self.assertIsInstance(restored_model, cls)
+
+        # Check that output matches.
+        restored_output = restored_model(input_data)
+        self.assertAllClose(model_output, restored_output)
+
+    def run_backbone_test(
+        self,
+        cls,
+        init_kwargs,
+        input_data,
+        expected_output_shape,
+        variable_length_data=None,
+    ):
+        """Run basic tests for a backbone, including compilation."""
+        backbone = cls(**init_kwargs)
+        # Check serialization (without a full save).
+        self.run_serialization_test(backbone)
+
+        # Call model eagerly.
+        output = backbone(input_data)
+        if isinstance(expected_output_shape, dict):
+            for key in expected_output_shape:
+                self.assertEqual(output[key].shape, expected_output_shape[key])
+        else:
+            self.assertEqual(output.shape, expected_output_shape)
+
+        # Check we can embed tokens eagerly.
+        output = backbone.token_embedding(ops.zeros((2, 3), dtype="int32"))
+
+        # Check variable length sequences.
+        if variable_length_data is None:
+            # If no variable length data passed, assume the second axis of all
+            # inputs is our sequence axis and create it ourselves.
+            variable_length_data = [
+                tree.map_structure(lambda x: x[:, :seq_length, ...], input_data)
+                for seq_length in (2, 3, 4)
+            ]
+        for batch in variable_length_data:
+            backbone(batch)
+
+        # Check compiled predict function.
+        backbone.predict(input_data)
+        # Convert to numpy first, torch GPU tensor -> tf.data will error.
+        numpy_data = tree.map_structure(ops.convert_to_numpy, input_data)
+        # Create a dataset.
+        input_dataset = tf.data.Dataset.from_tensor_slices(numpy_data).batch(2)
+        backbone.predict(input_dataset)
+
+        # Check name maps to classname.
+        name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", cls.__name__)
+        name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
+        self.assertRegexpMatches(backbone.name, name)
+
+    def run_task_test(
+        self,
+        cls,
+        init_kwargs,
+        train_data,
+        expected_output_shape=None,
+        batch_size=2,
+    ):
+        """Run basic tests for a task, including compilation."""
+        task = cls(**init_kwargs)
+        # Check serialization (without a full save).
+        self.run_serialization_test(task)
+        preprocessor = task.preprocessor
+        ds = tf.data.Dataset.from_tensor_slices(train_data).batch(batch_size)
+        x, y, sw = keras.utils.unpack_x_y_sample_weight(train_data)
+
+        # Test predict.
+        output = task.predict(x)
+        if expected_output_shape is not None:
+            output_shape = tree.map_structure(lambda x: x.shape, output)
+            self.assertAllClose(output_shape, expected_output_shape)
+        # With a dataset.
+        output_ds = task.predict(ds)
+        self.assertAllClose(output, output_ds)
+        # With split preprocessing.
+        task.preprocessor = None
+        output_split = task.predict(ds.map(preprocessor))
+        task.preprocessor = preprocessor
+        self.assertAllClose(output, output_split)
+
+        # Test fit.
+        task.fit(x, y, sample_weight=sw)
+        # With a dataset.
+        task.fit(ds)
+        # With split preprocessing.
+        task.preprocessor = None
+        task.fit(ds.map(preprocessor))
+        task.preprocessor = preprocessor
+
+    def run_preset_test(
+        self,
+        cls,
+        preset,
+        input_data,
+        init_kwargs={},
+        expected_output=None,
+        expected_output_shape=None,
+        expected_partial_output=None,
+    ):
+        """Run instantiation and a forward pass for a preset."""
+        self.assertRegex(cls.from_preset.__doc__, preset)
+
+        with self.assertRaises(ValueError):
+            cls.from_preset("clowntown", **init_kwargs)
+
+        instance = cls.from_preset(preset, **init_kwargs)
+
+        if isinstance(input_data, tuple):
+            # Mimic tf.data unpacking behavior for preprocessing layers.
+            output = instance(*input_data)
+        else:
+            output = instance(input_data)
+
+        if isinstance(instance, keras.Model):
+            instance = cls.from_preset(
+                preset, load_weights=False, **init_kwargs
+            )
+            instance(input_data)
+
+        if expected_output is not None:
+            self.assertAllClose(output, expected_output)
+
+        if expected_output_shape is not None:
+            output_shape = tree.map_structure(lambda x: x.shape, output)
+            self.assertAllClose(output_shape, expected_output_shape)
+
+        if expected_partial_output is not None:
+            # Allow passing a partial output snippet of the last dimension.
+            # We want to check stability, but the full output would be too long.
+            def compare(actual, expected):
+                expected = ops.convert_to_numpy(expected)
+                self.assertEqual(len(expected.shape), 1)
+                actual = ops.reshape(actual, (-1,))[: expected.shape[0]]
+                self.assertAllClose(actual, expected, atol=0.01, rtol=0.01)
+
+            tree.map_structure(compare, output, expected_partial_output)
diff --git a/keras_nlp/utils/tensor_utils.py b/keras_nlp/utils/tensor_utils.py
index 1c9ad1e3bd..9f639ed7c1 100644
--- a/keras_nlp/utils/tensor_utils.py
+++ b/keras_nlp/utils/tensor_utils.py
@@ -63,7 +63,7 @@ def convert_to_backend_tensor_or_python_list(x):
     If we encounter one of these types in torch or jax, we will instead
     covert the tensor to simple pythonic types (lists of strings).
     """
-    if isinstance(x, tf.RaggedTensor) or x.dtype == tf.string:
+    if isinstance(x, tf.RaggedTensor) or getattr(x, "dtype", None) == tf.string:
         return tensor_to_list(x)
     return ops.convert_to_tensor(x)

From 871f664d37d242a2a08630b1e88f284c3d1fb40c Mon Sep 17 00:00:00 2001
From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com>
Date: Tue, 17 Oct 2023 17:51:47 -0700
Subject: [PATCH 12/87] Convert T5 to Keras 3 (#1274)

* Change TF ops to Keras Core ops

* Fix formatting

* Remove build override

* Fix formatting and remove unneeded function

---
 keras_nlp/models/t5/t5_backbone.py          | 11 +--
 keras_nlp/models/t5/t5_backbone_test.py     |  1 -
 keras_nlp/models/t5/t5_layer_norm.py        |  9 +-
 .../models/t5/t5_multi_head_attention.py    | 86 +++++++++----------
 keras_nlp/models/t5/t5_transformer_layer.py | 12 +--
 5 files changed, 55 insertions(+), 64 deletions(-)

diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py
index 7514cc51ae..13db116f43 100644
--- a/keras_nlp/models/t5/t5_backbone.py
+++ b/keras_nlp/models/t5/t5_backbone.py
@@ -19,7 +19,6 @@
 from keras_nlp.models.t5.t5_layer_norm import T5LayerNorm
 from keras_nlp.models.t5.t5_transformer_layer import T5TransformerLayer
 from keras_nlp.utils.python_utils import classproperty
-from keras_nlp.utils.tensor_utils import assert_tf_backend


 @keras_nlp_export("keras_nlp.models.T5Backbone")
@@ -81,8 +80,6 @@ def __init__(
         tie_embedding_weights=False,
         **kwargs,
     ):
-        assert_tf_backend(self.__class__.__name__)
-
         # Encoder inputs
         encoder_token_ids = keras.Input(
             shape=(None,), dtype="int32", name="encoder_token_ids"
@@ -121,7 +118,7 @@
         position_bias = None
         for i in range(num_layers):
-            x, position_bias = T5TransformerLayer(
+            output = T5TransformerLayer(
                 is_decoder=False,
                 hidden_dim=hidden_dim,
                 intermediate_dim=intermediate_dim,
@@ -138,6 +135,8 @@
                 position_bias=position_bias,
                 use_causal_mask=False,
             )
+            if isinstance(output, tuple):
+                x, position_bias = output

         x = T5LayerNorm(
             epsilon=layer_norm_epsilon,
@@ -162,7 +161,7 @@
         position_bias = None
         for i in range(num_layers):
-            x, position_bias = T5TransformerLayer(
+            output = T5TransformerLayer(
                 is_decoder=True,
hidden_dim=hidden_dim, intermediate_dim=intermediate_dim, @@ -181,6 +180,8 @@ def __init__( encoder_attention_mask=encoder_attention_mask, use_causal_mask=True, ) + if isinstance(output, tuple): + x, position_bias = output x = T5LayerNorm( epsilon=layer_norm_epsilon, diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index e5e147705e..b8041e876e 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -19,7 +19,6 @@ from keras_nlp.tests.test_case import TestCase -@pytest.mark.tf_only class T5BackboneTest(TestCase): def setUp(self): self.init_kwargs = { diff --git a/keras_nlp/models/t5/t5_layer_norm.py b/keras_nlp/models/t5/t5_layer_norm.py index 7cfdb2315e..b4f157c004 100644 --- a/keras_nlp/models/t5/t5_layer_norm.py +++ b/keras_nlp/models/t5/t5_layer_norm.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import tensorflow as tf - from keras_nlp.backend import keras +from keras_nlp.backend import ops class T5LayerNorm(keras.layers.Layer): @@ -31,8 +30,6 @@ def build(self, input_shape): self.built = True def call(self, hidden_states): - variance = tf.math.reduce_mean( - tf.math.square(hidden_states), axis=-1, keepdims=True - ) - hidden_states = hidden_states * tf.math.rsqrt(variance + self.epsilon) + variance = ops.mean(ops.square(hidden_states), axis=-1, keepdims=True) + hidden_states = hidden_states * ops.rsqrt(variance + self.epsilon) return self.weight * hidden_states diff --git a/keras_nlp/models/t5/t5_multi_head_attention.py b/keras_nlp/models/t5/t5_multi_head_attention.py index 479de51e7d..5cb59769dc 100644 --- a/keras_nlp/models/t5/t5_multi_head_attention.py +++ b/keras_nlp/models/t5/t5_multi_head_attention.py @@ -12,18 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import tensorflow as tf -from tensorflow.compiler.tf2xla.python.xla import dynamic_slice +import numpy as np from keras_nlp.backend import keras - - -def shape_list(tensor): - dynamic = tf.shape(tensor) - if tensor.shape == tf.TensorShape(None): - return dynamic - static = tensor.shape.as_list() - return [dynamic[i] if s is None else s for i, s in enumerate(static)] +from keras_nlp.backend import ops class T5MultiHeadAttention(keras.layers.Layer): @@ -123,39 +115,39 @@ def _relative_position_bucket( if bidirectional: num_buckets //= 2 relative_buckets += ( - tf.cast( - tf.math.greater(relative_position, 0), + ops.cast( + ops.greater(relative_position, 0), dtype=relative_position.dtype, ) * num_buckets ) - relative_position = tf.math.abs(relative_position) + relative_position = ops.abs(relative_position) else: - relative_position = -tf.math.minimum(relative_position, 0) + relative_position = -ops.minimum(relative_position, 0) # now n is in the range [0, inf) max_exact = num_buckets // 2 - is_small = tf.math.less(relative_position, max_exact) - relative_position_if_large = max_exact + tf.cast( - tf.math.log( - tf.cast(relative_position, "float32") - / tf.cast(max_exact, "float32") + is_small = ops.less(relative_position, max_exact) + relative_position_if_large = max_exact + ops.cast( + ops.log( + ops.cast(relative_position, "float32") + / ops.cast(max_exact, "float32") ) - / tf.math.log(max_distance / max_exact) + / ops.cast(ops.log(max_distance / max_exact), "float32") * (num_buckets - max_exact), dtype=relative_position.dtype, ) - relative_position_if_large = tf.math.minimum( + relative_position_if_large = ops.minimum( relative_position_if_large, num_buckets - 1 ) - relative_buckets += tf.where( + relative_buckets += ops.where( is_small, relative_position, relative_position_if_large ) return relative_buckets def compute_bias(self, query_length, key_length): """Compute binned relative position bias""" - context_position = tf.range(query_length)[:, None] - memory_position = tf.range(key_length)[None, :] + context_position = ops.arange(query_length)[:, None] + memory_position = ops.arange(key_length)[None, :] relative_position = ( memory_position - context_position ) # shape (query_length, key_length) @@ -165,11 +157,11 @@ def compute_bias(self, query_length, key_length): num_buckets=self.relative_attention_buckets, max_distance=self.relative_attention_max_distance, ) - values = tf.gather( - self.relative_attention_bias, relative_position_bucket + values = ops.take( + self.relative_attention_bias, relative_position_bucket, axis=0 ) # shape (query_length, key_length, num_heads) - values = tf.expand_dims( - tf.transpose(values, [2, 0, 1]), axis=0 + values = ops.expand_dims( + ops.transpose(values, axes=(2, 0, 1)), axis=0 ) # shape (1, num_heads, query_length, key_length) return values @@ -186,7 +178,7 @@ def call( ): # Input is (batch_size, query_length, dim) # past_key_value[0] is (batch_size, num_heads, q_len - 1, dim_per_head) - batch_size, seq_length = shape_list(hidden_states)[:2] + batch_size, seq_length = ops.shape(hidden_states)[:2] real_seq_length = seq_length @@ -197,7 +189,7 @@ def call( f"keys and values. Got {len(past_key_value)} past states." 
) real_seq_length += ( - shape_list(past_key_value[0])[2] + ops.shape(past_key_value[0])[2] if query_length is None else query_length ) @@ -205,21 +197,21 @@ def call( key_length = ( real_seq_length if key_value_states is None - else shape_list(key_value_states)[1] + else ops.shape(key_value_states)[1] ) def shape(hidden_states): - return tf.transpose( - tf.reshape( + return ops.transpose( + ops.reshape( hidden_states, (batch_size, -1, self.num_heads, self.key_value_dim), ), - perm=(0, 2, 1, 3), + axes=(0, 2, 1, 3), ) def unshape(hidden_states): - return tf.reshape( - tf.transpose(hidden_states, perm=(0, 2, 1, 3)), + return ops.reshape( + ops.transpose(hidden_states, axes=(0, 2, 1, 3)), (batch_size, -1, self.inner_dim), ) @@ -240,7 +232,7 @@ def project( if key_value_states is None: # self-attention # (batch_size, num_heads, key_length, dim_per_head) - hidden_states = tf.concat( + hidden_states = ops.concat( [past_key_value, hidden_states], axis=2 ) else: @@ -267,13 +259,13 @@ def project( past_key_value[1] if past_key_value is not None else None, ) - scores = tf.einsum( + scores = ops.einsum( "bnqd,bnkd->bnqk", query_states, key_states ) # (batch_size, num_heads, query_length, key_length) if position_bias is None: if not self.use_relative_attention_bias: - position_bias = tf.zeros( + position_bias = ops.zeros( (1, self.num_heads, real_seq_length, key_length), self.compute_dtype, ) @@ -289,10 +281,10 @@ def project( # we might have a padded past structure, # in which case we want to fetch the position bias slice # right after the most recently filled past index - most_recently_filled_past_index = tf.reduce_max( - tf.where(past_key_value[0][0, 0, :, 0] != 0.0) + most_recently_filled_past_index = ops.amax( + ops.where(past_key_value[0][0, 0, :, 0] != 0.0) ) - position_bias = dynamic_slice( + position_bias = ops.slice( position_bias, (0, 0, most_recently_filled_past_index + 1, 0), (1, self.num_heads, seq_length, real_seq_length), @@ -300,13 +292,13 @@ def project( if mask is not None: # Add a new mask axis for the head dim. - mask = mask[:, tf.newaxis, :, :] + mask = mask[:, np.newaxis, :, :] # Add a very large negative position bias for masked positions. - mask = (1.0 - tf.cast(mask, position_bias.dtype)) * -1e9 + mask = (1.0 - ops.cast(mask, position_bias.dtype)) * -1e9 position_bias = position_bias + mask scores += position_bias - weights = tf.nn.softmax( + weights = ops.nn.softmax( scores, axis=-1 ) # (batch_size, num_heads, query_length, key_length) weights = self.dropout_layer( @@ -315,9 +307,9 @@ def project( # Optionally mask heads if layer_head_mask is not None: - weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * weights + weights = ops.reshape(layer_head_mask, (1, -1, 1, 1)) * weights - attention_output = tf.matmul( + attention_output = ops.matmul( weights, value_states ) # (batch_size, num_heads, query_length, dim_per_head) diff --git a/keras_nlp/models/t5/t5_transformer_layer.py b/keras_nlp/models/t5/t5_transformer_layer.py index ce4a28d67f..22c2dc1c74 100644 --- a/keras_nlp/models/t5/t5_transformer_layer.py +++ b/keras_nlp/models/t5/t5_transformer_layer.py @@ -12,9 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import tensorflow as tf - from keras_nlp.backend import keras +from keras_nlp.backend import ops from keras_nlp.layers.modeling.transformer_layer_utils import ( compute_causal_mask, ) @@ -103,10 +102,10 @@ def call( training=False, ): if use_causal_mask: - shape = tf.shape(hidden_states) + shape = ops.shape(hidden_states) batch_size, length = shape[0], shape[1] causal_mask = compute_causal_mask(batch_size, length, length) - attention_mask = tf.cast(attention_mask, "int32") + attention_mask = ops.cast(attention_mask, "int32") attention_mask = causal_mask & attention_mask x = hidden_states # Intermediate result. @@ -147,4 +146,7 @@ def call( x = self.dropout_layer(x, training=training) x = x + residual - return x, position_bias + if position_bias is not None: + return x, position_bias + else: + return x From f77762b1931ce1aee590dbeed0a93bdbe4ecf69b Mon Sep 17 00:00:00 2001 From: Philippe Modard Date: Fri, 20 Oct 2023 20:17:47 +0200 Subject: [PATCH 13/87] Fix missing backticks in DistilBertClassifier docstrings (#1278) --- keras_nlp/models/distil_bert/distil_bert_classifier.py | 1 + 1 file changed, 1 insertion(+) diff --git a/keras_nlp/models/distil_bert/distil_bert_classifier.py b/keras_nlp/models/distil_bert/distil_bert_classifier.py index 770bf5e02b..42de1cee83 100644 --- a/keras_nlp/models/distil_bert/distil_bert_classifier.py +++ b/keras_nlp/models/distil_bert/distil_bert_classifier.py @@ -137,6 +137,7 @@ class DistilBertClassifier(Task): num_classes=4, ) classifier.fit(x=features, y=labels, batch_size=2) + ``` """ def __init__( From c5a531aac6865234bb9c14e2de0d807645d7f328 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Tue, 24 Oct 2023 11:35:51 -0700 Subject: [PATCH 14/87] T5 checkpoint conversion with HF (#1277) * Change TF ops to Keras Core ops * Fix formatting * Remove build override * Fix formatting and remove unneeded function * Copy over diff from T5 conversion script PR * Add T5 checkpoint conversion logic * Fix output check script * Add transposes for t5 checkpoints * Add relative attention bias * Fix formatting and imports * Add flow for no gated activation * Add flan and t5 to checkpoint conversion script and presets * Add key_value_dim to self and config * Add new gelu approximate standard fn --- keras_nlp/models/t5/t5_backbone.py | 12 +- .../models/t5/t5_multi_head_attention.py | 3 +- keras_nlp/models/t5/t5_presets.py | 163 ++++++++ keras_nlp/models/t5/t5_transformer_layer.py | 24 +- .../convert_t5_checkpoints.py | 367 ++++++++++++++++++ 5 files changed, 559 insertions(+), 10 deletions(-) create mode 100644 keras_nlp/models/t5/t5_presets.py create mode 100644 tools/checkpoint_conversion/convert_t5_checkpoints.py diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py index 13db116f43..ace329c3f6 100644 --- a/keras_nlp/models/t5/t5_backbone.py +++ b/keras_nlp/models/t5/t5_backbone.py @@ -11,12 +11,14 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import copy from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone from keras_nlp.models.t5.t5_layer_norm import T5LayerNorm +from keras_nlp.models.t5.t5_presets import backbone_presets from keras_nlp.models.t5.t5_transformer_layer import T5TransformerLayer from keras_nlp.utils.python_utils import classproperty @@ -50,6 +52,9 @@ class T5Backbone(Backbone): hidden_dim: int. The hidden size of the Transformer layers. intermediate_dim: int. The output dimension of the first Dense layer in a two-layer feedforward network for each Transformer layer. + key_value_dim: int. The dimension of each head of the key/value + projections in the multi-head attention layers. Defaults to + hidden_dim / num_heads. dropout: float. Dropout probability for the Transformer layers. activation: activation function (or activation string name). The activation to be used in the inner dense blocks of the @@ -73,6 +78,7 @@ def __init__( num_heads, hidden_dim, intermediate_dim, + key_value_dim=None, dropout=0.1, activation="gelu", use_gated_activation=True, @@ -122,6 +128,7 @@ def __init__( is_decoder=False, hidden_dim=hidden_dim, intermediate_dim=intermediate_dim, + key_value_dim=key_value_dim or hidden_dim // num_heads, dropout=dropout, activation=activation, layer_norm_epsilon=layer_norm_epsilon, @@ -165,6 +172,7 @@ def __init__( is_decoder=True, hidden_dim=hidden_dim, intermediate_dim=intermediate_dim, + key_value_dim=key_value_dim or hidden_dim // num_heads, dropout=dropout, activation=activation, layer_norm_epsilon=layer_norm_epsilon, @@ -213,6 +221,7 @@ def __init__( self.num_layers = num_layers self.num_heads = num_heads self.activation = keras.activations.get(activation) + self.key_value_dim = key_value_dim self.dropout = dropout self.layer_norm_epsilon = layer_norm_epsilon self.tie_embedding_weights = tie_embedding_weights @@ -228,6 +237,7 @@ def get_config(self): "num_layers": self.num_layers, "num_heads": self.num_heads, "activation": keras.activations.serialize(self.activation), + "key_value_dim": self.key_value_dim, "dropout": self.dropout, "layer_norm_epsilon": self.layer_norm_epsilon, "tie_embedding_weights": self.tie_embedding_weights, @@ -237,4 +247,4 @@ def get_config(self): @classproperty def presets(cls): - return {} + return copy.deepcopy(backbone_presets) diff --git a/keras_nlp/models/t5/t5_multi_head_attention.py b/keras_nlp/models/t5/t5_multi_head_attention.py index 5cb59769dc..77e7109efe 100644 --- a/keras_nlp/models/t5/t5_multi_head_attention.py +++ b/keras_nlp/models/t5/t5_multi_head_attention.py @@ -25,6 +25,7 @@ def __init__( self, is_decoder, hidden_dim, + key_value_dim, num_heads, dropout, use_relative_attention_bias=False, @@ -33,7 +34,7 @@ def __init__( super().__init__(**kwargs) self.is_decoder = is_decoder self.hidden_dim = hidden_dim - self.key_value_dim = hidden_dim // num_heads + self.key_value_dim = key_value_dim self.num_heads = num_heads self.use_relative_attention_bias = use_relative_attention_bias diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py new file mode 100644 index 0000000000..5bba204051 --- /dev/null +++ b/keras_nlp/models/t5/t5_presets.py @@ -0,0 +1,163 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""T5 model preset configurations."""
+
+backbone_presets = {
+    "t5_small_multi": {
+        "metadata": {
+            "description": (
+                "6-layer T5 model. Trained on the Colossal Clean Crawled "
+                "Corpus (C4)."
+            ),
+            "params": 0,
+            "official_name": "T5",
+            "path": "t5",
+            "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md",
+        },
+        "config": {
+            "vocabulary_size": 32128,
+            "num_layers": 6,
+            "num_heads": 8,
+            "hidden_dim": 512,
+            "intermediate_dim": 2048,
+            "key_value_dim": 64,
+            "dropout": 0.1,
+            "activation": "relu",
+            "use_gated_activation": False,
+            "layer_norm_epsilon": 1e-06,
+        },
+        "preprocessor_config": {},
+    },
+    "t5_base_multi": {
+        "metadata": {
+            "description": (
+                "12-layer T5 model. Trained on the Colossal Clean Crawled "
+                "Corpus (C4)."
+            ),
+            "params": 0,
+            "official_name": "T5",
+            "path": "t5",
+            "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md",
+        },
+        "config": {
+            "vocabulary_size": 32128,
+            "num_layers": 12,
+            "num_heads": 12,
+            "hidden_dim": 768,
+            "intermediate_dim": 3072,
+            "dropout": 0.1,
+            "activation": "relu",
+            "use_gated_activation": False,
+            "layer_norm_epsilon": 1e-06,
+        },
+        "preprocessor_config": {},
+    },
+    "t5_large_multi": {
+        "metadata": {
+            "description": (
+                "24-layer T5 model. Trained on the Colossal Clean Crawled "
+                "Corpus (C4)."
+            ),
+            "params": 0,
+            "official_name": "T5",
+            "path": "t5",
+            "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md",
+        },
+        "config": {
+            "vocabulary_size": 32128,
+            "num_layers": 24,
+            "num_heads": 16,
+            "hidden_dim": 1024,
+            "intermediate_dim": 4096,
+            "dropout": 0.1,
+            "activation": "relu",
+            "use_gated_activation": False,
+            "layer_norm_epsilon": 1e-06,
+        },
+        "preprocessor_config": {},
+    },
+    "flan_small_multi": {
+        "metadata": {
+            "description": (
+                "8-layer T5 model. Trained on the Colossal Clean Crawled "
+                "Corpus (C4)."
+            ),
+            "params": 0,
+            "official_name": "T5",
+            "path": "t5",
+            "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md",
+        },
+        "config": {
+            "vocabulary_size": 32128,
+            "num_layers": 8,
+            "num_heads": 6,
+            "hidden_dim": 512,
+            "intermediate_dim": 1024,
+            "key_value_dim": 64,
+            "dropout": 0.1,
+            "activation": "gelu",
+            "use_gated_activation": True,
+            "layer_norm_epsilon": 1e-06,
+        },
+        "preprocessor_config": {},
+    },
+    "flan_base_multi": {
+        "metadata": {
+            "description": (
+                "12-layer T5 model. Trained on the Colossal Clean Crawled "
+                "Corpus (C4)."
+ ), + "params": 0, + "official_name": "T5", + "path": "t5", + "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", + }, + "config": { + "vocabulary_size": 32128, + "num_layers": 12, + "num_heads": 12, + "hidden_dim": 768, + "intermediate_dim": 2048, + "dropout": 0.1, + "activation": "gelu", + "use_gated_activation": True, + "layer_norm_epsilon": 1e-06, + }, + "preprocessor_config": {}, + }, + "flan_large_multi": { + "metadata": { + "description": ( + "24-layer T5 model. Trained on the Colossal Clean Crawled " + "Corpus (C4)." + ), + "params": 0, + "official_name": "T5", + "path": "t5", + "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", + }, + "config": { + "vocabulary_size": 32128, + "num_layers": 24, + "num_heads": 16, + "hidden_dim": 1024, + "intermediate_dim": 2816, + "dropout": 0.1, + "activation": "gelu", + "use_gated_activation": True, + "layer_norm_epsilon": 1e-06, + }, + "preprocessor_config": {}, + }, +} diff --git a/keras_nlp/models/t5/t5_transformer_layer.py b/keras_nlp/models/t5/t5_transformer_layer.py index 22c2dc1c74..655019b451 100644 --- a/keras_nlp/models/t5/t5_transformer_layer.py +++ b/keras_nlp/models/t5/t5_transformer_layer.py @@ -27,6 +27,7 @@ def __init__( is_decoder, hidden_dim, intermediate_dim, + key_value_dim, dropout, activation, layer_norm_epsilon, @@ -40,10 +41,11 @@ def __init__( self.use_gated_activation = use_gated_activation self.self_attention = T5MultiHeadAttention( - is_decoder, - hidden_dim, - num_heads, - dropout, + is_decoder=is_decoder, + hidden_dim=hidden_dim, + key_value_dim=key_value_dim, + num_heads=num_heads, + dropout=dropout, use_relative_attention_bias=use_relative_attention_bias, name="self_attention", ) @@ -52,16 +54,22 @@ def __init__( if self.is_decoder: self.cross_attention = T5MultiHeadAttention( - is_decoder, - hidden_dim, - num_heads, - dropout, + is_decoder=is_decoder, + hidden_dim=hidden_dim, + key_value_dim=key_value_dim, + num_heads=num_heads, + dropout=dropout, use_relative_attention_bias=False, name="cross_attention", ) self.cross_attention_layer_norm = T5LayerNorm(layer_norm_epsilon) self.cross_attention_dropout = keras.layers.Dropout(dropout) + if activation == "gelu": + activation = keras.activations.get("keras_nlp>gelu_approximate") + else: + activation = keras.activations.get(activation) + self.input_projector = keras.layers.Dense( intermediate_dim, use_bias=False, diff --git a/tools/checkpoint_conversion/convert_t5_checkpoints.py b/tools/checkpoint_conversion/convert_t5_checkpoints.py new file mode 100644 index 0000000000..b04a9a319c --- /dev/null +++ b/tools/checkpoint_conversion/convert_t5_checkpoints.py @@ -0,0 +1,367 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import math +import os +import shutil + +import numpy as np +import transformers +from absl import app +from absl import flags +from checkpoint_conversion_utils import get_md5_checksum +from keras_core import ops + +import keras_nlp + +PRESET_MAP = { + "t5_small_multi": "t5-small", + "t5_base_multi": "t5-base", + "t5_large_multi": "t5-large", + "flan_small_multi": "google/flan-t5-small", + "flan_base_multi": "google/flan-t5-base", + "flan_large_multi": "google/flan-t5-large", +} + +FLAGS = flags.FLAGS + +flags.DEFINE_string( + "preset", "t5_base_multi", f'Must be one of {",".join(PRESET_MAP.keys())}' +) +os.environ["KERAS_BACKEND"] = "torch" + + +def extract_vocab(hf_tokenizer): + proto_path = f"./{FLAGS.preset}/vocab.spm" + print(f"\n-> Save KerasNLP vocab to `{proto_path}`.") + + # Huggingface has a save_vocabulary function but it's not byte-for-byte + # with the source. Instead copy the original downloaded file directly. + shutil.copyfile( + transformers.utils.hub.get_file_from_repo( + hf_tokenizer.name_or_path, "spiece.model" + ), + proto_path, + ) + + keras_tokenizer = keras_nlp.models.T5Tokenizer( + proto=proto_path, + ) + + print("-> Print MD5 checksum of the vocab files.") + print(f"`{proto_path}` md5sum: ", get_md5_checksum(proto_path)) + + return keras_tokenizer + + +def convert_checkpoints(hf_model): + keras_nlp_model = keras_nlp.models.T5Backbone.from_preset( + FLAGS.preset, load_weights=False + ) + + hf_wts = hf_model.state_dict() + print("Original weights:") + print(list(hf_wts.keys())) + + for i in range(keras_nlp_model.num_layers): + for section in ["encoder", "decoder"]: + n = 0 + + # Token embedding layer + keras_nlp_model.get_layer("token_embedding").embeddings.assign( + hf_wts[f"{section}.embed_tokens.weight"] + ) + + # Query, key, value, and output projectors in self-attention + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.query_projector.kernel.assign( + hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.q.weight"] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.key_projector.kernel.assign( + hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.k.weight"] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.value_projector.kernel.assign( + hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.v.weight"] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.output_projector.kernel.assign( + hf_wts[f"{section}.block.{i}.layer.{n}.SelfAttention.o.weight"] + .transpose(1, 0) + .numpy() + ) + + # Add relative attention bias + if keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.use_relative_attention_bias: + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention.relative_attention_bias.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.SelfAttention.relative_attention_bias.weight" + ].numpy() + ) + + # Self-attention norm + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).self_attention_layer_norm.weight.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.layer_norm.weight" + ].numpy() + ) + + # Increment for next layer + n += 1 + + if section == "decoder": + # Cross-attention QKV and output proj (one between encoder and decoder) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).cross_attention.query_projector.kernel.assign( + hf_wts[ + 
f"{section}.block.{i}.layer.{n}.EncDecAttention.q.weight" + ] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).cross_attention.key_projector.kernel.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.EncDecAttention.k.weight" + ] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).cross_attention.value_projector.kernel.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.EncDecAttention.v.weight" + ] + .transpose(1, 0) + .numpy() + ) + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).cross_attention.output_projector.kernel.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.EncDecAttention.o.weight" + ] + .transpose(1, 0) + .numpy() + ) + + # Cross-attention layer norm + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).cross_attention_layer_norm.weight.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.layer_norm.weight" + ].numpy() + ) + # Increment for next layer + n += 1 + + if keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).use_gated_activation: + # Input projection layer + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).input_projector.weights[0].assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.DenseReluDense.wi_0.weight" + ] + .transpose(1, 0) + .numpy() + ) + + # Gated activation layer + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).gate_projector.weights[0].assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.DenseReluDense.wi_1.weight" + ] + .transpose(1, 0) + .numpy() + ) + else: + # Input projection layer + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).input_projector.weights[0].assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.DenseReluDense.wi.weight" + ] + .transpose(1, 0) + .numpy() + ) + + # Output projection layer + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).output_projector.weights[0].assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.DenseReluDense.wo.weight" + ] + .transpose(1, 0) + .numpy() + ) + + # Layer norm + keras_nlp_model.get_layer( + f"transformer_{section}_layer_{i}" + ).layer_norm.weight.assign( + hf_wts[ + f"{section}.block.{i}.layer.{n}.layer_norm.weight" + ].numpy() + ) + + # Final normalization + keras_nlp_model.get_layer(f"{section}_output_layer_norm").weights[ + -1 + ].assign(hf_wts[f"{section}.final_layer_norm.weight"].numpy()) + + return keras_nlp_model + + +def check_output( + keras_model, + keras_tokenizer, + hf_model, + hf_tokenizer, +): + print("\n-> Compare the outputs.") + encoder_input = ["the quick brown fox jumped."] + decoder_input = ["the quick brown fox fell."] + + sequence_length = 12 + + # KerasNLP Tokenization + packer = keras_nlp.layers.StartEndPacker( + sequence_length=sequence_length, + pad_value=keras_tokenizer.pad_token_id, + end_value=keras_tokenizer.end_token_id, + ) + encoder_token_ids = packer(keras_tokenizer(encoder_input)) + encoder_padding_mask = encoder_token_ids != keras_tokenizer.pad_token_id + decoder_token_ids = packer(keras_tokenizer(decoder_input)) + decoder_padding_mask = decoder_token_ids != keras_tokenizer.pad_token_id + keras_inputs = { + "encoder_token_ids": encoder_token_ids, + "encoder_padding_mask": encoder_padding_mask, + "decoder_token_ids": decoder_token_ids, + "decoder_padding_mask": decoder_padding_mask, + } + + # HF Tokenization. 
+    hf_encoder_inputs = hf_tokenizer(
+        encoder_input,
+        padding="max_length",
+        max_length=sequence_length,
+        return_tensors="pt",
+    )
+    hf_decoder_inputs = hf_tokenizer(
+        decoder_input,
+        padding="max_length",
+        max_length=sequence_length,
+        return_tensors="pt",
+    )
+    hf_inputs = {
+        "input_ids": hf_encoder_inputs["input_ids"],
+        "attention_mask": hf_encoder_inputs["attention_mask"],
+        "decoder_input_ids": hf_decoder_inputs["input_ids"],
+        "decoder_attention_mask": hf_decoder_inputs["attention_mask"],
+    }
+
+    # Compare tokenized inputs. This should be a complete match.
+    print("-> KerasNLP inputs:")
+    for k, v in keras_inputs.items():
+        print(k, v)
+    print("-> HF inputs:")
+    for k, v in hf_inputs.items():
+        print(k, v)
+
+    # Forward pass
+    keras_outputs = keras_model(keras_inputs)
+    hf_outputs = hf_model(**hf_inputs)
+
+    # Only compare non-padded token ids.
+    keras_outputs = keras_outputs["decoder_sequence_output"]
+    keras_outputs = ops.take_along_axis(
+        keras_outputs, ops.where(decoder_padding_mask)
+    )
+    hf_outputs = hf_outputs.last_hidden_state
+    hf_outputs = ops.take_along_axis(
+        hf_outputs, ops.where(decoder_padding_mask)
+    )
+
+    print("-> KerasNLP output:", keras_outputs[0:5])
+    print("-> HF output:", hf_outputs[0:5])
+    np.testing.assert_allclose(
+        keras_outputs.detach().numpy(), hf_outputs.detach().numpy(), atol=1e-5
+    )
+
+
+def count_params(weights):
+    shapes = [v.shape for v in weights]
+    return int(sum(math.prod(p) for p in shapes))
+
+
+def main(_):
+    hf_id = PRESET_MAP[FLAGS.preset]
+    shutil.rmtree(f"./{FLAGS.preset}", ignore_errors=True)
+    os.mkdir(f"./{FLAGS.preset}")
+
+    print("\n-> Convert weights.")
+    hf_model = transformers.AutoModel.from_pretrained(hf_id)
+    keras_model = convert_checkpoints(hf_model)
+
+    # Save the model.
+    model_path = f"./{FLAGS.preset}/model.weights.h5"
+    print(f"\n-> Save KerasNLP model weights to `{model_path}`.")
+    keras_model.save_weights(model_path)
+    print("-> Print MD5 checksum of the model weights files.")
+    print(f"`{model_path}` md5sum: ", get_md5_checksum(model_path))
+    print(f"-> Param count {count_params(keras_model.weights)}")
+
+    print("\n-> Convert vocab.")
+    hf_tokenizer = transformers.AutoTokenizer.from_pretrained(hf_id)
+    keras_tokenizer = extract_vocab(hf_tokenizer)
+
+    check_output(
+        keras_model,
+        keras_tokenizer,
+        hf_model,
+        hf_tokenizer,
+    )
+
+
+if __name__ == "__main__":
+    flags.mark_flag_as_required("preset")
+    app.run(main)

From 4c434284e06efc53e5684fa84afa7d63b92ffe98 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Tue, 24 Oct 2023 13:15:19 -0700
Subject: [PATCH 15/87] Use gelu_approximate directly in t5 presets (#1284)

We should keep `activation="gelu"` meaning the same canonical thing it
means across Keras. Let's use our string identifier for approximate gelu
directly in the preset.

---
 keras_nlp/models/t5/t5_backbone.py          | 5 ++---
 keras_nlp/models/t5/t5_presets.py           | 6 +++---
 keras_nlp/models/t5/t5_transformer_layer.py | 5 -----
 3 files changed, 5 insertions(+), 11 deletions(-)

diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py
index ace329c3f6..9d64edd3bc 100644
--- a/keras_nlp/models/t5/t5_backbone.py
+++ b/keras_nlp/models/t5/t5_backbone.py
@@ -58,8 +58,7 @@ class T5Backbone(Backbone):
         dropout: float. Dropout probability for the Transformer layers.
         activation: activation function (or activation string name). The
             activation to be used in the inner dense blocks of the
-
The original T5 architecture used `"relu"`, - but more recent versions use `"gelu"`. Defaults to `"gelu"`. + Transformer layers. Defaults to `"relu"`. use_gated_activation: boolean. Whether to use activation gating in the inner dense blocks of the Transformer layers. The original T5 architecture didn't use gating, but more @@ -80,7 +79,7 @@ def __init__( intermediate_dim, key_value_dim=None, dropout=0.1, - activation="gelu", + activation="relu", use_gated_activation=True, layer_norm_epsilon=1e-06, tie_embedding_weights=False, diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index 5bba204051..cbdde0391a 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -106,7 +106,7 @@ "intermediate_dim": 1024, "key_value_dim": 64, "dropout": 0.1, - "activation": "gelu", + "activation": "keras_nlp>gelu_approximate", "use_gated_activation": True, "layer_norm_epsilon": 1e-06, }, @@ -130,7 +130,7 @@ "hidden_dim": 768, "intermediate_dim": 2048, "dropout": 0.1, - "activation": "gelu", + "activation": "keras_nlp>gelu_approximate", "use_gated_activation": True, "layer_norm_epsilon": 1e-06, }, @@ -154,7 +154,7 @@ "hidden_dim": 1024, "intermediate_dim": 2816, "dropout": 0.1, - "activation": "gelu", + "activation": "keras_nlp>gelu_approximate", "use_gated_activation": True, "layer_norm_epsilon": 1e-06, }, diff --git a/keras_nlp/models/t5/t5_transformer_layer.py b/keras_nlp/models/t5/t5_transformer_layer.py index 655019b451..27b4c9892c 100644 --- a/keras_nlp/models/t5/t5_transformer_layer.py +++ b/keras_nlp/models/t5/t5_transformer_layer.py @@ -65,11 +65,6 @@ def __init__( self.cross_attention_layer_norm = T5LayerNorm(layer_norm_epsilon) self.cross_attention_dropout = keras.layers.Dropout(dropout) - if activation == "gelu": - activation = keras.activations.get("keras_nlp>gelu_approximate") - else: - activation = keras.activations.get(activation) - self.input_projector = keras.layers.Dense( intermediate_dim, use_bias=False, From aff79b3ad2f25aba967bc80b2ffafd1ad762f413 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:32:24 -0700 Subject: [PATCH 16/87] Add preset tests and weights URLs (#1285) * Add preset tests and weights URLs * Change filename conditional --- keras_nlp/models/backbone.py | 4 +++- keras_nlp/models/t5/t5_backbone_test.py | 29 ++++++++++++++++++++++++ keras_nlp/models/t5/t5_presets.py | 24 ++++++++++++++++++++ keras_nlp/models/t5/t5_tokenizer_test.py | 20 ++++++++++++++++ 4 files changed, 76 insertions(+), 1 deletion(-) diff --git a/keras_nlp/models/backbone.py b/keras_nlp/models/backbone.py index b7a7ba2119..a55f767394 100644 --- a/keras_nlp/models/backbone.py +++ b/keras_nlp/models/backbone.py @@ -112,12 +112,14 @@ def from_preset( if not load_weights: return model + filename = os.path.basename(metadata["weights_url"]) weights = keras.utils.get_file( - "model.h5", + filename, metadata["weights_url"], cache_subdir=os.path.join("models", preset), file_hash=metadata["weights_hash"], ) + model.load_weights(weights) return model diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index b8041e876e..9006925f10 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -53,3 +53,32 @@ def test_saved_model(self): init_kwargs=self.init_kwargs, input_data=self.input_data, ) + + @pytest.mark.large + def test_smallest_preset(self): + self.run_preset_test( + cls=T5Backbone, + 
preset="t5_small_multi", + input_data=self.input_data, + expected_output_shape={ + "encoder_sequence_output": (2, 3, 512), + "decoder_sequence_output": (2, 3, 512), + }, + expected_partial_output={ + "encoder_sequence_output": ops.array( + [-0.0034, 0.0293, -0.0827, -0.1076] + ), + "decoder_sequence_output": ops.array( + [0.0097, 0.3576, -0.1508, 0.0150] + ), + }, + ) + + @pytest.mark.extra_large + def test_all_presets(self): + for preset in T5Backbone.presets: + self.run_preset_test( + cls=T5Backbone, + preset=preset, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index cbdde0391a..1c737a863b 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -38,6 +38,10 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/model.weights.h5", + "weights_hash": "5a241ea61142eaf96ac1805898a2f2d1", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "t5_base_multi": { "metadata": { @@ -62,6 +66,10 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/model.weights.h5", + "weights_hash": "9bef4c6650d91d1ea438ee4a2bea47ad", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "t5_large_multi": { "metadata": { @@ -86,6 +94,10 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/model.weights.h5", + "weights_hash": "eab8eee1bad033e65324a71cd6e5a8e9", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_small_multi": { "metadata": { @@ -111,6 +123,10 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/model.weights.h5", + "weights_hash": "4e39b0bab56606a9ab2b8e52a6bc7a9f", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_base_multi": { "metadata": { @@ -135,6 +151,10 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/model.weights.h5", + "weights_hash": "b529270c5361db36d359a46403532b5c", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_large_multi": { "metadata": { @@ -159,5 +179,9 @@ "layer_norm_epsilon": 1e-06, }, "preprocessor_config": {}, + "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/model.weights.h5", + "weights_hash": "50b8d3c88fc10db07e495d79ff29a1b6", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/vocab.spm", + "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, } diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py index f8cef35c30..9f6f4e9e8f 100644 --- a/keras_nlp/models/t5/t5_tokenizer_test.py +++ b/keras_nlp/models/t5/t5_tokenizer_test.py @@ -14,6 +14,7 
@@
 import io
 
+import pytest
 import sentencepiece
 import tensorflow as tf
 
 from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer
@@ -64,3 +65,22 @@ def test_errors_missing_special_tokens(self):
         )
         with self.assertRaises(ValueError):
             T5Tokenizer(proto=bytes_io.getvalue())
+
+    @pytest.mark.large
+    def test_smallest_preset(self):
+        for preset in T5Tokenizer.presets:
+            self.run_preset_test(
+                cls=T5Tokenizer,
+                preset=preset,
+                input_data=["The quick brown fox."],
+                expected_output=[[1996, 4248, 2829, 4419, 1012]],
+            )
+
+    @pytest.mark.extra_large
+    def test_all_presets(self):
+        for preset in T5Tokenizer.presets:
+            self.run_preset_test(
+                cls=T5Tokenizer,
+                preset=preset,
+                input_data=self.input_data,
+            )

From bbb4b1e3951f4d4803e73e336e6eab3d82e32d3c Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 26 Oct 2023 13:36:36 -0700
Subject: [PATCH 17/87] Support loading keras 3 nightly (#1286)

The nightly version will actually be considered to have a value less
than "3.0.0" by python packaging, as the dev prefix indicates a
pre-release.
---
 keras_nlp/backend/config.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/backend/config.py b/keras_nlp/backend/config.py
index 9b85907d00..7436012d1c 100644
--- a/keras_nlp/backend/config.py
+++ b/keras_nlp/backend/config.py
@@ -66,7 +66,7 @@
 _MULTI_BACKEND = True
 
 # If keras is version 3, use multi-backend keras (our only option).
-_IS_KERAS_3 = version.parse(keras.__version__) >= version.parse("3.0.0")
+_IS_KERAS_3 = version.parse(keras.__version__) >= version.parse("3.0.0.dev0")
 
 if _IS_KERAS_3:
     _MULTI_BACKEND = True
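
Note on the version floor above: `packaging` follows PEP 440, where a
`.devN` suffix marks a pre-release that sorts *before* the final release
it leads up to. A minimal sketch of the ordering this patch relies on
("3.0.0.dev2023" is a made-up nightly version string used for
illustration):

    from packaging import version

    # A dev pre-release compares lower than its final release, so the old
    # ">= 3.0.0" check rejected Keras 3 nightlies...
    assert version.parse("3.0.0.dev2023") < version.parse("3.0.0")

    # ...while comparing against "3.0.0.dev0" accepts them, because dev
    # releases of the same version sort by their dev number.
    assert version.parse("3.0.0.dev2023") >= version.parse("3.0.0.dev0")

From d254b026cb8cd331de5f409a090e3c98f69a0325 Mon Sep 17 00:00:00 2001
From: Tirth Patel
Date: Thu, 26 Oct 2023 20:36:47 +0000
Subject: [PATCH 18/87] Remove the use of `SentencePieceTrainer` from tests (#1283)

* Remove SentencePieceTrainer from keras_nlp/models/albert
* Remove SentencePieceTrainer from keras_nlp/models/deberta_v3
* Remove SentencePieceTrainer from keras_nlp/models/f_net
* Remove SentencePieceTrainer from keras_nlp/models/t5
* Remove SentencePieceTrainer from keras_nlp/models/xlm_roberta
* Remove the .absolute() calls
* Make the bad sentencepiece proto common between all the tests
* Factor missing instances out.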
* Address review comments - Use one proto per model; modify tests accordingly - Add a comment saying where the test proto file was generated from - Rename the files from `*_sentencepiece.proto` to `*_test_vocab.spm` - Rename the bad proto file to `no_special_token_vocab.spm` - Add a method to get the test dir - Remove the underscores from the sentencepiece util file - Save the file in `train_sentencepiece` function itself - Address the XLM Roberta test failure * create_bad_proto.py -> create_no_special_token_proto.py * Update the SentencePieceTokenizer test proto file * Use os.path.join and resolve XLMRoberta failures * Fix T5 Tokenizer test failures * Fix a merge artifact --- .../models/albert/albert_classifier_test.py | 29 +++------- .../albert_masked_lm_preprocessor_test.py | 25 +++------ .../models/albert/albert_masked_lm_test.py | 28 +++------- .../models/albert/albert_preprocessor_test.py | 25 +++------ .../models/albert/albert_tokenizer_test.py | 43 +++++---------- .../deberta_v3/deberta_v3_classifier_test.py | 27 +++------- .../deberta_v3_masked_lm_preprocessor_test.py | 25 +++------ .../deberta_v3/deberta_v3_masked_lm_test.py | 27 +++------- .../deberta_v3_preprocessor_test.py | 25 +++------ .../deberta_v3/deberta_v3_tokenizer_test.py | 51 ++++++------------ .../models/f_net/f_net_classifier_test.py | 27 +++------- .../f_net_masked_lm_preprocessor_test.py | 23 ++------ .../models/f_net/f_net_masked_lm_test.py | 27 +++------- .../models/f_net/f_net_preprocessor_test.py | 23 ++------ .../models/f_net/f_net_tokenizer_test.py | 43 +++++---------- keras_nlp/models/t5/t5_tokenizer_test.py | 43 ++++----------- .../xlm_roberta_classifier_test.py | 21 +++----- ...xlm_roberta_masked_lm_preprocessor_test.py | 26 ++++----- .../xlm_roberta/xlm_roberta_masked_lm_test.py | 24 +++------ .../xlm_roberta_preprocessor_test.py | 25 +++------ .../xlm_roberta/xlm_roberta_tokenizer_test.py | 24 +++------ keras_nlp/tests/test_case.py | 4 ++ .../tests/test_data/albert_test_vocab.spm | Bin 0 -> 237831 bytes .../tests/test_data/deberta_v3_test_vocab.spm | Bin 0 -> 237831 bytes .../tests/test_data/f_net_test_vocab.spm | Bin 0 -> 237831 bytes .../test_data/no_special_token_vocab.spm | Bin 0 -> 237677 bytes keras_nlp/tests/test_data/t5_test_vocab.spm | Bin 0 -> 237814 bytes .../tests/test_data/tokenizer_test_vocab.spm | Bin 0 -> 237695 bytes .../test_data/xlm_roberta_test_vocab.spm | Bin 0 -> 237831 bytes .../sentence_piece_tokenizer_test.py | 26 +++------ tools/sentencepiece_testing/__init__.py | 13 +++++ .../create_albert_test_proto.py | 37 +++++++++++++ .../create_deberta_v3_test_proto.py | 37 +++++++++++++ .../create_f_net_test_proto.py | 37 +++++++++++++ .../create_no_special_token_proto.py | 30 +++++++++++ .../create_sentence_piece_tokenizer_proto.py | 28 ++++++++++ .../create_t5_test_proto.py | 36 +++++++++++++ .../create_xlm_roberta_test_proto.py | 37 +++++++++++++ tools/sentencepiece_testing/utils.py | 33 ++++++++++++ 39 files changed, 467 insertions(+), 462 deletions(-) create mode 100644 keras_nlp/tests/test_data/albert_test_vocab.spm create mode 100644 keras_nlp/tests/test_data/deberta_v3_test_vocab.spm create mode 100644 keras_nlp/tests/test_data/f_net_test_vocab.spm create mode 100644 keras_nlp/tests/test_data/no_special_token_vocab.spm create mode 100644 keras_nlp/tests/test_data/t5_test_vocab.spm create mode 100644 keras_nlp/tests/test_data/tokenizer_test_vocab.spm create mode 100644 keras_nlp/tests/test_data/xlm_roberta_test_vocab.spm create mode 100644 tools/sentencepiece_testing/__init__.py 
create mode 100644 tools/sentencepiece_testing/create_albert_test_proto.py create mode 100644 tools/sentencepiece_testing/create_deberta_v3_test_proto.py create mode 100644 tools/sentencepiece_testing/create_f_net_test_proto.py create mode 100644 tools/sentencepiece_testing/create_no_special_token_proto.py create mode 100644 tools/sentencepiece_testing/create_sentence_piece_tokenizer_proto.py create mode 100644 tools/sentencepiece_testing/create_t5_test_proto.py create mode 100644 tools/sentencepiece_testing/create_xlm_roberta_test_proto.py create mode 100644 tools/sentencepiece_testing/utils.py diff --git a/keras_nlp/models/albert/albert_classifier_test.py b/keras_nlp/models/albert/albert_classifier_test.py index e2581df6a1..ebf8a630eb 100644 --- a/keras_nlp/models/albert/albert_classifier_test.py +++ b/keras_nlp/models/albert/albert_classifier_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.albert.albert_backbone import AlbertBackbone from keras_nlp.models.albert.albert_classifier import AlbertClassifier @@ -27,26 +26,14 @@ class AlbertClassifierTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) self.preprocessor = AlbertPreprocessor( - AlbertTokenizer(proto=bytes_io.getvalue()), - sequence_length=5, + AlbertTokenizer( + # Generated using create_albert_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "albert_test_vocab.spm" + ), + sequence_length=5, + ) ) self.backbone = AlbertBackbone( vocabulary_size=self.preprocessor.tokenizer.vocabulary_size(), diff --git a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py index 36eef72f39..79d3a36bbb 100644 --- a/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.albert.albert_masked_lm_preprocessor import ( AlbertMaskedLMPreprocessor, @@ -26,24 +25,12 @@ class AlbertMaskedLMPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", + self.tokenizer = AlbertTokenizer( + # Generated using create_albert_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "albert_test_vocab.spm" + ) ) - self.tokenizer = AlbertTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. 
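
For reference between these hunks: checked-in assets like
`albert_test_vocab.spm` come from the new `tools/sentencepiece_testing`
scripts listed above, whose contents are not shown in this diff. A
hedged sketch of what `create_albert_test_proto.py` plausibly does,
assembled from the trainer arguments the old inline setUp() used (the
output path and the `<pad>`/`<unk>` piece strings are filled in from
what `AlbertTokenizer` expects, not taken from this patch):

    import sentencepiece

    # Train the tiny word-level test model once and write it to the shared
    # test_data directory, instead of retraining it in every test's setUp().
    with open("keras_nlp/tests/test_data/albert_test_vocab.spm", "wb") as f:
        sentencepiece.SentencePieceTrainer.train(
            sentence_iterator=iter(
                ["the quick brown fox", "the earth is round"]
            ),
            model_writer=f,
            vocab_size=12,
            model_type="WORD",
            pad_id=0,
            unk_id=1,
            bos_id=2,
            eos_id=3,
            pad_piece="<pad>",
            unk_piece="<unk>",
            bos_piece="[CLS]",
            eos_piece="[SEP]",
            user_defined_symbols="[MASK]",
        )

Tests then point `AlbertTokenizer(proto=...)` at that file through
`self.get_test_data_dir()`, as the hunks in this patch do.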
diff --git a/keras_nlp/models/albert/albert_masked_lm_test.py b/keras_nlp/models/albert/albert_masked_lm_test.py index 456b0edda4..f992ed2b37 100644 --- a/keras_nlp/models/albert/albert_masked_lm_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.albert.albert_backbone import AlbertBackbone from keras_nlp.models.albert.albert_masked_lm import AlbertMaskedLM @@ -29,25 +28,14 @@ class AlbertMaskedLMTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) self.preprocessor = AlbertMaskedLMPreprocessor( - AlbertTokenizer(proto=bytes_io.getvalue()), + AlbertTokenizer( + # Generated using create_albert_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "albert_test_vocab.spm" + ), + sequence_length=5, + ), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, diff --git a/keras_nlp/models/albert/albert_preprocessor_test.py b/keras_nlp/models/albert/albert_preprocessor_test.py index 95cb2c832e..7d6fb4cfd4 100644 --- a/keras_nlp/models/albert/albert_preprocessor_test.py +++ b/keras_nlp/models/albert/albert_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.albert.albert_preprocessor import AlbertPreprocessor from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer @@ -24,24 +23,12 @@ class AlbertPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", + self.tokenizer = AlbertTokenizer( + # Generated using create_albert_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "albert_test_vocab.spm" + ) ) - self.tokenizer = AlbertTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, diff --git a/keras_nlp/models/albert/albert_tokenizer_test.py b/keras_nlp/models/albert/albert_tokenizer_test.py index e645436c09..ca80ace281 100644 --- a/keras_nlp/models/albert/albert_tokenizer_test.py +++ b/keras_nlp/models/albert/albert_tokenizer_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.albert.albert_tokenizer import AlbertTokenizer from keras_nlp.tests.test_case import TestCase @@ -23,24 +22,12 @@ class AlbertTokenizerTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) - self.init_kwargs = {"proto": bytes_io.getvalue()} + self.init_kwargs = { + # Generated using create_albert_test_proto.py + "proto": os.path.join( + self.get_test_data_dir(), "albert_test_vocab.spm" + ) + } self.input_data = ["the quick brown fox.", "the earth is round."] def test_tokenizer_basics(self): @@ -52,17 +39,13 @@ def test_tokenizer_basics(self): ) def test_errors_missing_special_tokens(self): - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(["abc"]), - model_writer=bytes_io, - vocab_size=5, - pad_id=-1, - eos_id=-1, - bos_id=-1, - ) with self.assertRaises(ValueError): - AlbertTokenizer(proto=bytes_io.getvalue()) + AlbertTokenizer( + # Generated using create_no_special_token_proto.py + proto=os.path.join( + self.get_test_data_dir(), "no_special_token_vocab.spm" + ) + ) @pytest.mark.large def test_smallest_preset(self): diff --git a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py index 046c18dd5e..0e0ab7642d 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.models.deberta_v3.deberta_v3_classifier import ( @@ -31,25 +30,13 @@ class DebertaV3ClassifierTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, - pad_piece="[PAD]", - bos_piece="[CLS]", - eos_piece="[SEP]", - unk_piece="[UNK]", - user_defined_symbols="[MASK]", - ) self.preprocessor = DebertaV3Preprocessor( - DebertaV3Tokenizer(proto=bytes_io.getvalue()), + DebertaV3Tokenizer( + # Generated using create_deberta_v3_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "deberta_v3_test_vocab.spm" + ) + ), sequence_length=5, ) self.backbone = DebertaV3Backbone( diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py index faf1ee1a8f..217980ea59 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.deberta_v3.deberta_v3_masked_lm_preprocessor import ( DebertaV3MaskedLMPreprocessor, @@ -26,24 +25,12 @@ class DebertaV3MaskedLMPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, - pad_piece="[PAD]", - bos_piece="[CLS]", - eos_piece="[SEP]", - unk_piece="[UNK]", - user_defined_symbols="[MASK]", + self.tokenizer = DebertaV3Tokenizer( + # Generated using create_deberta_v3_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "deberta_v3_test_vocab.spm" + ) ) - self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py index 62f84b508c..32bf71de13 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.deberta_v3.deberta_v3_backbone import DebertaV3Backbone from keras_nlp.models.deberta_v3.deberta_v3_masked_lm import DebertaV3MaskedLM @@ -29,25 +28,13 @@ class DebertaV3MaskedLMTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, - pad_piece="[PAD]", - bos_piece="[CLS]", - eos_piece="[SEP]", - unk_piece="[UNK]", - user_defined_symbols="[MASK]", - ) self.preprocessor = DebertaV3MaskedLMPreprocessor( - DebertaV3Tokenizer(proto=bytes_io.getvalue()), + DebertaV3Tokenizer( + # Generated using create_deberta_v3_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "deberta_v3_test_vocab.spm" + ) + ), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, diff --git a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py index f6f648ab83..a50022f3c7 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.deberta_v3.deberta_v3_preprocessor import ( DebertaV3Preprocessor, @@ -26,24 +25,12 @@ class DebertaV3PreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, - pad_piece="[PAD]", - bos_piece="[CLS]", - eos_piece="[SEP]", - unk_piece="[UNK]", - user_defined_symbols="[MASK]", + self.tokenizer = DebertaV3Tokenizer( + # Generated using create_deberta_v3_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "deberta_v3_test_vocab.spm" + ) ) - self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py index c542de786d..fcaf637974 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.deberta_v3.deberta_v3_tokenizer import DebertaV3Tokenizer from keras_nlp.tests.test_case import TestCase @@ -23,24 +22,12 @@ class DebertaV3TokenizerTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=11, - model_type="WORD", - pad_id=0, - bos_id=1, - eos_id=2, - unk_id=3, - pad_piece="[PAD]", - bos_piece="[CLS]", - eos_piece="[SEP]", - unk_piece="[UNK]", + # Generated using create_deberta_v3_test_proto.py + proto = os.path.join( + self.get_test_data_dir(), "deberta_v3_test_vocab.spm" ) - self.tokenizer = DebertaV3Tokenizer(proto=bytes_io.getvalue()) - self.init_kwargs = {"proto": bytes_io.getvalue()} + self.tokenizer = DebertaV3Tokenizer(proto=proto) + self.init_kwargs = {"proto": proto} self.input_data = ["the quick brown fox.", "the earth is round."] def test_tokenizer_basics(self): @@ -48,28 +35,24 @@ def test_tokenizer_basics(self): cls=DebertaV3Tokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, - expected_output=[[4, 9, 5, 3], [4, 6, 8, 3]], + expected_output=[[5, 10, 6, 3], [5, 7, 9, 3]], ) def test_errors_missing_special_tokens(self): - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(["abc"]), - model_writer=bytes_io, - vocab_size=5, - pad_id=-1, - eos_id=-1, - bos_id=-1, - ) with self.assertRaises(ValueError): - DebertaV3Tokenizer(proto=bytes_io.getvalue()) + DebertaV3Tokenizer( + # Generated using create_no_special_token_proto.py + proto=os.path.join( + self.get_test_data_dir(), "no_special_token_vocab.spm" + ) + ) def test_mask_token_handling(self): tokenizer = DebertaV3Tokenizer(**self.init_kwargs) - self.assertEqual(tokenizer.get_vocabulary()[11], "[MASK]") - self.assertEqual(tokenizer.id_to_token(11), "[MASK]") - self.assertEqual(tokenizer.token_to_id("[MASK]"), 11) - input_data = [[4, 9, 5, 7, self.tokenizer.mask_token_id]] + self.assertEqual(tokenizer.get_vocabulary()[4], "[MASK]") + self.assertEqual(tokenizer.id_to_token(4), 
"[MASK]") + self.assertEqual(tokenizer.token_to_id("[MASK]"), 4) + input_data = [[5, 10, 6, 8, self.tokenizer.mask_token_id]] output = tokenizer.detokenize(input_data) self.assertEqual(output, ["the quick brown fox"]) diff --git a/keras_nlp/models/f_net/f_net_classifier_test.py b/keras_nlp/models/f_net/f_net_classifier_test.py index b972f64655..c871fbcc7b 100644 --- a/keras_nlp/models/f_net/f_net_classifier_test.py +++ b/keras_nlp/models/f_net/f_net_classifier_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.models.f_net.f_net_classifier import FNetClassifier @@ -27,25 +26,13 @@ class FNetClassifierTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) self.preprocessor = FNetPreprocessor( - FNetTokenizer(proto=bytes_io.getvalue()), + FNetTokenizer( + # Generated using create_f_net_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "f_net_test_vocab.spm" + ) + ), sequence_length=5, ) self.backbone = FNetBackbone( diff --git a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py index eb7036005a..5f72081a0d 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.f_net.f_net_masked_lm_preprocessor import ( FNetMaskedLMPreprocessor, @@ -26,24 +25,10 @@ class FNetMaskedLMPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", + self.tokenizer = FNetTokenizer( + # Generated using create_f_net_test_proto.py + proto=os.path.join(self.get_test_data_dir(), "f_net_test_vocab.spm") ) - self.tokenizer = FNetTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. diff --git a/keras_nlp/models/f_net/f_net_masked_lm_test.py b/keras_nlp/models/f_net/f_net_masked_lm_test.py index dc8bb8e9b3..b4931a76fc 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_test.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from keras_nlp.models.f_net.f_net_masked_lm import FNetMaskedLM @@ -29,25 +28,13 @@ class FNetMaskedLMTest(TestCase): def setUp(self): # Setup model. 
- vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) self.preprocessor = FNetMaskedLMPreprocessor( - FNetTokenizer(proto=bytes_io.getvalue()), + FNetTokenizer( + # Generated using create_f_net_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "f_net_test_vocab.spm" + ) + ), # Simplify our testing by masking every available token. mask_selection_rate=1.0, mask_token_rate=1.0, diff --git a/keras_nlp/models/f_net/f_net_preprocessor_test.py b/keras_nlp/models/f_net/f_net_preprocessor_test.py index f5470c700d..f67737c828 100644 --- a/keras_nlp/models/f_net/f_net_preprocessor_test.py +++ b/keras_nlp/models/f_net/f_net_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.f_net.f_net_preprocessor import FNetPreprocessor from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer @@ -24,24 +23,10 @@ class FNetPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", + self.tokenizer = FNetTokenizer( + # Generated using create_f_net_test_proto.py + proto=os.path.join(self.get_test_data_dir(), "f_net_test_vocab.spm") ) - self.tokenizer = FNetTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, diff --git a/keras_nlp/models/f_net/f_net_tokenizer_test.py b/keras_nlp/models/f_net/f_net_tokenizer_test.py index 80b7f9e037..8d3511dee7 100644 --- a/keras_nlp/models/f_net/f_net_tokenizer_test.py +++ b/keras_nlp/models/f_net/f_net_tokenizer_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.f_net.f_net_tokenizer import FNetTokenizer from keras_nlp.tests.test_case import TestCase @@ -23,24 +22,12 @@ class FNetTokenizerTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", - ) - self.init_kwargs = {"proto": bytes_io.getvalue()} + self.init_kwargs = { + # Generated using create_f_net_test_proto.py + "proto": os.path.join( + self.get_test_data_dir(), "f_net_test_vocab.spm" + ) + } self.input_data = ["the quick brown fox.", "the earth is round."] def test_tokenizer_basics(self): @@ -52,17 +39,13 @@ def test_tokenizer_basics(self): ) def test_errors_missing_special_tokens(self): - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(["abc"]), - model_writer=bytes_io, - vocab_size=5, - pad_id=-1, - eos_id=-1, - bos_id=-1, - ) with self.assertRaises(ValueError): - FNetTokenizer(proto=bytes_io.getvalue()) + FNetTokenizer( + # Generated using create_no_special_token_proto.py + proto=os.path.join( + self.get_test_data_dir(), "no_special_token_vocab.spm" + ) + ) @pytest.mark.large def test_smallest_preset(self): diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py index 9f6f4e9e8f..be07b486e4 100644 --- a/keras_nlp/models/t5/t5_tokenizer_test.py +++ b/keras_nlp/models/t5/t5_tokenizer_test.py @@ -12,11 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece -import tensorflow as tf from keras_nlp.models.t5.t5_tokenizer import T5Tokenizer from keras_nlp.tests.test_case import TestCase @@ -24,25 +22,10 @@ class T5TokenizerTest(TestCase): def setUp(self): - bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox", "the earth is round"] - ) - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), - model_writer=bytes_io, - vocab_size=11, - model_type="WORD", - bos_id=-1, - pad_id=0, - eos_id=1, - unk_id=2, - pad_piece="", - eos_piece="", - unk_piece="", - user_defined_symbols="[MASK]", - ) - self.init_kwargs = {"proto": bytes_io.getvalue()} + self.init_kwargs = { + # Generated using create_t5_test_proto.py + "proto": os.path.join(self.get_test_data_dir(), "t5_test_vocab.spm") + } self.input_data = ["the quick brown fox.", "the earth is round."] def test_tokenizer_basics(self): @@ -54,17 +37,13 @@ def test_tokenizer_basics(self): ) def test_errors_missing_special_tokens(self): - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(["abc"]), - model_writer=bytes_io, - vocab_size=5, - pad_id=-1, - eos_id=-1, - bos_id=-1, - ) with self.assertRaises(ValueError): - T5Tokenizer(proto=bytes_io.getvalue()) + T5Tokenizer( + # Generated using create_no_special_token_proto.py + proto=os.path.join( + self.get_test_data_dir(), "no_special_token_vocab.spm" + ) + ) @pytest.mark.large def test_smallest_preset(self): diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py index c123cc6bc0..8255a40cf5 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.models.xlm_roberta.xlm_roberta_classifier import ( @@ -33,19 +32,13 @@ class XLMRobertaClassifierTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=10, - model_type="WORD", - unk_id=0, - bos_id=1, - eos_id=2, - ) self.preprocessor = XLMRobertaPreprocessor( - XLMRobertaTokenizer(proto=bytes_io.getvalue()), + XLMRobertaTokenizer( + # Generated using create_xlm_roberta_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "xlm_roberta_test_vocab.spm" + ) + ), sequence_length=5, ) self.backbone = XLMRobertaBackbone( diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py index 6dd0bc0f71..c1bfc7242a 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm_preprocessor import ( XLMRobertaMaskedLMPreprocessor, @@ -28,19 +27,12 @@ class XLMRobertaMaskedLMPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=11, - model_type="WORD", - unk_id=0, - bos_id=1, - eos_id=2, - user_defined_symbols="[MASK]", + self.tokenizer = XLMRobertaTokenizer( + # Generated using create_xlm_roberta_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "xlm_roberta_test_vocab.spm" + ) ) - self.tokenizer = XLMRobertaTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, # Simplify our testing by masking every available token. @@ -59,11 +51,11 @@ def test_preprocessor_basics(self): input_data=self.input_data, expected_output=( { - "token_ids": [[0, 12, 12, 12, 12, 2, 1, 1, 1, 1, 1, 1]], + "token_ids": [[0, 13, 13, 13, 13, 2, 1, 1, 1, 1, 1, 1]], "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], "mask_positions": [[1, 2, 3, 4]], }, - [[5, 10, 6, 8]], + [[6, 11, 7, 9]], [[1.0, 1.0, 1.0, 1.0]], ), ) @@ -80,7 +72,7 @@ def test_no_masking_zero_rate(self): no_mask_preprocessor(input_data), ( { - "token_ids": [[0, 5, 10, 6, 8, 2, 1, 1, 1, 1, 1, 1]], + "token_ids": [[0, 6, 11, 7, 9, 2, 1, 1, 1, 1, 1, 1]], "padding_mask": [[1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]], "mask_positions": [[0, 0, 0, 0]], }, diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py index 81fafbe4dc..bcbafe4ad9 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.xlm_roberta.xlm_roberta_backbone import XLMRobertaBackbone from keras_nlp.models.xlm_roberta.xlm_roberta_masked_lm import ( @@ -33,20 +32,13 @@ class XLMRobertaMaskedLMTest(TestCase): def setUp(self): # Setup model. - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=11, - model_type="WORD", - unk_id=0, - bos_id=1, - eos_id=2, - user_defined_symbols="[MASK]", - ) self.preprocessor = XLMRobertaMaskedLMPreprocessor( - XLMRobertaTokenizer(proto=bytes_io.getvalue()), + XLMRobertaTokenizer( + # Generated using create_xlm_roberta_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "xlm_roberta_test_vocab.spm" + ) + ), # Simplify our testing by masking every available token. 
mask_selection_rate=1.0, mask_token_rate=1.0, @@ -76,7 +68,7 @@ def test_masked_lm_basics(self): cls=XLMRobertaMaskedLM, init_kwargs=self.init_kwargs, train_data=self.train_data, - expected_output_shape=(2, 5, 13), + expected_output_shape=(2, 5, 14), ) @pytest.mark.large diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py index 38eb4882f3..3c3bbf2612 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import io +import os import pytest -import sentencepiece from keras_nlp.models.xlm_roberta.xlm_roberta_preprocessor import ( XLMRobertaPreprocessor, @@ -28,24 +27,12 @@ class XLMRobertaPreprocessorTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=12, - model_type="WORD", - pad_id=0, - unk_id=1, - bos_id=2, - eos_id=3, - pad_piece="", - unk_piece="", - bos_piece="[CLS]", - eos_piece="[SEP]", - user_defined_symbols="[MASK]", + self.tokenizer = XLMRobertaTokenizer( + # Generated using create_xlm_roberta_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "xlm_roberta_test_vocab.spm" + ) ) - self.tokenizer = XLMRobertaTokenizer(proto=bytes_io.getvalue()) self.init_kwargs = { "tokenizer": self.tokenizer, "sequence_length": 8, diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py index a58ee4c74b..9ec205c725 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py @@ -12,10 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io +import os import pytest -import sentencepiece from keras_nlp.models.xlm_roberta.xlm_roberta_tokenizer import ( XLMRobertaTokenizer, @@ -25,19 +24,12 @@ class XLMRobertaTokenizerTest(TestCase): def setUp(self): - vocab_data = ["the quick brown fox", "the earth is round"] - bytes_io = io.BytesIO() - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=iter(vocab_data), - model_writer=bytes_io, - vocab_size=11, - model_type="WORD", - unk_id=0, - bos_id=1, - eos_id=2, - user_defined_symbols="[MASK]", - ) - self.init_kwargs = {"proto": bytes_io.getvalue()} + self.init_kwargs = { + # Generated using create_xlm_roberta_test_proto.py + "proto": os.path.join( + self.get_test_data_dir(), "xlm_roberta_test_vocab.spm" + ) + } self.input_data = ["the quick brown fox.", "the earth is round."] def test_tokenizer_basics(self): @@ -45,7 +37,7 @@ def test_tokenizer_basics(self): cls=XLMRobertaTokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, - expected_output=[[5, 10, 6, 3], [5, 7, 9, 3]], + expected_output=[[6, 11, 7, 2], [6, 8, 10, 2]], ) @pytest.mark.large diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py index 2025f3ad13..ec29b2add6 100644 --- a/keras_nlp/tests/test_case.py +++ b/keras_nlp/tests/test_case.py @@ -14,6 +14,7 @@ import json import os +import pathlib import re import tensorflow as tf @@ -417,3 +418,6 @@ def compare(actual, expected): self.assertAllClose(actual, expected, atol=0.01, rtol=0.01) tree.map_structure(compare, output, expected_partial_output) + + def get_test_data_dir(self): + return str(pathlib.Path(__file__).parent / "test_data") diff --git a/keras_nlp/tests/test_data/albert_test_vocab.spm b/keras_nlp/tests/test_data/albert_test_vocab.spm new file mode 100644 index 0000000000000000000000000000000000000000..8520ca4919b4344d5643f1e99b2c03993b901e80 GIT binary patch literal 237831 zcmZU+4Oo%9fPN_rC2p#yu9z4ZHXKe(1dX-P?12-{aSN^MCc~n8#0+KB1ct_aB-^ zx%fz{`9!PzRIB@3EB~f_Sd{;zeOQG5K&q@4>mC;QNz+FOkB$4)p8Z>Y^*6cOe{wI7 zAl+IKqpAtfYasZ&R(hgEj{E`om+0vt2Bm-f(7#&g%SA%{BEKCk`X=}gN>aFa3SnVY zis+X?7OV&T0{o$ca23e(mvpjxtxi@wCi;wBI?y?jB65^U>AAJA`;BX_1gaq_Sr$r{76qy_n`N>(4qydq~FNh=$iTrd>WXd(# zIV>{umdKMgL{dgXrrj1v9Tk~=Ph{JY@sdkEw?CI4fBnrk+3{4OysA%-ou4Pl-+YrO zyI+r!UC+cx?l*Drx1Yz!p0)Av+O&AtJMky-tJmUX9p!jEZnEraoh-lpda~p{HBt7z zFi{FVog{zv_(VDI=}+a2Zzsxj()i7{Kb3>XeNa@Wk%~CcuheTLJVErUo)G!7k3?3l z6?y&@ku`6}$j^-$S@%?otbI)*>zBmJpMMi8nXkvn3(v>OhHs+eFM?6hPQAYPbhKoB z@R(V|G#8SOlHFP< zUab|wBCY7bv_~gp**Y;SrCoMtOD9$1MXcQWH2#Ot{}@@ZKquAc z!Oumus5s%&lcL{1-RO4^S3Gg1i68aufF+{O=%jQ$;@k?xVY@qu>Wo z5+Yo1oJM+aTghV?_k?AxTzzx4441Ff{!82vtr>q)wMM*1C+G(?)D3BTjDtRW+~<0Guao5;^*@ms(yJugIr`B*UHvKd{#2Gw z=jZ!p$Uh&SA{T-`lTSZSlZ#*dOu9B|B!g=$L$Pv-`~G|X&t&kE+4A{ke;}8?n=K1f z-fqQ8De(ori;`Km=Mv6zHb$J}qni8rk#q6iN0?>EO0HL_w!Rl52}u1wj2KSFjMYad zDq4y+QpWsP(cAIwjghiNv0|DNE5?-~)x^<)-K`>?@*A;I5sH;M{HjjI%3;!KK)*#_ zYRQR_PWq%7cN>^j#>jf^*#dgf%tCGldtQv>BL9Ir2EU_UJgt+{=w@vE6XfAc5f}Hj z&`-R`uJjl&5pP+oNNWmtst`Gee{maaGazEu#7YQvHGQ-Am`2VKM<_0uJ`ybk{7j`9 zDfemQAZebX9=gcuVfx1N$& zUkT+qOh35Ey~A*UcnpPv$-|yz5JxBWU#OGM@HbD=$sOv`Fi#_0qs$kKDfX2*xrv{d zF-bQaTSmV_JT18O)Po^Kqzk=#o=81qXsN}Ixb>u8f;5ldCQo*Q$bIhX8n2TI>cN;! 
z`q-h7_EK|}e!Lp%M0C@rpHE{Y$T&Lzy}E*MFT_YFivIpOazFC9SgD_vD8W|xjxAQo zXiHOeBK-Z$yfQFM5*B3pKy;y-Ipfv zqG|`~>o1X0Deq*M3$tJ$OaT2H$^;L%XEJ#-9$;=m8ZzlSO^iSIn@7pt6554%c9yzE zeT3fB(wE|8HrHP!{?@aJVp=NlKdAePrbLOszj~5L6W50DJC6IW(e=HF*ma^@Cf_AT z66GHLb5*+ZvxUf+xR)X4Aq@@0Ig|Qusrn)PuSl~yj&V0mnnSedX^||#Rv8lIa7Cgh zyIshAtMFTa>^jT6#JdIedN9#W+mZXp$5O7FqshxX+N4z{xhf2Phu6{0x3MqWzaiXe z{8T?E!F>>dHu~8oB3CJsX}8EQ(%{1eskeVse!Uvm$UW7ThyBUSJ)3ZslMgF;eYHpq z?i++vwm*P=7EZx=C|ONh#9;uvMI)ua)%b(Q5#kmS89O#+LO^64Hqie=nSZU_avo^K=E{L2#Pt%d8Jh z)27%>a5?Nke_ksknIh*1ZyKcS=8-mKG|-QVNh_Z;FLUi8468WlljYP$dH7)-)fyvc zB%T*FVqUEi<5I>n!rf6}zo6aLYGoh!|C+i#NL-cZcIrTt@jhV;_?a)z*7zTUV0n}n z@`)Ra>Y9uAg?ay%TG7#e-@?y`oxF{lfO|3oKTVL})09V|CG>K%L`ds@LGZl<<~i;U z(wCPLhIK>eIP$YZ3BB>qFSLX(*JyVrfnvDCy+)*31FRwb3JY_{AoX`PR#xEN1RGSk z&uZj5!s>^puNxviSL5Xfr|K|P_7ZL+)z3*bxgukQb!NPP}rchGM% zAszKZuyC#(aCHLOplrxsOyWI)MDtPkuO@t_A^BtNY2 z478j6EOm)geHO2k(J2K&5RZJD?2)av?pog9sCy) zUm1SogrAK*A6jS|eLU&W2TTj7&vmrfl!tTk3a(G$zN3VcXaa+%Jev6Q!niSEv4)g317T}b|IWf!`yF+krwXxN`;vt;z4(S`4-`y zBRp%N0PZtj7*GF4j`@G8l~%@pWb&obP~&Ov71Dh*M!qGkRJcUg>Ny%R&%L2kxq|yY z@Q)y`t9U*pJb5)x=lVgBKj2y#sJW+PHGLMT=Ag11johWKjK~Nw#GW$v4gEzkO)3s( zB$jcqDw{cGn?@dR-7qv=3`wcdNE(KmR5@%&71PR8%1^uPVs6Mx6@w*Jo+7Q1Hm(!) z@P+9z7q`ik%G|{DV?aHX_;8n^SER~a;#2Fa-%{oZu0O!7=G5_&FP5=eA5VLYvX4hM z52lLwz;rRwc8OfKFG`h`ZPTTN^giT%BVpT-X8iw%bnV1%%%iPmrb<`(bm=0^_qcwY z@+5J89|XH;@6KtGg?|_2*^F$&Kb7()!+e+x`d-Qp#WSf#)@(-VWikF`71$Deq8uBp zxS^3N#9cW`yb#mU#6VcRq6rpISd{!v9au*C78)`ElP?^bKIT&3b+i)VjVJyTz}7_z&O1=2_pH zdhthJ&V9jGIVZsG4-z zE9`|5+Ce{)Ym{FP2J{v9SE+g+Uk2vaUz4U`l=!IkM*Qo*1Nxa9dqXwHIg zw{D|;;;|p}PZ-vwvtF_9Ch$D!HyxYKp^aIKEBo)^J{y?O%dzbWmqy<5X{2gSlJpbb z4AuT`V85SpPC$Q9Yp(0$rFfM4r-=L)uAAsLvsAcev5RM-$L}Xy2`oY|7Bh zdcie^{xM!7(Zv04F{1x>I1#N8XSzoEiNiN4;vZx$KpZ_9&cC1!mXpRRD2o$uHt6I; zg-+arQTu_{@ZSqbr12&3U56!Ht0=|RzKoSXlTOBw-v3bLp;AL%#?g1SA3FrdrMb4LXS;@3HV0y!SNcwzNkx`?Jtt`hIJSlVx0tBo?^{ zJ|nHJT9LWj>qMS_W#}33b65$_LOx-e$!{96p8U_6$vIoLPS$ZP6@4n~oIzc@CUSH~ zyu2e?IZit_STyo}1>-clcT6M4kii+uyK4Xc9P(u(l@{VGHrUTOno3B+o-{sD_7Yzo z_moD96rgVfe<^)q4rc*riIR)EcxIxM&JpP*ol?e%efT-q2cIZSl&-TH>E!Id!`|3U z-*G`V7hN=e=%*EM;&Fr9c*qd@;>r6$h*jHuGhrVKhPh5aUf1g zZzRZ38pkx0AYS~A@f_h4@*H?z73l_$jr%zZLsmaQ!bjLE84_6kCrIsK`b0&7)IHCf zLOrzg2!})1`7PE=gx^5$o5<45iBeL*nFNh$Se?MWCP588A z4P!~CMt(t^ZRgrnXj3}z<$|8E+dx|T&<`p%>8o?IV*1EV+#%-Py~qM6f^ydQR^k}j zpY^l;WnYj@d}XWy%v{%F!-i4nS;b2}4eQ7&HXr(eIzU+zEk_O&@TGWQR0|5 zp7Ye7O7Nuw_WAUS$Kx62AESS8j>X0(cr_VtFPR{rZ^w=GuPx+3zmYi=uFoJ}Z_|Hh zs|b4VbK?1$HM#N^{7Ey!bEObx3ZY*T|1Q#gjrAqhU&Ypr9%F3zXqo7-cf%lkfxL!Z zCEOc?dlNr9eHlu3(1s1@8}WmqVa6ra`f6;bIn6$v`)?9w`OJs?@s3KLHJExv6Y3WF zJ^5wL&`8~sj^{iYd7m&k7={Tj8Ky!a#DbGLOGc(bum^i#?V%=Ab>^b_|1A7fT&47b zSi(9~y)gG1nBY{o)xJ0p|I*EzwIUx7o=O=jXN|IV+QxW;Gb;45YwS2rXGI!Cx*H!-eHi(~u#-wS-l5$k?96^`pHR@?TZCo45~(q5Q~l*}^q7$Bm6+ zGf8V;h163v{VnR@Hs>)Gk@dKZZIqR@v?^N`?(%yXQhIxZY(d`urKeZ0F5~({_*TsW zv@?An)XzN6S%2`Vn(wifT}YnAs=a#X4g5Lt4{`P%{0jLA>)*^3QZh=r@$A%aLo1tz zdn@Rp)1|bKeF6DhNd2j2e)E~a49w|z<_J}P#UUhhRIKJ0`k8(+FxRX0uOyDK{IkC( zrd;YwaP0aV^gQYpTUO(Ny1$07YD`yss|vji3P^YC8O@jJl!Ni+EAELSe5zEMnH zDY>>n5(%3Osj!%P=8(sO$W_WszSkiC7(YGbG_0hoxc741gv>?i)9|0OLTakHRw(i` zVUJ?}!Ov++&VBX~rY4)bseVfSytHWvekzQUIL5~PriXb~{n$wRdGS~6cnVog*^4i5 z?;v49kcLhjD0@A}bq@sK4AgYd|B2%~`k0+1JqkaM@aZB&D!;0q5TBa=N(|_qrpvM! 
z*zXKwzl@cv>+1-&h;lR{D>w%kR`C&snpeikv4>|`DoyUICe63$Yc)g5T#u+QH{@YDq^H84Bivo( zPyJm)fAA=t%jjJvqotGi^c4CDq-!u*x)@`f@SR)jZPT6kW zUEo?NbH3`|87kaY)a&vUQcn5Q{8NQrIrXs;SqQ5C)+5#U)`TnzW5bj`Q(Yq;ElOge z<+NG(9QqaGPnPmSs(jkfOXsmC`GEYqMt%m!59Ke`u*P7`5`2}s1hw+#kKzl)k)Ltc z>LlVi&h<|2>t_7<82J%W|FB$6!mDQ!!S@&^Uc`R}{wj?)HEzHg5JkB*i<}$@@)jxpgdvgP?`*@dj650`_QN6OQvuN~a?I_26=de0!)m;N>3-T^fa?LY=OYv;8V z(k4HQMP=Nl_Gt#J1IOd{qZ01AsJWzVO@?T5**vMa^cS7Aq z(ef^HO6?QGPy5%r#GaTm%gNK*-z}Fi>gTtu(PCUJ@+)=!XIgo`o3J?|@7bcI^*(GkzSL|7*MTzSv{MpAj zk(CEXzmR9^`=g|czUKds{YE~1DLe<{p1v1oclHWB-_u80iHmyt2mIBuDU(_QF40OZ z?qHNwD#=q7*9&l0O(CC*6Td+}!rr}{IN#$r_OYjRQj6dF%9dWxQXjmeZi6bC!(y&M5_%@Q`dKdl4`Jz^82|rE6$#{5` zdU4Y4)%^Mtem`7)8hr-*3{;wBhe;nBQfbv<_wQnJ&v32#E9#XvReqPKu%BtA`&;s) z^74UJo_*wZjQc%Y<9tq*~j=aGbD^arPPkA;ETVXrwga+y=a(I^< zZ_-FE?!EB4W9-q91#l3GprwK}3ep5^>sX^BZEdXakTp;T&NTWivJuSSVZY~I%K6a( z=1$yJ=)K1}U=r)~DV$&6cB*jT1$`%TPdW1udI|HC@t2I1FGtBK+(w><>xY<6=dnIv z9P-n*&f$L^OxRM{@hEXJ_g=((3H+4TKP_6^tO;C$I=PIWmpQoaHqRDcx~MT;<=l`p zil1vf;_U}N9p8fOHplDa3gL}(#9`!hFms=cdE5>igmFy4{oz!t{fj5H?WAi> zh=1*qa+C0WPJp|wKB;wmmZEh(GhG{@%_8`RKj6G`$&=b)#;iW!JoMFRTK~7xv;mo@ zt&X~=dHc)%t-1O<&sWD~XzvjIF5HI);I~Z^9oz9(m;i||8IoZt7&`Y*iJZ?(`-K#L zULd72-WTKBeld+ZkB$7RRJ^uWQ-1YnP304ZG;e)AS##v7MRWL@ztU8Fe^c}KFI8ze z2a=^rvrA4K$>j_-SGw2bimP_7q!Rbl*EvH*&W5=#AN1IfI!{vexe)gGWNBRxBW=*0 z86(!!F=9j7*|T>Xi4jLB=lOc<{0W_`Ae~jP2G+v{$bufu@q6=QrLQnnyvTmsKF$^V zJR1l=XIl)<=VIimw@9|taN}KY|u&?ZSyqJ zjvZOkIZr~`ad#jc*tLm!DxnH$pbm z(KX8Wa+0|Lx?R|&4;w{#xUZi08o>-s#)B553+V_kM=X%%xS+EJVz*g7}k@sR{Co)&* z<@D(at?WhL2Y&sNQh;>h=YoUi-W{|Z6rqF2W_a3|C+ndak8P@2ynnLwfHK=MCbnCJhK2NtMoQ-Vex`E+>%A(NyUs zoQv>oq-RLIcaSRAiRUIn;0`qVQpEyy(XGcAJBh2ToH_jkY>jev%p{MhE(~m<*D)4A z@n-thvFUQ3dt4RM#l4aDE6{c6*aP&Vn2)i=3EZCulOY+VLMqIH9{N)6o`-$R%b3uQ+lO2Au>k$M)57>;XZ%4reT+Zo zF5-9B()V&0KL%1{Hu21b`LGb0Q&{uR{)^GAyS383O)JaLSAb(P>mdB>V9V3WD*V>K zde{J2unD%nR``AU-;O&M_JWDEd)doTVtgS=_Ter7V^p*Z&)|J)^l&RSgNz(~Qc92} zsDvu0fjX!MXOm7Ek!ENCE4T|;e}DtsyP5UJM)nNo9tbc__`wJLj5VzrwbDkJ+9{7J ztIb8(e3TLCu(KC$WnNSF!6{Jhj-Ns5k?MIx5BA&(edB3g_;Gwj4?yPt&v=J87e+dF zQ~!*|E^woJw(u?kN%g0Lu88J>3g}F&9cW{?| z6(uG%XLr$?3u!;Nk8V|Eql^#GbsUd4M(JB%2ir>W#5{S4Yq4CL0EytPU_3e!Ba_j+ zj1zshlhLO_V1)4qw~uk8ADN2ZESL>*VLmK`#Sk&3$TDP@b>Rx+@b^)?j7GhsJY2u5 z!msrM^2pffh5ixN z1)O2{k%1emKQb9hvl&x!7+aCuxLpevbHM{<-euTIJlkO>#^!Bov%i))Wc+VU zq5NqN*ClysfR`|Lkkr5vL^51+#T6-i~d(i|2sneL-sdd|42XX z0J4+#y5cE6_3FGv|D)YqNH=%}uzyRmSc%gCPVho=Q?yuI(Gozno{g6Fd(l$H!+z5= zY-%R9u@oCwfUR7JmQ(7!+gdq;JO^%`v!6#^1Xq~#HgmZ5fKD#qz6|~aItiq+_QxiD z(3*z*EuxR6V}D57JmRiq{s#wK;htf*4mZKZ`aOcY19ze4n2z-qHf|xEJ;a+&yx6`U z8Nl6nTPI!jc(>ssV*qq_F*f)(2XL|fY}3ho;&=eM71#hwfJB%Kk=7^0&x_gNqrAI^ zdw3f2Z9M%qkNyi&@iUL;Bo#RejO?f9BAc-{%Qc;xxmo{MxH zqkYl$Dt~NyAF_|W;LW6eWYa%#=pV?yI@+Iu*x6h!fP+v3&G=ce6Ql&)YDi%3kRT@X zN^q1WNXM}R+9*M6$g%I7##0~IbQRYlPmGruWF6GQ@bY+RM226A6Em`mvm#SJXVU0a zXeFLD;%PsdAXW}iZ5)u=Pm%^WMiRuxa|;LIoZta31mF~$fu56z(hGgyg?{jX9|9wZ z(#d-VU8@u11P97aWOq)2xG0|+JkYw4^f!|};j9g$PxGus>uVg#BG${(}?Xgzk;lAGpB-^NGWGLn{lBeti_@b6Q!9z6{33 zk4akb!V280z+X+g(67;o57ywf9?a~4Hy|y{*;&X<(0o8ETaa79N;%uXo=F+OK^{Ax zb%62@acv&^tu*Gubmm562W|&)JNNE{T-Xcy;2;!13G|dQ{#G#lHeg@1w0{@v5B>w} zKcJKT&~=UVSD5wJJ;vV~jK4$J=K$lckGcOFY>Rk?A5V}<*5mG>etM~kv3Z_6_oM$fzGhSg`W#`=SDnti-4N<3L$G;&V*Hs1rqQ~o5*LGZI8+o5b6^~v*IM>vCPfX-Xk{|NRE&ePaGw&rqa z#ofibt58oIjbK(>9b3D zF9yF=5R3h-L54ev4Oq<69mxx;LAS5TI3MlsuW5k{#8%|bE$kx0W7~ zc4$O5g9E(-?6rr)25S6qE;#h#_~X7^Bu1V~v=G)cle9xc;=NGxa2)+ioy!$5E*G&* zC}N#ZB(2wqq-~@~+DD7TinQG-68j?3NIxVFq*bK}PVj&i0&oh>K+oVI=?#+(c%lCq zWxGKcj~$W#bY>pnyX-@90-Vr2a!6eF4vCvE9x#)ibHsBVF2W^fb{!JSjv~2?Zmp&K 
z6^G;s`Y<@iR|n;_QwKJ|4Zrrpj$;>feDZZW9N9tv3=Ym_P>*`|0&r2Oza=}2bhQ|IOGxbZ27!3$TCifS>avb6(-tGYB>RHxM$cC!`2bdepP#>Wj@%Ysd?1-8O)P$%1w z>Kt_^GQz$o7r7VqK>-|uA_%`;B%74|MPVbjE0rJnjVk2uOBx;)GM+(k8aDjpA*sQ? z4kA|%Nj=i3;aMfJ>?^*xdzvz!yB9IWM01vmuFgdJ!n~J>?tlREmj43hAH>;@e=>e1 zaDoS#G*R-wB;uJO^1IVf(#m|*#vIjtgZXikIp-F0&j54K5c4DRXD9P$S9UDtYn+Ad z;e0=z^Zg{w_ft6ENA}F)`M@IP;RVdY@yxqZn0LWHo_QG5xBgzz4ZtZl1I_u&f2;Xj z2y|-~`|qY$sXDty&f|96X8&Ey{`(mFZ~SYA^5y8s{ZbctUEZCyPpWfv$=lhxq-Jox zypy?G8m_$|$5ReUBYFF6?;CO~rAXe(eqHLD-jw(6y(z`pUXzm2-Ext*FG1O)d@+HM zhnl5FcJb}*eyKeDYq`v|EAaP2hoovjp&U-nlOqjz^3Ie(sqy_rhPf7bd%V0$-PCR5 z{koh3Qak>Dyq^~>^}cAi&h?ur41{?f;`jm1oB3Uirc&$`KQk}w{*L#*Et+wBx6Ah} zZR4fcH(uH-Nz%%*8Y{GqCW*awlGwT?iG$}l9ZeIZYjm1)UP}?@jVW?M^MtrGKa=jl zDPrUqq#wqyPn#V+tq2j6cww&-hcw_=EK9!LKVyj6B2j^I^go;#&{y3sK^-M9BtpZzzg6BucW- zH$kA6xVuPW5oy5|{LCX!vK6@E2$;9SHrccjb3 zGbLmrsAo+6ui1B_d#jlXjxpwMWbChC4nX=hGydO-m$uP(X&;Ff>mK%n`Roe|+5aLP zxOZ}I3s}JcPVj&i0?;$U{&$o;D$=`>{Wtj5vH#9v{|%izF9^r#YMt@VgEDi^$(n@`oPayNuRs_WwDgQOiErPMj{{_K^nC zfqR%ZuER};z#X^?_u&Ejc>aq$d!c_fbqxL?>UeBoj zlmyYOW{!os*yIFcb2{aRM0D#m@1KPjvSaj$??VC4M-H9ibKo+#^ahp)zqk-506lfL>m=dAc;!6w)OTVXpyK1!6G z$XwV9`=9_0f^&~fijeB;-JQ?7Rp=(D1a$`Qg?{?KkN)q!!TcNM8$zztKpoUWBbcED ztk9ar`uu=e2k?9lY{#%0=sIzzL&}Q-fGVGR&u_uj`XoD5A@9Bd;AC;coLITcc27Pv>5|6wk=+LQEw@oJRJ#~o;*-F@^?=!e#5=J%9` zdra%RhkI&!I`jJ?=Jz@5QOQR!(MuWBTU{# zS%ZKLvhZ_~zfH(3;J(FqK5{#_$Zy{f<~P<0J8|cNKc96+A?pwW^Bc02e6*3TcJgHf z8@e4j4p9F57}?7``=9_0LJ^dJ2`ZsyfbtJfe)8odKmFGz|1HXoRA(if@sxiO^+cuH?VD_S_7!DMU63Cne0Kp$=>9|Jm&s1)&NVH`zNsmKySrf+sJ1-Sg(=x zD0xWY{4a&NiRX9e)?jat2$LZhra~&rf*#h{z2m88!h5-|e*yIhWBdOR#((C7ANT*X zP4{YSGn4&4(nDL6eUQTY9Xx|2t_bf;&PC3Lh06VLj2K^=CSkr$Sd4qPlk+H~+8?Y? z?&s-KQ`o1VuYvVY`t{Syt)#(vU;}RNY4SG8HxTHXAaItvog|OQe!`gtQzRDuEwB}~ zLvzw}v5ch1PIPND&!%}EpNqa19OHTZt>GCp(ndJ-&aVN~zHlGc3*aD_cve^TCi4M$ ziE=|FR6z~YK|QouQl-t#Gj^nvXYMwn9d`%Pk;Ly35XKBGUrS=J6W=Z~~mGdH%nW=l_{J|4--nKffp7gm#N;dcY(aQ?oVdSHH4Hr$MUnQ&LYg8svM4a2yN zSLs*0^WX?kcVX&qfcGD0gSKnDqjQV%XQYj`vLiciJCObl_eNe{<}vGYIN`fb!Z zsQ#BlxGi9mDAqQt$%oG5`)bOS$r(THEa+~@f5&8C<1pln({|W3Az_M?gJBgB?R)ZUk&y<9{ZiiyW04fn|MBjtb=CyO+B&^ zEOz$wdDs=0ar>WQ-M*T+eF1j713S*deh08$wcibMMnxF){*MEx-v4nT)mY>~j=g`d zi}gQajTd(SPJ#1?PR=0Dfl(hP=aFt7djJFPC8K-U7xcj;^ve)H_k$1mp>-wpPd?ho zmwH#mwvK!ivi6{yj$XC?h?gteHw@Ro_*$IYL`L8a^kC1u*mWPMcm4XYcV80qKZ*K> z&a<3B`z)d($KKB0xd;j;?hw=MDVSZoWE^*$62cY|NYzihoB20#_DSw1- zG9q7L8@ScntHwZcDevqfEfrcx#eWtwyV#dJ$9o4K$BC8lsWRHZ1`bed(RzgaZ}p@7 zZyS3`_NR9CryWQKd+%i8nGJJcJ}iXAunbl}Pd?Az3wi#I^zwTG{ioSqhuB{u1EcK! 
[GIT binary patch: base85-encoded payload omitted]
zFi$gbxJScDe$yfUlN!SBAbtmsp*yZre~>x6*{IcfjD1mTxa&c7vk|XreGku!E7h@v z-$uxzp6<%B{41|yZ^@O+wf4#Tj??md=(JjqnX4jJ)~>a3FlXxgFllAK-yzo(DJy5o zTIIe+AB9EHd?;WZweUpJD$0mh?p-OXIJ8zJP|DnEStx0hQ;!vA*Yf;h-j~JB>irR` zCYZEp52mcTvZPgiC}K4r8$Fk>&U>kvL)~i0zmz#W=6#>PR9>X7DrL1H{f919plL00 zeQVW`Tuc9VE#K#79{6CldKicYgDI=mpR)Su)~eqfwLyn60^+vu1s2~^&J}hFlA-#x>Q;8PaWjN3E3Mjk?VZI%2^lX z_b*1Ryv|FN4+U#4QQ>UDDq7PkxBpW1zOfGi_tL#lt1O(b%J+_1714xMxj$-EbtbIp zeCEM}YkB^4sjh6T>JMF_1{T>G3DPknfZv6J(H;C*l^RxbUDeM22um>(-4a{+Wh9Rzp@H>}k_`ORs zvNd5vR(2|y9<^epE>)ayPU6{1m4wvvB^q6kurgP~`2CNB<;c2RPRPE7cRA)pssAAJ zf0xs4*!K~-Tm>6rR^i!E>OWz*Ul_HDcgCy|WN8`eJK@Vzp4F*}=w+(h&%Te~Wz;|W zK<-Fbwa>EtQFj@0LYMRX-^(3|#J{K91_fubY)E*gFHf-wE$14%3I zS=NoXPIC^$t=xw%Q}&)7xxoI;RUOye{&waaFQe`EXcQ8itjo|=VvI-Pq!Xb`qm<+D zJ27j7d<=z?RyhAM<~iBtMZ4|NwgK_2M@^0iEnU$BT^@F(OS&_0Fp3QoI`{*~I;u_ZP&NJ=}ZPrgX{uii+ z!c|x4SGeatc9n|Vo0;>tN;UX(<6e$i4u0O`d|7z2hOyUtxS2Wd%}Qav5*m(PrC%WH z$~G(dTlP!6uvP!ganm)wB0rn?4j12v;`!iNzgZtgwr6Zso98Nh5_?zvRqWGc9V~j4 zI@4FN2Vk@Mf>)^r_aF@5_7(i~pTCOhfb{om*2wdlHMDgz>wKFPd;co>{a5L0_=&D1 zPkXlNC)g7Qwkijixn_&9sy5L-*d(WC6W9M1xgZC7Zgdml|1I=?x2PJwg)6o&j@m;1 ze~XG;n|S|oi%Jh}QCV`6%JEb2!6sGuH>v8_CRI;tV*YEBYENw9`rpL&|F)>%*)3`& z9xwb4%FD-dc^JFricRufvx$52CVp>{HJ;8b3Se*Fxry=b7S{i^=y&+<&f3Bp=@tc{ zcjYGa`M1#iwCgX;I*pay8%aRcxFXQ=s9hFYdGn1jlYXKx1M zpA7Z|Ww1{uL;mM86nHE{?R)=T9sbX$v-5MT5q(bGp?dY4s8*2U-los#GSctgQ%V27 zn*M)1{r_tE|JB_8>*@bj)Bmrg|6fi2zncDkHU0l;`v2AR|Ep(j`apuKK`{vH{PZqS#I@%>lp zd&sBYYw!)|-*Yu{bsJcpzgoi_54kq*yJ;IVoZg_3QyY|5b~XKyt5x8+T7^&qZYbWe zf${a#$_!qutQ{NVSj9WFD>mp~h{KhCHS@C@l)G*NuNr99rG*KF+X=TdvZ3YoHQyP+Q9zQ4g5aM230?{fwi~|EO_o>{d<=jdw0tT*$?fO z3v#gMLf+Bca*$>Y`~-@j0KD)AuF2B%yLta*w>s{+LMhx{gqPq=SV2G6E7tHHeFZ zguPCeuEUN!rJa7a)d8o{Lgwg`E_kvsT}ONb+54| zyi)m(p+9*jd>;MNzvP`W^oJ5CJ@QM&&ghRW6^GdSf0TCspc;D()IP*J08qb$cL3Ns zVD|nuy~p<|cn_cjT37H60C>R%ZQzFhv_l7ULKpn$_q$iJ_kRt2--AC_KWT+vU={Db z!_YeV%Cl(=^ZvmIej@Lu+4IHw?+}Oo_xtZH{@rSZrmEd)goXnzt3GSD>hgCp-rr3h z=08;(+)bZmw<_lr+5diCb(5CO-zN;kP{Z zuETwC2V<6lzgO0(-CXdy^?A;X;&(siHsdG$*lxujdSo~Ad%HDqa9+cnce$tU*3h%N zHF#*Z1`h642-)xd57zK^t2h5$1?T=#J+628ji}ws|No~t4=?iDF}s;V+N}WX*}s1` zb4$DB+p}BVV{glIU^nkx@76t}e?N?ohwmbP3_pczsDxkQJ_*~&Un9r;Fa+c92D}2l zf?vbC@Sm`nG3Z~yRq%E0ueTt-NcbC(pMfvHUGNWZC+?3TzYh1pangDc`2+YVJOd|j zt3=*|-=8C`4DK(;-{5`*UWYFF6TR@?IQQ?Ii&M_GeU^Iw$JfE9;W5IW!|xZd-vr-+ z%zXBNf}@T-0FaH{1v#vl@o0Kn~>%Wfc9|G9h=UD#@bNw^^>q2(B zdH1D?v0f9`KeCVG{`a~5KjivH4stw%d-(mIYq*T-A0i>X{{S(FL!ylDKR^mbq1Mg2 z9<--=+DZemaXsx5+01bZvh?tI_HOX)xI6fU+{1i3F2pzF!hAc9{SP(BytQjsA6dir zXbtZOtx-`1-+X}L>EkNdv4$Hl?^l2WoRG~q7vw-LM9z~xh z_#OTlkloM@gD`uZc8v7k_c*c}c@Ft!%>DFZzY_T=2xE`I&DigR-xJr}$P36vkdMQ2 z@H6@pn_*=O0v!oC2;r@@5 z`*!2b@&Dxb_aN?nKHN6r)`uK~mAHKzeu@2R-2VdmZ?K<(zvK8XIlh4XdUK98|L1Te z{0;8cBe&!JIpl3{AKc9GXE?VNK8HPX&2N-t*8ABP=|pC;PttXS_WwNXpLUs-@f+qp zexrgtw0|f9Hx%DN`%h1)^r7FVY=ZWGjP}3cMdrV0|4@DQ=law8p9jyX{yo}1G@hXS zgZ-WQQ?&oBwErEn|6Q!pBilIkA7I}+`7!(EI}Y>Bk|XS^CqLcD9*%>!_j0}u*^mFw z4)%W%eh@jtargx714bZ1yNE*U{j-X5k4VrylBZ~2XK7!^X68d$pb?s&9vYw)>Y%!g zc?_sL#vTr2c@xih7teiUSrz?&v-AVf^b3&gQ}h#Fpq~(aQ+X%oH$cuD{RqgWZ{q^T z`U(26Y*e7%la=vv`XS^45|D&gC-(t}JUgW*a@dU?wBsRU*u{M=$bEq0{(WyMROE?Ly!*AeQ_+3Q)79PR=d*t)P@d@Uh{{kMz z?Ni7G?iUW+bKwcxo`ySdFXFfb>4P{-LN#tn+@IKzqmvvThcoaFZi~nU&bPvL%IsFS z6Z{N7_Kz&#xOCN+%AkDD z2dW^R%2l*~sNTu*A8N7JK|OIa5J%%0+W-5s{||ZoZylrmPx}WiVSUhs-46l6wnGQ@ zPUzxXH`wplaNK*8_7DBoLomR(K^VdwhT)y;|Aq+mD8x7yhXnQ{q{_xLS~W(WcRlYT zujhT__3X!Zk@3nIzU{J}HLvw*aI?4Rq4lbQ>dy75fl};cP`-D)DxhfHdby$an)R%= ztXJOp^~ztlUIlB`t8gppEf24kYs-4Z6YE(|T+aotURignmt)_0IgyEc+C8M|xDP<& z@OnjQ-?1ihANV=@cwblez%x3*8aIo*p3E6d$3v#gMLS9tLhk`p^P@!8Y;)OqV6W`iEmT+A9KKp7v 
zWDhQ~g5yfutD1hVYBv4XxPGtNAiq-*=667nyw`+mB*=bH(ifXm@i@GkCToo~4D`xy`e zvcYda^ZOZ)0&UQSm$SeL+2Dd4$b~$^%bB)m{Nx>TU=~0jR2b~D6?P02_Ss5YD{a?G z+qKemZO63(2DQN)A0w4n;+QQm%$8xg4YsWqTd{2`!B%41O0kvNHsiN!wjZ-x8_qfE zj8U(R+Q|Q{+N908N?Y`4UCj?=eJ0}?xWO2F~gn~iacF?M1Y zZ`({Lq%5A1`lz*pg3*biTW``~^! z01v=}@O5|yz5xf}JMaiR3Xj1*!Q=22 zF=jA;xlrRbW@DahniLlrJB-=dWz2pP+hUf9e=$F5%m$PCVq40Xev`LFze#r~*Z5z` z9W>^E$C!hr1kUH09R6#+(d)O4T4NTr8MDdc=eO-9-QV_`eEyrG$Cy>w#;k4ox7Lf6 zcN%O!-mV)K9Z_W{<6320r;Ho;H-eHdW@8?v?ExR=e1?4@!#V*iJiar<;^<6HYhTPB+<3 zH!I_2oNl(AZnm9nQN}Ge-C{f4Vms|r#!j4e+D1}r5@P)Q*w(o6rk!-`OT^!r&g4niQ+w4Nv zW*5S?8@0_YfNgdGY}=u2w({F<(l%S=Z8vM1t?0H}w9Qs;+fHq>mD)zehz*lacKRlJ zb}HMo{aR7p+i&3CoZ6njzirv3?VP<)+joe5;`W9Y3KAt z-FU62FD71Y%+QV7bmMm2h~p03c$03tSvTII8+YnPI|5sy8%vwSe zCfmG4wxT$vE4!eY0-9R#f-RcBbs2xY?Yv>-`qpa;>O%yI^iH z&UW4J)Xr-~<=e%y(>UAp|M&X)zu%?Lu~TA~cA5WO+O1o4>qgyrtG=K;+H*jA-p;E! ztS{<|&+0bacDrtS`a0c)+Y^uLcHRDE-Tn>T{uA9kk%W})(3kY3Jzv+Iy7P;=^RA!h z&U+8)&ZD|ZcipSI9?)Hn3&*|MYyRQO+Iv8IAJE>f>&v=ZcYjfLWA4*Ey64Nf=l;XOY z{m1oB`X}>$2%eM?n1}Va9w*B0>bvOn-Ge%$L*^g8r$bNa&<}O!$NHY0&=Y&~1o}OJ zwcGj3r~ml z=8S_w)Ps8JdwS}y zo_bDC>*)vdjGi$C_skwW^CdmQ@q>Ei5k2#qp4IpD{ezDh?6@wyT&>+)~T^Bjdg3RM`J;a^=hn7WBnQnX>34agBlys zSXg7j8jETyrm?uj5*kZtY*gc!8qdCO5@cUuhDp|#_Kd*uki+rH)^~|ns95PSQ90hDAh!nCMq;hsfj90utr#;iCRt6X`)^e4Vq}w zM3W|(HPNDpR!w*`;njps6K$ICYobFFoto& zgvA>Q6C;|4Xd)`!M3{(cA|Y;4G^<|}>ldZ^MXP?{(J#97i$49LUz2W47HhIZlckz0 z(`30OD>PZD$tq1&YqCa@wVJHcWW6REG})-hW=*zevQ?8FO?oxy(_}!C?V9Y+WTz&( zG}*1m9!&-{*{jJuO_IcrCc~PHXfmqFm?q}Fc{LT#RJ*1+G}Wo8E=_f7sz*~nP4#N3PgDJx3TbLUQ-his(R8V%%QRiC=}Jvk zX}V6+4VrG$bc?1vn)Yehujzo;|3BTS=`KxoYdR>l0ZsR7I;80VO%G~%NYi0WM>HMP zbX+fG=_Q9=a_Xggy;Pu=3iVQvUUKWDQoU5Bm&)~0m0oJnOD%dSs+ST^i9rq{90o`j z6`IM>OrB=)HB+FOBFz+QrbIKPnkmyvxn}TCshKLxRBNV2Gqswj(@ec)8Z^_WnI_FN zYob$Hrb{#3n(5I@P&2)n>C;TVWtK)e(p0DEtI$o&bMLO=*@nRis)bS?0lBrj+^omojbI$5ui z4LaGVlTA9=tdlJ|>D5V}PPXZ!Unc`P*{+jaI@zt0JvtfG$zGl8)5(6F4C&;6P7dni zkWPkmGNO}Fo$|lt(5azQeq(eSBWR3aol0T6UdsP#BgTjrBZ2XHqh2rL-yD>^HmcXl z`8Nj@{2N)xzmZk1MU8>2+8DLQsC#Y57})BK(ZIhgO+1bKn}a63-fU#6F+9fb>UE!y zZOaCJ1IE^Fj1K;d?7V2ao@Lu#cOY!Lv32op&JUo=8y>yk)f+y&(WW>2dLzL98|}vE zF-FiBy~YR`V*q2;WsDqSo&1<8?W8QYq#;*ZGz*g$Ha|c+>9`;v)LPrVHX+xZM86L zpUcT}wCvM_GBzTyb;Pg_O^Rr1Vl~3}ShizBi(Zp-uL)%rgIx%A5%lSee*T$1F}V-D zF*utyn~lYGI8$V@B%{(G|0WI$6AQvbKe%kSb24bs!B}>;BOf$z4w|fC*rAt|;%M0s zWe08lCc|7HL&g(^eQ4qyGTJN)C5TZDgeBFeu>4#PfN&Me2v zM|S40n{Y-DGP-QX7USQ9jTyffe-!vNN5+To8^bo6N!!IJwvE{`BT8&z*W{zx+CWyn>oczDh zZVZxf8lO&+jFU?hgAf>h6vn#==rjR2pUr&`!)FY?F^DJI#Dj5Bn4lcl0ZioC_BCh= zA~HuNDw86H?fIf%dtT10x$)W8m8lq+^DdK;X#~bFK8+v_#PVr7L0fN(Wq;<{#{=Sx zzGaJvYdMr@(Z;^)#Wc5NrO|xZPF`(Wj%IU=UksBh!UQn_A4U)|XZehs9`@xkww^gA zwVXFX#u&h`!xFIxdoh0QbJ)>0XW5nbILI|=<{Ix97wyYO1kE*x=5mkWx$>t5#+_$K zCW?G>0uKcy7{;=lycaB&eSyhSfyq;W$y0%e7UPe?gfw}g^%{c+3+xz7>;$AF2^kgd z{m#ZUo6_v4PG{=0Q>U|aI!C8-bvjR{i*(wp)5SVnrqi`L?a}Eroet=9r%nfTx=*J= zIz6b#eB!t;m4higmvgt6LTxy%l%A6~i$>m}KT#sW8IvR%!%Mv@FV&MfI|1 zSQeqTk_1X3P#%#YE2&aDc&gm?l4@KQo@EhO7TwEY;7?%&mV*xd$zjkq%w+~N=WGv4YkDyD$xRXYNJVi(+LLEj(D?%Mc zNH0PiMo2S49Y(0b2s%edKSG`(=pCURBcvH2%?M>5L5B$S8fgIX5Frl{$|izN5$YsD z8AYg*2z3&nP9o$lLY+jYlSl_pClSgpLY+h?#|Y&Zp-v*yNrdu@P%gBcxd`PMp*$m$ zXN2n@G6E5ZLJZJ3k^ssml7dmqMbSTs{!#RgqJI?qqv#(+|0w!L(Laj*QS^_Ze-!O&=pRG> z82ZQ1KZgD>^pBx`4EHL(dp`#?Ui{o-y={p=S&|W9S(} z&lq~f&@+afG4za~XAC`K=ov%L7jG<=?J!9w@ zL(dp`#?dp5o^kYyqh}mFGmf5d^o*lt z96jUc8As1JddAT+j-GMqIZi#tspmNL9H*Y+)N>sDgETyOQBl|-BRe5 zLbnvUrKr0U`lYD56gsBRkq*jS3O!TQTMAuM=sJpyqv$w_j-%)|ihiT$H;Qhf=rxKi zqv$eCp9|55TkO8!U5>nM30C9k98bCi6JlFw1{IZ8f9$>%8f93_vVHNv(zI`wuA!FkNT&13%U 
z9HPmohX&}?+qrHih7!G<=YVX;g?uOi!sQVzuMGmw&HqL5zuA@#df(0?(mW$oDHK|T~fAryfdilGEb zp$y8Q(s187J_wFvv%?>4v7(E%bYasUZM}ayyO3XT(QTos=8v|4+r@%2l%Dz9puyzFr0IA;)soWJ30*KC=*AYoftL4G08eekDPK{ zpL28>>?l#i*hrBAaF|rP?KF%J0uZrtIl~+T&pC}(&h!{;2a_x|vSd^Z8qPWWfAYw| zMYkZ?x8pPE80T|NkDYGbva-ugDwl0}*o>CuT1vsF(ADtAT$*Zk zI=~6p;DQ{;HP|VdY+uySN!2^^?b0ixGuT1eB0{Tzu|gkvmIpDY1*v#9eBz z6{3E-IiL+MU%>w>yB9BLXWgDgJq@CJL;E> z$uLdAXQyfusj|-)uhoWgx3Zb}>*mvG;*jjV9Vpr1*n#jqUqq61fx?Pl8Ha^0Amaltvc z$w9M7V6Dz^E$YY_ij{oI!*sA6nXS%%(Vu(v*ye5gX0bNuzYq6z#N!khCGE zsS#5@G#h6eOe!>wa!R)x_j24SC@S`po@?N)(ZKm;8cHj%d(ha!u`d_UysZ(4rvrby zPJXVNl3;v&uBT4tf|NxsmD$fF6{4XGq5m+K9pjU8j5*HH7d}UyVxH&Sd}b{~HJ_EE zc}KP8Db;!ACFbb}%;&XhzMxg}MOiSUd3T-WOR4}_TB!N5Zp~LhRTFpsdrb`xUoAf> zT%Xc>L$2lnglP|GzOxsGHQzO$`JO?|_Z4e?&=7=(=EJ1V*nXa|J$nct9}0l97{AXm zCZA`FJ2j^$Ab`BWT6m8Q#( z&g@p22WXndWSTaRre&tN)Y92qfFGuH(k>Ucp%U7_5BOo~Ce73h+aDnaBTDDsKZkg8 z@K0YYol^zXPy?iqL--v0Gc}WD3MQRHnB06Qf<|bDR^VJ7apgH77YLu%1f-Woe)Hl= z=jQ2wik6rq1n z1c<)~{oPrR4ft~tkGl*g2RHuQ9>(~8|BhQp4$k|Eg{`Tp4zy!+S(U_sL@98`ccJ?>VC56r|A87@e_SJ(YKRS z2iff)NGI-{0j0aB2JR&39ZNL{ zr=p}5+{EL?ubcGUaV-=(f&3JAYN3RDmbietl%i9qR|{q6SH`)r5g?xOdLX?D{8o}* zet%$rR<}@94LuOi0xfQ#x(4cixT^bsvZ^8O8q%W0E!6nHuZ7y;g#ofrOLl8HRU0(- z?S(qDpdBvMqj`N9kehmr8*+fy8jPKk8YvyNoGvutpEkPCNC`Dk>WygEM9DT0Z4)_b zqC}fWrkNZx6ZPyW*MbMNy;P@{0A3=ZK`*pbYQgW&LV&Ed z6QBco2MKg>t{d4+;yn~g4|?>Fi6H)is7ec72vVKF5ug})NwAk{>?ISuWTKahv3#@8 z=LW*`5f80yp&xtykQR9MFNE5)Fo2>1qgoiGfClk9NbwDlHgoI?LxdSZ$szm?k^eAx z4^wSn@);)GFongO`@#t6u>7+yg8K-0VQzeZx$FhzbQhS9U5HW8JWCgtmt0^ja)Eir z1?ChNn4eownGMhkQ7vW_KsWTm2qd-0gLRR?*dqO^ zMJIOFRTg=mF1qreSBp7~T4eHKF}GBUTWg_D&;`Vm7lvVo!>AUSg;>nTUp{f= zwLuo6xrje>|iXoAJ|v{}$rsf?xDdz8=cbTLfiV^x@Bk-ACTqx`47_ z!P8v-i#%x-{pji^Uwm$bcPxN3{N&wFUIL^Ua077$N`SlsB0$)7;^nEd*iKpcp)Rf1TvFlXSWW-$j{rlh1C#c2jq>o5f!I^x?mcGVLS%e&XYavlt@(A@Vsuc@5w; zh#VwegOtS}deROS>3c2?5q5|?525oA^&h4z!<0c7f5XHxOxO|Xb%cDd?zBkXbdj~1 zMf#eHti3EU_q)ja?jrNFi_D!aCdn6bri;vtE;1Lo$Q|1xYOx`XQjDBHW8er)XG9 zZu07`0FH}EuNZ$N&X?6_se-gCaH}Bx3i4Q4t);3q zE!DVzGNCiIR7?D|#9NErb-34+0&&(6C)f5;UALCFxi8gYZ}4iVu^7<53HK&+X(ol1G-}mspBl8l=33kld@6!sIK=wZXl4X_#^xrX0C9 zFO5*%Be=5;wM2JjiM5_3)-#q^!&qWHVu^K#CAuq1%&9Lim%hY2_!4XLOU!+r&-6h= z=ehr$XSVQs)&L}Q-cbT&Q2BpzbYF{cooCi=dFf4xu?jio918`&2n4!i%e)?Zb6&(A z|2|j;PO=`gn?%)`k1Ky& zuC6`vPaoG`JpLM9|M%0!t-oe|2Kc!1$Gr2O_dfh%e(CYYnEUgz-1xZP^Z3V4{<=JE zfBNuy_s5IB{=fb4-u!ia|NGO2_tGEUpMLo9_QU(jkH7t~yr=y5+n=NNh9BMuet0MN z;rHYZ@7q57Jrq9tX#3$E+lO~(AAXPi@Z;^rfBd<6H}>IwD<9rVeRvP`;hocm_ctHj z-+Xu%^Wh!Lhj%dJ;QG@zeEc-#@1Mr~+9yKJ^V##o^OxtV=bPud=ZEK~=a*;VnR*sH zi=HLVvS-D!>iK_vtaZdC;d3`Dyq~Gs#<4M0ySmQ~*Pg%pKtnsAZ&w%4e|C9bF z{ZIOz^grqMDQrCHf70*M*m%*8i;kS-+n_$Fu%t{m=TJ_50*Ep7lTLf7bu3-|y4oS^u;CXZ_Fm zpY=cMf7bu3|5^XDe!p&xXZ_FmpY{7SbUf>S*8i;kS^u;CXZ_Fm{rWnd_4_q;JnMhf z|E&L6|FiyQ{m=TJ^*`%>*8i;kS^u;CXZ_FmeeM{~`k(dtnPT|adiVr8{JK5-x; zzvzF_|DxZoKzvzF_|DxZoL*qsNi+(>pjaU7)JL6UVtNvI0 zulis0`~7RY>VMV$s{d90tNvI0uloJ$HD2|<>VMV$s{d8L&nUwVXuRt84Z(QT@AtuB z7c}gGhR-hJRsXC0SN(o|8?X9b_1g@MSN*U0U-iH0f7So0|5g91{#X5e-W#v_U-iH0 z_kHQ`d1$=qf7So0|5g91{#X64`d{_G>i3yxyz2k0-)E=c=f>e%jPYClZ~edZ|JMIo z|8M=~bi3pEwnY#rvFX3`GzrvFXwnk(uK!*CyMCX+hi}4$8Rjs<9A=o~UH`j&-;NF6j*WNy z@A`d9HhfDq-u1uhf7kEx`*_#?uK!*CyM8}A4nI2%-=>Xs{qOpH&Kd9e-}S%if7kDG z|M2t1c-Q}~-#2f=H*e!zzwhmb@9l@rM#Fda!zORo`@V;Q!`_&@zNdBY}e*yIhH zyy5$sVUss(@`g>`@XhJ4$s4{l9e$=9HhIG)Z`kAwo4nzx)M1l1e4RRM@`g>`@N?(z z9ni4J8{W4Jo4jF@H@tTl-n$H&yz#ewo4jF@H*E5TcQL~zZ`kAwo4jF@H*E5TP2RA{ z8{X3lo4jF@H~dUHZ1RRp-uPSp-}-&uH2&8Aw|+nG4)1h^t=_QJ8-MHferNa@c=)<^ z*y;^iy9*y;`MfQGH!@Rjhe)f={Y!&k$@R&Utq4POxtTfO1E*|60c 
zzT+Bxt{%2}!&YzD>J3}H;T_Vj)f={Y!_VBqR&Utq4O_k8JF#J_H+Z`kS$TfO0{>0zrke6Kca^@gq9u+$lY#-enD2yZ`kS$ zTfO1y?_sMqZ1sk%-mujhK0Azm>;Jd@f9v--V)z^}{;l7JZ+K}o{-fW9Z}{4L_~Hy-d_$|zG2HZ{-ggt`h9L0HhshQjKlYg!=`W8^bMbZhfUx3kA9oJ;q&mY=^HkE z!=`W8^bMQ7VbeEk`i4#4u<08%eZ!`2_+Dq&^bMb(hfUwG=^H*D4V%7U(>HwQGi>^X zP2aHT8#aBzXQyG)H*EUGKl=aC|Brs#zTq>~uF(`+xU%t^xOH3fArh>jeqpp`;C9}+x!hLDu-7R!)N?q^EYh%hRxrw`5QKW z!{%>zRWWS-hSwFt=5N^i4d2I(fArh@jeqp}?rM0IF>L?FKl<(e#y|RP0Eh3ihVQh- zKl*I}$3Oc2(f^NrFG+`8;IIoEc7ek#aQN;i{f;IIoE|J837IDBU|>;i|^DZ?&s*aZ&Xp$*?*44*}ZUEuIt#_*YR{J+nn zc7ek#aM%S7yTD-=IP3z4UEr__9A3u^yTD-=IP3z4S2M#4-C-9vd>=CG0*77TunQb^ zfy4JB!}lY@HgMPm4%@(C8#ufG9`=F5cX{K#`n?PufAs&+Z!>>9DnrN4~#$heTP5({P~psam~?pn()G?{dk#E@o+Hn( z=frdBIrE%*EE6=s(#&heLdG0)O&%Nit^XU1P=gITzdGWk@etX_L@1Fnl{GaD< z&%ZtY@%-cYuSfr9{h#%J*8f?*D>nJ8-*>wCtpBtA&-#7G7E`-e^X0RCi@to;Z`GI2 z`YrqNS-*8(KI^ye%V+&oe)+85(l4L&Tl?j+ev7|+*6(}ceAaLIm(Th?>$d>RXZ==y z`K;e}d-<&2JC=Ob?{CMG&-#7e9N#y`LNLDjiU`1fHDMYsi+vN7yagju`A3M{a^Hd z(QhyqgTWXF=8OI>`oHM^qTj?WU-bKaJ$7?3j*D?z?B8Pl7Hg{5R>kBkCT}rsi~Uv1 z+hX39zw{fmrQi3e`Aff%TI|wd$rbCf z{H5QjEaqbQOTX8`@%}3YCb8;@=~fK3;yc-xSH*NHUK__UDmFZ^jEWUhte|3$65FTP zIK}oU22C+&iXm72(r>*Ko2B?}H#SSLS;}Af|I+`Le%qzkF2!~!wo9>HitSQ-4;=fY z{H33(3Re}bD&9|otIAjXTvfQLc%2=tDqr<;RpF|_RfVexR~4=*zEciYm9P5Qs<2i0 zs{gD0uln8U3u_hMJBPOlZx!AuU-dIr@oq3*^)pxbs{gD0ulo6`eAWL|zhO=|tZ-Q2 zu)<-*-YH-8f7Q=p<*WX$`oHRDv+`B{SN&i0f7Sn0|5yEA^?MH!L!TJ>OS66=9_-5D_-Nrcjft}|C@fV^y7Q;c&(pr`oHP_rvID%Z~Bde@=gCY{XAHBu<&5{ zrvID%Z~DLK|EB+&e$#+(V&TNXiN(+;hDPDUVr&$%ffyUb_w+G13MUq$qj-lJPAr^Q zII(bI;l#p;g%is+{f0^TrvICM(}R4||4sil{onL|({H$x@A{3G@?Ae)7QQUs^?%pT zndQ6w@A|*%|E}N2DP|1$uHTd)2I?_-ih+7qv=~3d_$kIu`L5slg&3~Ka6R7p=DU7g zE#LKj*UzoxyZ-O`%_8z$|9Ac0^?%p@T|d_r1F9HMg>B1s{pJ&4+`_oUj3SI%yvK-v zRle&tvdVY;-}Qgj|6Tug{onO}*Z*Dr5B)#%|Iq(K{}25?^m`W)PA;5W47Kt@{}27% zkHqjk-d)Fgl6Zd|?@IDRKS!4z`hV#Ep`WRXcYWdN@|*i}&Mrn@`Jw-Z ze(wbHL;nx`KlK05?;T-&==Yv5KlK05?|or@=>MVrhkjNs<|FYAF+cPhk;RlGKlK05 z|3m)|{Xg`3$Cw}bf9U_A-`phLImT`e(xpoQ~yu> zKlOWeAJ8D6L4NB0ssE?`pZbm0;vHsw>i?i?i?-{qJ7U;1ql@=O0O z{iZqjrT>@yU;2OP|E1q%A;0w7E##Me^PT+C|4aWb{lE0vGvt^4U;2OP|E2$z{$Kik z>Hnqwm;PV+35<9j9PfkkOFw}T?}hVAKZy|%Bi`A?I56H7$GhVE(*H}p_jvKQr_V3_ zzx3Nrgw)7H|3p8r5n>|~{S*DYwVL>YwVL>YwU2^vqQMRR2`J(PxZ4 zGu3Y!lc|0au}t+Z=(iKjg8l{l-tERXG{&JZA>ezUkN=r?rDqJCr7 zEb3p>Z}b|Y*DUH^)W4|T05;xtXHoy6{zd(Z`WN+^*TpzC_7$>i}?XswUQU9WT z`|B9aW>LTKY}_TzqW(qwi~1M!FY33=$fEv5{fqh+^_%U**fxv$7xgddU(~;-e^I{? 
zZWi?~>Nm#CqJF!lEb3p>zo_5e&OCNdG5^b=eq-G%>NnWUlKv(AhPyEhjIC6b^e^eR znaYxWyQwVcx1Gw8{w4iO`j_-C>0i>nq<=~Ol72Urv!s7X|C0VC{Y(0n^e^dO(!ZpC zNxvy#mh>;_U(&y%-##l#`j_-C={Hr(lKv(AOZx3svZQ}W|C0VC{q|g0(!ZpCNxu;_U(&y%e@Xw6e%qHU>0i>ntbbYmvVNoGm_)`TGRyjx^&2tAP9}CTS=MhWlV$zO z`j_=D>tEKttbbYmvVL3pEbCv^zpUTxKFj)-_1oEGS-tEKttlv%}%len~FY8~{ zzpQ^*|BC(<{VV!c^sne&(Qg-*75#?Uv5(7&es&-$`VFx>H{#E^}`d9U@>R;8rs()3#ePLGh zuj*gbzp8&#|Em5~{j2&{^{?t*)o=J7d&I2j=Qgsce^vjg{#E^}`fU}ns()4gs{U2| ztNKlNv#Nhp|Em5~{j2&{^_%u)RsWj)HT`S)*YvOHw|mT*{x$t;`fVVyrhiSpv2ND% zujyaYzovgp|C;_a{cHNy^snh(({DSOHT`D9S<}C!e@(wJf7bM`>0i@t*q=51Yx>vp zujyaYzovgp|C;_a{cHNy^snh()4!&FP5+wyHT`S)*YvOHU(>&)pV7&heoiNA`T>Hh z>0i^ou76$sy8d|GNHl{q~<(*T1fRUH`iNb^Yu5*Y&UKU)R5`e_j8& z{&oH9`q%ZZ>tEMzLz;E{>-yLAuj^mezpj5>|GNHl{dT5V*T1fRUH`iNb^Yu5*Y(@v zWnKTee%rjP>$lI#y8dzx`g;^{?w+*T12ELqBPo4gDMXH}u=M zW<&pm{tf*b`Zx4%=-<%4p?^dFhW-ux8~Qi&Z|LWsVk4Lh{dTh1(7&O7L;r^U4gDMX z?Ps&0e?$L<{tf;1wAs+Vp?^caZEZI6Z|G;1vY~%N|Azhz{dTw6&~JMi+uN9@XG8yn zemmT3=-<%4q2DGq8~Qi&+va9N|EB&;{hRtX^>6Cm)W4~JQ~##^P5qntH}!Ao-_*aU ze^dXa{!RUx`Zx7&>W5Kc9-mG9oBC~lv#Ebm|E7NKDx3N@^>6BDu(GLtQ~#!Zd*N*A z-_*aUe^dXa{!RTR_u16Hsee=drv6R+oBB8PZ|dLFzp39YFPr)|^>6Cm)W4~JQ~##^ zP5pLz+0wtIe@j0=lP&#Q`px>YrGHERmVW!bZ0X<9zomam|CatO{dUdS(!ZsDOaGRB zBp_S*v4Cvp-_pOOAI!;?{w@7m`nU9N>EF_i4`fUKmi{gMTl%;3Z|UFCzop;CFkAY! z^rHsZ(!Zr2^vRa~E&aex>=3i1A4SNP{w@7m`nU9N>EF`7t$$lT7?f@O+xoGEZ0q0F zzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y{%!r+`nUCO>)+PD zt$$m;O?VKAZ0q0FzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y z{%!r+`nUCO>)+PDt$$npj{Y6}JNkF@@95vrZ_}F{{X6=1^zZ23(Z8dANB@rg9sPFf z+0k$R8>S;W`gip2=-<)5qkl*Lj{Y6}cA>F*&yIfE_b?#Y(Z8dANB@rg9sN7{cl7V* z-_gILe@Fk0{vG|crPvm{$2gM`r(x9>bGUhu6}#g?CRguZ|5A4EW7%5_3!H6)xWEMSO2d5 zUH!ZIclFyuXIKBO{$2gM`gis3>fhDBtAAJju72C;0BW(H&aQqN>g?*@)xWEMSO2d5 zUH!ZIclARw+11YyWLLkfhCGe;dXiyZU$a@9E#uzo&ms|DOIm{WiMU({Hn# zJ^g$7_w?`S-_yURe^39Oeqbki`uFtj>EF}8r+-iXp8h@kd;0hEgFV^Pzo&ms|DOIm z{d@ZN^zZ54)4!*GPye2NoAK=F-_yURe@{Onkv;u;`uFtj>EF{24P{TiZE)+SEuYX_vzW#mv`}+6w z@9W>!zpsB^|Gxfx{rmd&_3!K7*T1iSU;n=Tef|6T_x0O7$L=}%`uFwk>t{Y<2c3QW z`}+6w@9W>!&w^xMKOB{P{rmd&_46Uw*Ka$Wef`i>_Vw@U-`Bsde_#K;{(b#GRd|vd z=s(bZp#MPsf&K&iuvK8I9OysLf1v+Bzx{p=^dIOy(0`!+K>vaM1N{(X0IeM8KhS@m zAJ)o&{saB|O8Au==m)rRpx@3r2l@~6ALzGL4kM5Q{RjFF^dIOy(0`!+KtG_F1O4{l zInaNg|3LqN{sa97`VaIU=s(bZp#MPsf&K&ihx!loAL>8Uf2jXZ|Dpav{Wj-0)PJb| zQ2(L+L;bK>4)q`Ew@J^Te)ueh`VaNnsOM1sq5ebthx!loAL>8UZ@->H{fGJw_1m)N zQ2(L+L;Z*P5A`4Fw{g#*epVuf`VaLV>bK+0q5ebthx!loAL>8Uf2jXZ|Dk@qB8U2c zIDh{WkN!jbhx(87v+6n0f299NKQtE}BS-p=^dIRz($5&>NdJ-kBmGDEkMtkuKhl4s z|49Fl{v-Y1U5@l0=|9qcr2k0&k^Uq7NBWQSAL&2Rf299N|B?P9{XA2S^dIRz(to7? 
zNdJ-kBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<>1RuFr2k0&k^Uq7NBWQTAL~EXf2{vl z|FQmK{m1%`^&jg$*3TK_SpTtpAUMbRkM$qxKh}S&|5*RA{$u^e`j7P=>p#}dDCAiG zvHoNI$NHItFfU=Ya;*PY|FQmK{m1%`^&jg$)_<)3SpTv9WBteakM$qxKi1DZ1X9ki z{$u^e`j7P=>p#|itp8a5vHoNI$NEq7a}qhxf1>|HKQoaN{U`cQ^q=TI(a%xjL_Y+b z6a6RpPxPPYKhb}p|3v?Z{uBMMWw?u+=s(eaqW?tyiT)G)j89JVpXfi)f1>|H|B3z+ z{U`cQ^m7|I(SM@_<-YpXfi) zf1;l;%BlWS{ipg*^`GiL)qkr0RR5{|Q~jsOa+g zs{d5~seVvAr}|IzpXz5&a;l$g%BlWS{ftvi^`GiL)qkr0RR5{|Q~jsOa+gs{d5~ss2;_r}|IzpXxu=f2#jXKlhq5{b%~m^fRzI(|@M_ zO#hkwGyP}!S;Cy@XJm7x|4jdx{xkh&`p@*A=|9tdrvFU;nf^2VXZp|dpXoo-f2RLT z|C#q^`GlM*MF}6T>rU#b~5Ms&-I_{Ki7Y*|6KpM z{&W53`p@;B>p$1espVY%x&Cwgyiv~epX)!@&mQGmKgSjZDd+mn^`GlM*MF}6T>rWL zbN%P~&-I_{KiAJJ#c_e0>vv!v=laj}pX)!@@8CfAxSZ=}@ z{!9Ir`Y-ig>c7-~sh^jP3k|u{f2rT4hFt1rXmhFmQvap?OZ}JnFZHvvxzvBD|5E>@ ze%Bpxsh?jAbDK;3m-;XDU+TZqf2sdc|E2y*{g?VL^@ z{!9Ir`Y-ig>SrXw_2x=H+nX!>SNgB?yR;U5GFSSq^k3<}($7=oO8=F94mcceuJm8& zztVrD|4RRrets@j`mgkJbh*-hrTH(ztVrD|4RRr{ww`g`mgj~>A%u{rQdPpT%Z22t^Zp8wf<}U z*ZQyZU+cftf35#o|F!;W{fug^^yuk~N+zt(@P|5`tnm}~vl z`mgn0>%Z22t^Zp8wf<}U*ZQyZU+d=>bFH6=&5eGy4RWLZM*ofe8~r!>Z}i{jztMlA z|3?3f{u})_`fv2#=)ckLHbQRn-{`;5f203K|Be0|{Wtpg@Z9LX(SM`=M*ofe8~r!> zZ}i{jztQiyNpAGt=)ci_qyI+#jehr&a-;u7|Be0|{Wtn=^xx>e(SM`=M*ofe8~r!> z-QUiQ{u})_`dRhd>c7>0tN&L2t^Qm6xB74O-|D~Bf2;pi|E>O8{kQsW_225h)qku1 zR{yR3Tm85CZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t$r6>a;yJV|E+%JG`IS1 z_225h)qku1R{yR3Tm85CdHdYzztw-M|5pF4eg;3c`fv5$>c7>0tAD0{rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rvFa=o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$ zJNA%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj z{dfBB^xx^f(|@P`PXC?$JNoqo!0hN>t~=d*FV=k*FV=k*FV?qwpr%- z=lbXR=lbXR=lb1%h}$Wd>!0hN>u0Gm*FV?qno;KZ=lbXR=lbXR=lXf;%=ORpbJua3 zEOY&H{d4_{bLRS4{>=5eSspe!?v}@eq|Ei->%Z53um4{Ez5aXs_xkVk-|N5Ef3M#O z{y4#(d;RzN@Acp7zt_)F=U%@XFuB)%um4`Z8v?o4&x7Y)|GoZu{rCFs_228i*Y9pj z?)Bg6zt`_-Q||TO>%Z53um4{Ez5aXs_xkzr-0Q#Bf3N>u|GoZu{rCFapUJ&`H)wLN z|6c#S{(JrR`tS9-Z;%K55BeYUKj?QUDi8V}^grl-(C-F99`rxxf6)J+-|db(=zq}v zp#MR?zdb=7^grl#Z7L7?AM`)yf6)J+-;KLG=yw+<5BeYUKj?qZ|DgXt|AYPq{SW#d z^grl-(Ep(SLBD%CdC>o$|3UwQe)o0qp#MStgZ>Bo5BeYUySo#2ck-bBLH~pP2mNl3 zVMSl z(pDbzKk9eqCy)9c^*`!=)c>geQU9a5C0$jKm33A|M36e|NT96J^K0o@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe 
z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36i|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#S zU;e-RfBFCN|KhYfBFCN|KR;5qsDDxaqW(qw z{D1lXF6v*@zo>svKmT9;zx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-R zfBFCN|KI^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0q9KsO#e*(OusV#bq1g_{WJYD{WJYD{WJYD{WJYD{WJYD z{WJYD{WJYD{WJYD{WJZ}0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhSclz)2 z-|4^8?+iel0jM(obq1h!`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{muZ?8Gz39&-Kss&-FV4P-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P-s`{Df3N>u|GoZu{rCFs_228i 
z*Y6BKodM{*{(JrR`tS8S15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfZprB*MG17UjM!Rd;RzN|Gz5kWH+*`fTAeh4x$FddjmiS1|TxiO*T}}6eAGg zgV5R1{REbQf?|+2BP=WIZP^hoZnGZxANn8qANn8qANn8qANn8qANn8qANn8qANn8q zANn8qANn8qANn8qANn8qANn8qF#y8=3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b z127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A z01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk| z!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&A01N{#48Sk|{{6#g>(Ky40~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPVBhMe0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz}|j8X#k@Ej0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP 
z8o+1(qXCQtFdD#U0HXnn1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1pdad|0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180R2cm4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180DY~W1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0rV67G=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFF8!DOOaG;x1`rJ(8bCCFF8!DOOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#UU-~co zm;OutrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Z zf9b#UU;1zTxBgrIt^d}4>%aBi`fvTW{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q$ z{kQ&G|E>Slf9t>X-}-3)(Ey?WL<5Kh5Dg$2Ks1180MP)t_22qw0MP)V0Yn4n)_?22 z_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt)B)E4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1L$Y^ zX#mjxq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCkSPXmYs z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dnn`)Iy=**Kfn` z4AyxJe=wMRF4SzRyX*axm%$d*gn;-wuzn8Zgzxux$?s%;KeANFQ^M7~# z`29P7`u?47-TCS-KK1P(|9!)qw@3Q#owrB&>aX8@%D--W_bK1q@hR^tah-Rre+?e% zovX?7&dbty=LG(F=XmgW=aAcZ=QPN9?|r~|@5%4H_m_X(JB@hWJ5qDr+k|`G+a+jt z{lWQQ&BpLsgITci!PC$A;6%IOYlA~O=YvxM=gIp0c`_M#p3FL(CsQ>B-^-Ip55q4E z-d3I`&%x)(G3@i?u-AEVdgwfT>qq@$!|r*q)9F0fx^kZE|2WSktk1KV$n$J!<~+Mr zIM4q0&NCC>JllzXp6v@i&o+1%eDN2{>*vK literal 0 HcmV?d00001 diff --git a/keras_nlp/tests/test_data/deberta_v3_test_vocab.spm b/keras_nlp/tests/test_data/deberta_v3_test_vocab.spm new file mode 100644 index 0000000000000000000000000000000000000000..1c4aa4bbb8379591dd3efc9f4937a15808d7ba93 GIT binary patch literal 237831 zcmZUc4P2Dhdf=a#;X@-IdJUHtMYLYlSYwSfR;_}u>Q!r~v0Q4bVU0CbE%6$zQsOnp z3=9keDIrLRK`!A=hOe1nz+r}uVP;~DHP)FL*RX~)tWJqFR_(+!tg(i5{|8NayUXwQ z|2@w+?|IMnbH3j99iE7LB690XD_3VF2!BuMo>1;pFJ}BFcgC8R{*ycN&o(@EYoCaJ zB5G^;%8Va8BA$qSBI=*tGaT5xGvTRUb^5tJ4(DI}YG2+j6W&dT{P?lQ&h5WCu=|n6 zqZ|Hu-X`*T`v-KOk{kk$hbz4 z33ibuEF!VJA~9Z(c!)!vbXsKMNs-Axk)NCuNf;1$@`6a>kjPI5MW$S%okJp1Z;L#A zLnLWLWZE5(k!Clbz4R%WL{L+4V)d{Pj2SvgeH$+5KFM zWPcMQfAd9*>|GZtuTP7WeG`8ozkEGb)>Do*VkXP}mdWy~TazXCnTc}X#fg&l*(CYf zCnw6m&weUzemhZikjAgS{iz&6?uUYWjg-ZRex+V3p$Vd2^_0k;d@QnNoyh7}Mb^F( zB|kT6Wc@QyvhH<_Y*-R4fBH?dq`wg@FRqT3jo(Dbp9LbMje33Q*+|Lw@Co_ziII{i zPslG`dO|jTHctLx;sn{UZk)W_Gfs??ME-L5IMGvnL%qml(p*SBiuP!waE(?Bi?pH# z(_WpFWa`AQly=#vm8Z$$+nNX|8_-GVZJkt(7qN2hv-lrD|3hTi0-aQ$2fh&5s^WxG zPm6vdb)(-&T(QKNDpHiBlfXo+1SwbH9Ks*aiP;b(=LoCsBoA;A-M}>qZF(6!@U>2Y zuWBWexLRnpt;p@L3x-wLJ;d{6gcMHF$W7dP@xP1IO%?qFc!2JvjshP>NRV)WaT@8x zZ6%K-+!K?(i_oI7L1l!{3=gI%MsG5N54&9YR-z14*H}S zcPp4zM#%>5*$R5n%s}n{drp*OBmbT}2ELky83W8Tr~z9IEdh!LKk@ z@jS4NGC_U)cw!M+YC_l{2aE+SZbm#=rgI?Eh6{0uZZ#;pj 
z-eI^vJcfM2QRWNA6#Gh@+{Dk!n53JIEu-Hh zo@U&7>cNmC(urO=Po$1AG}quq+BD!hR&u7sRV4R(RUR6f87o#K?L4SV(c>wuBwA9Uump}`B#}+Lmw52ICp8g&$ zF|=W!CPs>I>uEa!V@EUjs-%sN+!m=pAEpfDo1T~ zDeq*M3$tJ$OaT2H$^;L&XEJ#-9%ODq8q(=Ijf_9|n@7pt6554%c9yzEeFWdq(wAao zHrHPv{+6@xVp=NlKdAe%#(0UsziN_5BiDxTJC6IW(Dl9X*mb;ICf`LB@p2#kxhh@y z*+S$@+{=*jkcN8VoJoDSRQ-_t*QD7M!?+tGO+ni9v`7YFD-H2-q%2;P-7e(5Rrswy zcAn*4;@ygS1DI&19moUZV=33ok>uq*ZPKEXY!wE-BkO7BJJ=WQUlVQ(eySf7;XVWb z8~yB4k*k!+v`1tZY4Bo$)Z4!(zg~@O;+`tYqyA*(p3S&R$%hrau1X{e_YJ}-+aEwb z3#Z^b6s;jH;xK^TqLEd!QArN@=9-bbl_!Y2O*zV_>r%p2lK(wi^TH9*XlBfATCb7a zU#%rD`bOVFX5pxQX?Oc<}&=s zDaStUYazWl^lHGSCD1{>v1L6J1$E-Y-vg&0(9bx*JYB|k5SXU=GV6oWv?+EISPr|< zS8JsxUF00$O@p-EJkq9&2KrGUY2}jUWv*R>VHGERvXuHL4L!=ET4Mx^#PgCy%xiRF zT*{b6xVtLsm$chDt?VcNx2XF=#8r-Nrw&vZ9}vcXpZNl9jsGDCltzdlm$<>GuDOU` znD>976&?NeZTyVb$vemixFi++U)MpWPC;di49=4;?_w?&U z785Rsx@sqkTAR#AUj*CmH^CX!MSo73g-O)kLFVr%(Ts(RXY?o29O|zsT1Lp<+k5Cs zgHck7EE{3%$vu_K-A4w2_#O~uKJon@WK|>atRx;#c6^>R3W?_u@(P%1SRWKei&_%| z-l5&EMoQ-m@=iU!OuYXUUW2~@4On(Y%XRJx^ys7|Gg`vvZ{U9q8O?Ye4-e5J@H5-l zGa!$oM9C9Mri(bpSK(gjgmplnhV=>c)U3*n^eZ`u^?_X@-ROar$Pa5g1MQ|iOI;$> z_^%(O?FKZ`K$s)U7iEh?{+9SvA5(K`$vo-{KVv?74)Rl2%)RJ(HMcNcB;z+W{)a@~ z;=V$Nh0<8+lXh;V&({+7-{H56`ZT9#WF@j=3u6WT%8t$;?Fkxr7yre?SAt(D;b)`I zhi2MFA4_`l0n-BNb3JV~<Brw`q-7<046glP&xYGN`G7EN zBlJhe+c)%^CUnDT>>pdVp%2OKK6`_%TBupM*nk^## z9o<|(nVw{9>ZLuPxtP5o;R|=tE`(EQ{BIT4t!Wbcp8X4c?RJqbxwjTQ@GA|~Y381G70)Mx zC$9$TTt6uCdt6HaHTM*)q0b`K98{8}k$cpY5gA4X*;59-p}%ORN!dY-L^DoSW-`ZY z*T_Sz8-}KfAt6~BNW+koEJqBcW zYA(Bt-CESRh<(1oK0Jx@3F2tEFEX3!zmL9+_;$j4+-m)`7-`0Ce#W&GDvnLu|1I}_ zrjh6HQ|qVA`2P|5TI8Q7KknOxz7b4!SkDiFTGv-$xA^4||B?IHJnMT?FaGGuxi9b< z=LFdO5yI|M_g=u3s<=KBB~>GYmk0^oVqDlsd8n&D;Db8mg@g&q`ge!b)-H>7}_zgTFvj%#WLu8a}MBfG5U>_9G4*Ho~ zqx^a>ps&EcQq=?bGBCgXiZl(Q#7Dh1;9m>fpr6Tg`lXq6^W#1P=OMu2G5CU3x~J*n zP3|?Zx4we^C74D12TN67V&5?oIc8T=R6SqS%5|=nQYVIV@&do$-Y|ZDiL6>0Eq_e7 z($mq>g8OaOEcdupNB&LMq9uO4Z71ATZbLH?Fwx}W1`9HHFLaR0Lr@(=hq7{6Qc8J{Re+a8T%Qid+p3$8izkMSCb zB<_EW68*oyiAaq&Q#I009Ntk8-ynMd;^@(E{sn!ooHSNJNsNfIUMDBYbmAtA+7G;r z|2{|{jjxFBIxOK@SuwWuRkZjUbuy0h{)Z|Ll^XmihQ1pkWh-_k)u0f<1(fqu@>R6-i|q_Od`kNEnyr#Mn14}BZ> zis>73I15OLmu%dHGvlRrjz|~j6f;)r$Ir<=_(XBMbe`2n2WJP}?2X;@9T#+gVJBk{ z6ww!Z)V`X**hoLnhe&rQUJen*4hVcoKcV9mGbR=%FlSK46%hD(iUhl+$VXWkd6Dw} zHeJhpIbJ@@)JP-u7t*FV)KMDT#pd=QA0U5?yoc=KdUY)Q1N|Ww2V$i7Mw}d_aZE#T z;=%72&k;@`&p|h=B3(bS;Q(i0$f~DExPrZsA&&KboYWklPn5+;?P}%}>Y=qqI2^*x zZ?k41{6>P`L>6y}m!dMxBxqE_nmG0~aT3Pg!rrHNJ9Y8RlQN4XNAD+`*i%N*Ld8(mMEtGT~0U!U|`fA|XY{IFtC3VI$AHAABc61)+ z3S3iR7C(;1iF<|8=^KTFEnXy~$9*Br?6_uzwbZFfa~HCRJdMpubLgMnFqU*^Z+G}Nu!AI zquPRQCyoK)s7JOkXAt&f(o=1vKMOAW4U8xF7ww7Q9FBJGVmwiCGVj_5Tdj#;{NSwV zTiTDZs(c+~PQ64PF2fZV+rP8!R_j!ChUVaYRo8mfcWR#&+)W=kN*oi%bDsKB3A`M~ zKA(Q^WGv(S6Z8+xvDg>|t|kKRB@-n0?YOc2wUs>RH!-Ke^%>;r9r_P#6-EzyK|Hru zlPiD0pEQF!R|;~b5c~!4?h4b)BX zc+R7d4+x`!VVD4uVJgH!G&rfVL}W4qdaxJP9%@2WXD+J$&%$5DRZKsKCago%3v<7L z2~L$;?Th2_FW$mgEAk=Xsg$vDRx5j_ZHzZKqe3sa#{N`Yr!7_gHLL4MA{+6uDxLb> ztgdHreea|6tJl!(gjKe+f@`&e8?!&+ZFqeD2K<{J-=CrGzd`-oq3;r(L;0)x!&a`X zQrGu#ojO&0rO2X@$twSpSEZkbJ^^MEF7PS!@ugNyQ09v&-KFFUE~5{_CAgyeDT6A{ zb@aI^{?oVzXcM(hV;!VNH{e&yy+vmk$B5sh>QVWtypirg{B*RJN!^d1>eCZ&S0AJN zz09eqO?MJD6?-B~brxsD%8zitH|XaFBIT%Nxzuv40D`PR%T+i~?No;>f@P#LU!nX{ z9_1;e0{ACru2nk@z-1aLIBB z{9?HnE-aVo`ZNiwBdn@J#`X-ZAMH((|EkK}$bC=`#mEwH% z1>|=j^{1Zs&1VWTFsJL8BUJqr29eZJp_*gpXZkI`T(8={oH)kv&;Fv2a;Y=HvFmft zbEsczS&awk{%XRiFP+&c`YHMI(56NBsW49B7#sH+ALU*3VSFTB9LgMwl`X}&{Ws~%b=?`k5XhI}8bStfV6{vPF0 zV@Wk-Jx|yN_$m9ngno2{{Ty=a+-ivYTw_e&dRT?IA&=4_Jr({O;qEDa>hB`@!^iPl zM(;csDILtGr_fIzU4xO*6=bd^?`jO`Wv*^UuY;4+{Y6NkY|+Fq0Xc{}P^y(l=)tRL 
z^mY0?cJsIRGnVSt;$H-M${GBe>tCe_OE;bmp!cM)Zeu;b8ljMMO7`&X0@sR}^Hu*& zQ{ld*UYDal}>KgfIRuUU6rOis`(61PO zGL#=u< z7vs+-$d8fwN9A%7UOk%#yw5oC68evlJQWEx%FWn2SoWnUBzOK#Ncn(rg_8@c!;@f?tQ`d*~n*(>yXPakO^F6!~`@mJ5LOll3dL@U|20})y&Cr_1J z&%<3gg?utj{2IN2y?ZHfzRz>)W6$cO2EPxKExo9vK8XKu{k^Z^d7R%~r7M5p>qj?@ z@Ls~_I%#D-9mYLKnR)F5nd=J{O1#?$*lx#63vdM=AGL=ruDLXRwFAn8;r3 zr)-T9MgIj-#gU5c`a&mN?44B{gD-34Rs7VwgTEjS(qD}G>H*F**n9UUP&de{daYF8 zZ}^aQzonIrKIPo{6|NJ$L4`MJ)%k~*(Lef5D_6he+$KQTKjRrBe$J~pY2uz{^6)O} z%)i0UcTyw=S&MuVSpX%V>?hE_LVPUPt8n+f86#fwzHej1^NSeiLB6m2p}QwWK1RBD z;B?|co>P7pBh9!wU(iWA{oa9%_eAo(z&hM29JH*aUa13?>-uKoN7SLEl)kb^C*7$! zd6#^;kjJvv>!&jYVPhZc;S2zI92$x>@*DPcrsG=i*+t&7h}3#%|K~Y(JF-{;JxiG1 zmPkX5M%?tPdcqzb;moB|BkgZ;&PKYQz_C=uZ)6>P=Y#Pg{|$W*uCCLncj(0SidO8@ zqdX=0@9~d7{u|Qts!m3b4%*9yebiF#vkpQhquJiJQ1IO+FletibN zAFe-(J_CLRD$SB3q>l}$v}&;X_prI=xYqSG^-7#7ze`lu&$ZI^EqPLT`A{p*KlVGu z{oP#Sd`_0*_X504KkR-Ud!s&A<6Z|&uD|$LZbu%c&gvlp{Vzd1L*I?u55EQjG_0pT zBP-!`cn|8~3JkzE@GtNY6tNaE+}Fqn^j`QF&cHu`74%(?`b}sY^-I3);m12tQbAik zKo1Fhojx#pbfT#JM{OBvh$k6G-eioG2-*C{Je!AYumg5MJ#`g6vRjTfY9t%?KKSi1 z_UOnwI0OaIT*ev&X@b`EtkIFSR@QjPYN!Qg3Vj#Z0A}cBzvo`c`OyOAPTW@Lz0W#e z66^IToL}H}s&L=|eFt+-Df1C}5%ZMs7mSs!M93-JMxKZ3hnP?2u|8oO^3k`>;eQ@X z*iy;y2yrs^Uc`L~e3aKWEmGX930#9Zxs0EOIk@i*&lX?4s4-sU+>kYjk857y?FSzn z--7Kn#p>h=;f-{}VdQl%bDxcQ+z#!8aZJJe(NwMd%cr$%q-#y#{uif+Z{5>!lkh%H zfV-|ft#y5#q;)?xT^pv&!uW?i7EJ*`|q(?RYdyfOwb;i7*um9eb%n&gZ86xfFhpC&e>95aT;O zF^xNqjr@z0y}npedi7aN`BR5AZ+|gaQ*qUzIr7b4YAV0KsrkE?D>WShiPEXrEhj3n zIfKoXuJzgCs@W&W#C`P*&XAF_VJ^%EJ$9tdlazff#Jvm}En4|4=i%m+(elyCD6tGo zm8PMo(tPr1Y2mqFYbaUT)+dWqGf`|Klf`bIEbW>n#c@ATS{6h}E3~CYiFHks*pPPi z?Clj%;wa`kUyq$XrIQt;vkKP22G|H0(8D=?Z*H{op1AgdejpF%S zl$jzS4VseUAL0E3QT}__~9Hx_1DR`r$RE!g4xiL#5}!HCv9K_ z8`#%l?>X2z7-uBOT*A$Vg|HZw!3tOfJvSKJMnw8X**D&y{;Q~e@G*A!p(DieK{x?U z=mOVm5%(Z-4GhObOK1i>#~3~?TGkTh2G|H0uo_guV zKK;{@hjioTf$w;ki7#7|%L zZHf{vvLD;@?8PPyMoDi@l=L9S_Rn_40}E#c#9s;3Pz!b7AY3~(WamB`SPj_2G5UN} zl(Zld31fl=FrQ8mGtz==f@bs*!Twoawau9>R2ij9&cS)O z2$$e8T!CText1)wx09t0>A6pwH;B86G{9exEFGD=ACNU&P9UA5$KRKKE@U& zaDO~ZhD4YO$uJ9g=u5qOAN4U0V?sY}FK*Sx{Pgb*3*(QS@dxSjGX9{uh~Hg9-^*hB z7)X-Y#4{J>!$N3EV$DPQFGjcS(MsEPtt>-d0gf%KgYdJ1Ek`S>@LLNTU?XI}X4neb z;CJnR2kvax2PW3;C9gz?@x=()k2?>H5s@-HgZHh`LoL`0GJN!DDMFf{94et2YM~CC zjXG&SnxPr2;Ld0L0Sacn7kA0m5n^Ijn$MJ|`l)eRau&pFd%#)Y67R|K@5D)G$#-oZTnT+mXoan=yh&~nk zBaBD5y^JIM$YlIx!EBfd^I;(@hOjY7mLWr|3s)eAzmMQ$H0mws(fVZ-el2$xi=Zu* zu_=-<3hd(IUj_XS*&LAUd#?1<=s8p!_$~=W9h#+ z^k0~YpLs+l$;eq?WIsI@*@V4WuIXexx)nQY%Vod(DfRbktT?El&c4fthi}Jal8(KDz{ua_FoVA|xDW4s^9qI7K$vM(E4;SGQT!t$!4A-HD{P&Xo zK0EJ1tf79XTQAZFYJJ{un)HLDk91PDE~Lvv`o!IhbVkO>P2vf|UAPDCSi-;qbkBI= z*2Kv}blnS-Jtt0lxV`xIPl}U5_86)S$DV`kQZ4Te7gf?ex)|*dP7WwwJ!yNgo9V7&)JvOk9aD6_Q~V%!awplY{-in4Kg0 z2e7{(><{S=Vt*Hy|KJ2Tp=%TN2X5$w`NZM8p_PS5pFV=~Ijt;4Uj}32$0V(IU#?sI+P{NqvwdM+zZEIfxND&f8X){? 
zoS2c#V1?pW^2LF4!tiUvjSO{>CuI1;IPoL98Rv}OF=nHmfpg$w&dEcbhl_9t-1xbe zdoQDV8kxhGU$3AKgP%Qv&(7Qh{Vv|C-^+N+`)xn;Ki?}lxlXu}@91x@^Zgcj81BM7 zcmTtzBjh16bcFr8B}#NF8GB&@#6uH)7UG_aZe=c1`;|oWso)?l?emx;!A2gF@pE#{ zFbg>w-0P!dE^p##T?<6{OM0 z{0qytX9cW+wJ=w#-&B{)1CCwOx7RR{+RtEtzZK?V?cX7Hd4bL zmU{}I2ux58l~4_}&_kGB!uB;{r_i5+{eutu&~Y34AHn{?c^do2)?6;FxI1}w73zqi z0nCbPJUc*IkWC@-gYEz)w58xqC4ckCUlRFCAbkqEGmb_2nu2e7D|` zbI6i)`JBnIHbK7#m*6tA)Kh-))pnBj!G>-R68|0Ihbx2`hU;(>&Z_kX@-Ezio|D)= z^nquHdJR&qBiR2a^;}Q;uf+c01UR7!T;PUo2rY?{2gEbXdj=1YxM+&ZzW1e@iytUJM(dq=){-`FsZYX4nebUn?&Z{}gvB8T}_C11+%uY_u-g{Jg;u|OTVwfL~KK?AxO9O&&} zuQ@C>P~(qt!QmgrANQRCG4foZnXs;zq#Y~}&xL|VS1vpttw4$LN|E852xS^^b8)B-Vo`42l}s3wi}f3*kSQQ zNBUvD%RVe8zzJO=hsAaOu(%1+4QBFlj(E<)MYse_uES#4Ss<6utu>Ut?66!x9|i~c zYNxz*>c9rq@l)l$i424Lv_|eC?}6(A{qijR5+2}w2tN9!zmtB7pBGw&@+p5l<>Lj{wvU^{{&y1gKLz`riTy+W0Q2$?;~3H(Oe9bp^&r$|-_aeraNY0Yc)tP8ti1#wl9pGo)@?GHkgE;%~PsGmzPUwb4O@w?n ziFl@n{PuK&v@l<_GDo%DV168B&biIpGr$})#Qezo*}**8nHkOb8fT$W2TseC3fYb)xkoV^8m#VDY@=oS%sUAEa@22mO`fG2> z@uWl2K;C}S`=%UADv;!uP3T)(NpK#2DtjvwT_ncwATEXH2(GxO5!Z+ZXQq8Z0`yL{i$I$oN*|%g+8KRv|(oW!85Fvi(r=GoV!cM!oXlG=ng?6o{ zT}NnVbXN^+vnQH!GuBbi3hm$k8>sIqqDf-{#KUArgsG4Wv!G>Cq_ie+#;}xg2(ZnI z6g&0TzCKbMD;XO0_OlNTDgSVF@?Ob@3kXsC)xL=Q(u|X7dSWZI|YPsk**uA zpqE@_9TUO+G>h{Aez&225&2t6{?Pq=m(h~R{y&Q}YS<^+iPJ^gUeZ81a1Rs5b+`#( zxC{5-0X&2s&wsIJ5A^S$j=?uX9S^Yn8=(9a%1>O*M#_)wLU-3we(K+x6eqeh%&~9} zo1B1bN~Qb|k8a&g-YD;6^h9v%A#eEEbII$$IGKuHGR%V6Fc;>-LRbuA`;`RhCyDhS z?iH{KjJ%(q#)si2<7ETx(A8KmG8dP0&{v=2oE5(e*bG}?8|;Aa$MLcYnGO43Kjgt7 zaPHMf0aBg4yK{NB3f%>@Fn0d_D&y8k)&*x-C$I*x z5Ab_NgfXw7?;&50-xT)0r`i7ona7d+tP8wIA8tRgV+;FV zI6+)a=vvDDH--H#bd#>RK1vo7-!fPMtDwot{10o&pO1w{Dy2HAFbr8jeJ?bhHi)UgOoov zO7?Nje#nDEPyj_>f^z5?p!`FWpL}`9PyaQ_f1C0n)mceLEajg>`N4^Q*9he&oEzDV z+uX?+3i~qyHmcsST}yp8wNm~@o*59omH6AvMoBfgZ#VmY^4PwBJR)tUqofXh=N;ZP z+WqK!z@5c<5I-}xGHLtuj7!jr+X}u^`teNKekpxm5%J;Y1S4alPd`n%(L+ns3e^&%t@P2$$e8n3+?rAT5Jh8Ae`*rVCoRi424FG&T(OUTnjKjj*0p zXN;|TBc){vbAK$?xu*8^Y7gRw^+cu zH?VD_S_7!DMU63?>Fhzk$=>9|Jm&rs)&NVH`zNsmKySfbTghh|Sg(=xD0xWW{4a^R ziRE|c)?#lE50fDgrb05zf*#h{z2m88!h5){e*yIhWBdOR#((C7ANT*XP1hQ1GoAfE zvYWOj`7nw1J9q|5Tw&gsoQs?f3zhqmC^5c1O+tL1uo(An2j@{pwLe&)+^gwRQ`o1V zuZ0azeCt`}R?=WSuo1WCG=PLfAtKjF-SNfM3!R@eqRpebRxSVod$ z7rHf)XVW~7&qm(|j`2MI*6@rPX(OC^=hpygU$~#^d2k3!JgX~ti}?V(NV%aLDxn%` zp$=Ls$ceV=l|(E|4-%lKffp7It0SJgBiXxT_&K1 zzDbhdtJw5&(e36I8zvGYIN`t8&^sQ#BhxUFE6 z2-Y^N${Den;NhJE?5vyOb~Ka={;qW+Ox>C}HJ^}mSv=f2QW)1{Vp zhL=p2I%ETwp&6{;04MlfjgW4nA(wdq!dH{W_H-Wn{4louc#E@rbae)J4mtcG-<=>Y z!X*fI@jV*y3Jk+_a7FU&9r6sg(QhKd;Iu@_U8HvkbJ`^4f$hv`$b0xb0MB^lyqU~j zqml9uw{AUs2PQyM7V{UxqgyvIe;wp3A3YJq=CAe4Un`lvkS$zq-9i}Rw}K7q&<+lm z%00<23ueQk^LM^q>STR`yC|>G6A@;3E z7j8GQ`yhTdqGdkuECl1nwA&Z7O^7h~FUD`IoEpyGOR+o7bNrOshue!(^|FjGW;?&_ zgj@wp7TzI4ZU77ZZEGl3I%oX2GoT%}daiHFq@2hW%HFz@^6w=a^8YQrdjDrL_icr3 zumg5MHtd6*D*7LDVIS){5A^3Swu2A+&~b@f!k(57DiqJjSaUYn_%fX+E{c5n^@!0Q7-qps>+{p7OWGyt&Z|aZ@V6n5W&%v(1 zjNA7N>-IIw?F+Euo!D_Y_B(+6s{L+=Gb+NU_kSEn_5P0&sm7vib#in2a#KUB`Mft;glM((J+rX{n zUNr`qi+N`sX(`i6GXArm$;H0p1>QUOBu1>1PnFRQHgJGyiI{suZw-U8@j>C{&yAetc4A*5!~|#0~zR^9L^T@#>r;%t>CXA?kzn3-o*27_QBim zGvnTY+yzbSkF$~cz=Ga3hkgB`IN6Uo586r3aUou8DU>sT=igq+e}Qmdox}LQl<^I^~G3{SL`(s<`oV{ZU_6H|!D_g_N!&;eajSQIn&}_4gfmZxlR9LB(xDle z?(_cd9o7@*R&2fv9OzDPY^Q7oDPs<0MRwyC{*tys`r#Cufpc&kF2W_a3|C-S@pYVB zM~2i#Y3!SS+&{m-dLREV+y$Rn1FHFVHSgKreh9h^^l6v?@eux*Rwg48VJalUESL>* zLCwGOkqcol4D(&mGGu58Yha`^li!j?u7wS-5!^}GKV+bL#$*3Wb+Q?KEByHW-+k6f zldyjS_K(f9)nosi*v0@hGK6g)9YMyYAm;<)8J{>0-^TqrARByq^R^G!L)-R3-yYfq z`l(Yd_|W}~2OX>}IxDdMV(fn}?a#Fz-~S^_$qede9AhwX<-s8+fFe+KuF4;Jg?9vS 
z4^!Xe$V#Y&S}Dd+bZCaL_cxO$54v?cWm`&F(4Fw(`+xUo|4G>Y9_AlVbF>v~ z=yqtYXP&Bwl3m=>4Ic2rDL4b?;5_tZZ!TA+eGV=;3g_j!>%@VnUm-e&(xm=mMy zRghh{T^HE@LN_??u>WPm%*QaKW^_#(n4Or z&rCWcoYR<~$<90L_+1B!jrlh*&&{9V=@Hd>1 zAVbfwSC!?mA3YBaK>-v&@$;#?JI(q5!X2DhBdehn>YxG4;QM5`G$RY^u{ls@b1kvh z=T7=?KK(ft``m(kLOW-G4(L#Od^o|L-w9plE^udY{>RwRvzPNf&LaE3!}(x8{_5<0 z>^;OeoGFr?1DxQ2(5pHLf6Mont9gEj`xFc>SAL5n^gX|4gZmtuhl^11+6po4=3D>g zXY&3x>kRyUc)##6`V|<4KGqo?=m#(Ozz^5Cb`!#I7w*9WcnIO|*njL^E}`YTBa==% zoXbQbCqO(*26KW=5|L9O8D>G#Qk_^9@ca|q${5xLzSq)ZK5jLpwS#)c!3Hf!Y0{dK zCT;W5#EP_~rimTdj@yCkCLe_(eB;k=1?XWmaV&(f_mYlNzl7^ukS2ZLf&L3=?CsLT z2mWB1bY!JT=gc%Yu{4cu1=6HzPMWyDJ%zXv@Ux`JV&Yo{D_|A4@pHM-WG%Yq8gU;> zlMUz_!GD{$!3+J6fuA{)CYzC4p$UB(atB!MP_7}$#cxOK!krE6t&|IFV28@c6me3{ zeO%iQ?m20ahdcx-pMCk1YfYLI;4T6m`SN>7n{s)P4f#vtH#?U~{hcLpeADw%w`Gxh zuxXJTyYrm9AG<(mcP^0kPCh3!Tjt5p^m$SpdRE>YoFi3R=EysDo{=ug6mbnbB_`rF zj$^-Gl^~s(gt2FNPVbaQ>km7&Y0*hJVa%-Yew&mkjeJ=8Va{^-=rr%@u8fc-%{XZe zjhD5wSL29AetUznogtp5@y(b0^dxB?z5L?FD}z<&6ua zlDMnk?|RWE{gE6=`a`Km|3jWT{}(B0olE~=jCeIdN*d>jDd7*qIOX@Hxa#-igY9Wj zM>~{!$~^X^PU?sw^h%l>FT;i;P2Sz}M^b%ou{7Wxekx7gOJ63ny(@TTyG&}b{#aV; zQ>CRWRjj?K(q>PkO;W|yn93P?D$h4l#rJiDn7OwZtl$7Abb|+ax1>tXrc|E2q)Ok; zRGz)0vMx*&KXv3ob{bNpgSegepD0UZ?oE}hoK)$??M9ksE*C%XoPslO4w~jI7Ym$6 zx2{~y+_+pWqF(|>?{dy9mvfG}oUv%RT*mJTIP)nlQhoP$9eESfcaUM^UGSV{3<7U2 zV~~sUN!nCBN4&?i2k;QWuP_#?xsbCY^}S{INZOo2aLf*CQkmy=e>fzey)k(XH%p+8`Ny7C2JagE2nX86Rw^I;kbyX#8ixT$m3F zVKFR&7Ur5(HIGyixqE^v&RBPwm6c>mn}Lil2Eq z@4+E=Koj~dWHwmHds`Oy4)HEO?)}h??qF|kyTkKoXz8SUUcve)j32=hPt zI6dnm18cy-)2!)UV?V`jDw>3EREVnpdM>d38)W?#V*LmGRjmJ*w|q6M|H_#ES@U(~ zvi{F!{g3Rz?P6{2hHh{&U*{1|5tyJH-1*G^_*bHPI*GfL-*`c<1@-Qp54X3Gw2*c9 znKe<;pd>W0mT5*>!GizTx#9@xf80(OJ6FtM?RT3!0Q->8GvovRVZPb+ApLL(&cHc1 z4;SGQTn6>-yZQz-%y;pH2l-zF_zi>cQ`Wci@0;i@q#GWs|Ji@8j-d?9zZUww7aM5g z8u)AIi<78l+E=~HXjl2Q&<^;utXV3pIZGu>e0Sj|VH*8g_a{}$?}m;HY$`~Q0O z|23RzuonpNO=akF&H#81aG15xY~q{?^I;(v`CY}u$YrnsR>4{b^V@~tuXulEy;e5h z-Uu166`Js~z&3R29LknLzgT)o|Bv+CVE=!g{Xh87{m?PQ{(mLEakz%PepbA6 zAzkV0|B>Cq>r7#9PCR*V2nxWBpKB4{AE0}RiF*qBAM|qYml3ytG?4vO@luJOc{_V% zWGytI*C89gl0&(&DA!5;*8uKjXg?hVlfE^DK1Tlh2MIrkIi4}(JYlb67Z;J2py>wt zf5xKA=vHi9{Z{A|^kHx;pln+x<0i_wT_e}=y9r^q3+mkM9&(ty`~#%_nFM)=40GP0 zdkMRP2@nsHArXcT@c%E^KZKs=eFWUeFbihGkLM?oc(#RmK7>Dtmxah>&@$!m_rGgc zi?cSjF<04{OWSi<-`Df|M;|4K?=AKv<5*uz<2!WjSp{oh1N4lj-#Fs=2hvl)p58$H zgAe@BagFbPZ}a^x(#f8`3*B{r?|%pQ{x`_)$q`2eYzF5^z864_ozLCDe$aQoF8EL1 zZ_*CF6xx8cXlmsfiFN#53}Mv!iu;h}WBl$FG7sAG=|?N6>z&j&ZPu1cpTh4D*9xEr zOi&J$5dMnad_mSi9Sl#5lLlmn-y<|5_46K|wUGvoAS;<*CDa2=Xz z`2LRZ`X;*7%Ne?h-~LCx3l8S*cGeB{dggGrhu;Hu2)a%5Pf)-0#j;QIFadWwOa|kJ z%sU^kUqT=IKH)0mKTY`uC_h-QQT|cNuWSUAtxP3sGR%V6Fc;>-LRbtv*mEy7-goWM z*<63k@67)djQ`k~`oBykzzJO=*#9W@kL>0?XBN*8h-U??g0-N|=3TkG^MJ0tDd~fa z=o#Q&O5a;R8pVtQV427B8~nF|viohw9bk@R9fY)#KO2ml`!Nn=(EZ<5V`tR1?A>0K}1U~l0<;Y5?hFU1O%6I>~Hx%YO`a0YVV1{O}f&-k;4IT)6 z%()P9_*wo(74i(6gY$3^F2QBE0>jY4Iesg&ffa0Ehjws42jfO3W5x-jlkuYq{fGCD zx*1z~*yHyuV(p!(&XQPvXjp$RR;X`){fq@p=Ev)#a}&aF7u+)$hw(SQlq4SPTYcN` z0RM;J$F_a=d7;0P-w{ivpF#LId$cUR*ZGF}cr%H$Pm<{Voc;(CARZ=zdN!Ac9HuU( zB9mbj%!auz9~Q!52(!*N-s1WH8}t#}!!L2>iChJ1VFRdVrW=tNuoC%AP4DI!li!e5%y@u!Sb0|NwWfE>L{<-+)Q+}j_=l9LrV+98|p&LBlhf~mV zgYu7%$5G0U?7vOkNY@8`=t!dc*C;>!PGlG1Tu3+WZlrUF-#8$ib8sFm!VkZnV0@V| zWhQY`PTw;;yXBf6w{IzFEFdl95B<+v!@CIdKZ|R+Tq4{RXhJv9e#7Wi%F;%D><1}h z&ZDxmBgf8hIG=YKumi%mkT;QGaHE&??PGt=~6T4>p z+r`?qdyqZKB)&T&j>WJH%*CuPkQQVUtU})a8=;MOt=AZTMj3yIw;dbeGcR#$0S?PHt~yeE=QI^_}d&PTXgH zA7Oq!#{3TMGUj&!`!HlL^zCJRwv#nSK5Gx854Rtwo~`JQ@y;b_sdrSJ$Zqg}A6jA~ zr8S@WOXnT2M&@S=^D}FdvF8)a)g8s`gf`q$|`CrYNQk}2z2WtAxD>bNo`Zbyj{CB}Wck0FyE``th8AJ2Nd 
zu|@eeVWY+`#vcWK&5`k8{6?|OX3};sifv=I%!m@(Sh1HHyP1YV*haZ#v-l@^jG!@k zZG$vsvo0DY-YinUz~Q@RLzVyrlsyqP20qY2_LHYfjYv>StDoW`fqB;({# z#UKR6ABFL50y<4V&S!HU#PAuzZw%teHt}Fw6ecJ~b^sH3wtWrSf{4tKiOQsiVSB!4 z*q&E1Yi@k@b!93>=Df?KWEz1nj87wo1F>@2PSDmHW5u7j_VIvtqi@Ax;#vu1TC}mR zcrndwMQJo&v6ENZmZRAm;}^pui!ecqz=sip%vm{Or-yyzjIC#mNiFA%0b>ke*kOs- zguNI)_c`q7o3r9dd>rJOG;@u2jEnY_BZB6dM02^v@Lc&*1LMv!EE7e(Ie~`)6AWX; zPTmVv%D%wlsleo^z~rgGM2qo9VM3Zb(Rz(RgavjCCUyc+l7x(i_kL$%noVjpr8Aj2 zrAoEl<7>Z&UkdDO=ki+)2TB-o$1q=0i7ArnW)ahbS9~@ znL3-LvkskgiKipa#{|Ma)qC*&|7U)Z?(k{)hnWAMKrDm&x-J_ zh)%q9uUPw5!~ibCdMn_0D?l6p?~3TNMSG#%YA@1T9o27jaI&LjMKrF6?iDe(BEq)l z@x0Z8&mONWf_Zu?NE|`p=&jUSeFeaAAIAfn8_I-SFm}!jaV~@m8Hu0KdT4|uXx3X1 z_gmpXy%p_#D_XZAJbEkUek+P&oG^*Zw~`@*MR-<3U`2GVh`~RF8C(fE^e2ZQ<1m*Q(44bT%z8r>IKTVaKq-_#IaERw)Psq;37Vl5Jm3W%v_mI!Lk|R@7y4ijMl|OlUoO&j zk)EptYM~B@%SBu+;&Ks}s}1~sJ}%;QbpUa@x`6my#P1@07xD9Y%ABho27q*2Lof^> z&E=qX4tnRHZw~t8pid6^aP6T=JVs_+0Xw zOZnth0C~?P-?@}UZVM1DmvB9lZx4QZDAykR_u#*WG<#x@&|DDxgValqIvJ*JhO2?N zhAG!!;u=Qh5PnC;Fid)3>M%^2Vd^kU z9fr|4O!{H+97gXj^%y42FlmM<`!G6$sn>7=kcTjN2vas;bP7`^Vag~>orI~AFm)0p ze_`q*Or3-~fI10NhGFU?OgV-r$1rsgrcT0?XP9!K?aYNK&oJc~raZ%32jNi&LjM6RKZ5=d^pBu_1pOoEA3^^J`bW?|g8mWokDz}9{UhifLH`K) zN6M6RKZ5=d^pBuFuUyYX z&_9Cyyt+OYLH`K)N6M6SKZ^cQ^pB!{6#b*BCSqGuF6qv#n$&nS9E(KCvkQS^+WXB0i7=ov%L7<$Ig zGlrfq^o*fr3_WA$8AHz)ddAQ*hMqC>jG<=?J!9w@L(dp`#?Ui{o-y={p=S&|W9S(} z&lq~fsOK2<9HX9N)N_n_j#1As^pBx`4EGmf5d^o*lt96jUc z8AneZD|2!5jH729J>%#ZN6$EV#?dp5o^kYyqh}ob;^-Ggzc~8E(JzjEarBC#R~)_K z=oLq=IC{m=D~?`q^opZb9KGV`6-Tc)dd1Nzj$U!}ilbK?z2fKJjbJWE`d%7^huyg0zJ&LG)dl*ZG9aHF-LdO*PrO+>hekpWIp;roBQs|OGmlXL=k^dC= zPm%u=`A?D86nRaN*A)3ok$_{R6r#R@_$kMZ?=_#(A$|I&KhCc9G!YQhu}Qs-{vv@b`H_x)I$UG>g`-N6hn#L z&T~LE`C*9Y?Sf1od;#GLT#yIEQP2Z~FCcsY;R}hg&;!I<=z}gGkA?Uv#9t+9 zRpY0oP;Uo_Cy=AJyPIAnm5_&@&CLP_N}&wOq0;cx^L!8-$!3Q? z+M-1lZRx_MKiYc#cy=Mb;-cF^Rm~r5jXf7_EL#83)^PExiM<+XbUusRWRa6Ba*;(2 zvdBRe=dx;{9vYz;TEPo`XopVdhXELZVHkl?h(Hu#=d(~aD-H>rcjTRSPzDZCV8e~s zZ*$&3%nr)RLCg;Cd1u*qXZ*aA0y78Q=bcp-&z^S&Xo*v5IpZRUOCfaupLaY*vOJmHE1~R^#92t2N&IfWZ#a@q+^`VJ3V%~ zc`M4UIFaXz>Gk5RcO|9X^G1J@E;jO!xpL5l1OCY`%26yPA&!X%ZQLA~;`(1{UbLaj zN?}p4jsfGBO%LZCgJ5)_lvjjZOQ=P91-841wF%oWgbXH8I|KHCT`P9dMnSF=2Um(? 
z)GmpORTsv=q{XGPNy;43jLuZ{`dZsLCkfxDQya;ninLJ~odZ-p- z<6@waoDBy1Dlvk3H?gkROkNrdb{Zy+O$NKvxLRxnyIPGdGzn+3Ic4_rEzCfm)l!7%PWT728pc72eq)WCh)+YzX-GrpPD~WK$MuAaP z#@S@2XC;FOQ$Dn`m6%O&7^Nv06C*a7tCL3Qq$%2I4veTsRWck`LG5Yc>Aj^-WJnx|CfnU|QSBQT%W zuK9vi%@<|Cu;$%$nlGsWWND%1%epmR2~|zt0qiw3Kzz0QsBnE!^9{M04-lq3p!v>T z7}0#!pyqpqG~ZXO`5{9P!kQ0}K4bfN#`f$XgnTFf(qjBR&zO9kG4?#;>v_i0^NgS8 z88go_UY=*XJkJ<;p7HTKW8-ncu%`^6zXPh<9 z*lC{e4W!GlG8~y_NHI@WdY&%tJl)%Qy0i0iBj=MbNGVO1A)VQ+G!M`;kI6J`AWh3m zbE&1Xy8u5->!e*Sa6=`ufgkY0)J>YH8@4|}5Jr{G!G8|%=HQ>cS~{l+s-Xr*BZu%g z_-AS+%@j;JhcLPMPy~(846VSqJmSi8LM{+KuL(#mkNoDvl+Moq(#anL;-O=eE@%KB z48gF{g@i4{y^y>V5_ci#7ZP`21c;{y-HM2ZZPMu?(kMdzqA(DD5&FBcARF-KCLVVg zP!4YVxyi4a{J8sp^xVYDmu}N;!WB~n#Uo0WkpGfAC;)UUX#w(ALK&7&4kdjs0E90k zAEo#$MW<5gq7;2g$xA7DEhD}%&XrN-G`DoQ7f7puG%8D#uEKv+1)xV2@mG`QYILur z%&Uo~nzX7(qZ>$5!&)O4)me&qH~5$d{LRywt6iw0yNdSRZNodVx56`1es)ZKU5u+HI6e8+mRc zJhy~&8+mHu+G=ZG2%<(C#p_2EKdSr5rk|qsJV{;NH2uSA5N{^uONL1-j@;T}O;ut0T zDES#B{3z8pNV#v!SNBJxy}(1M$I-1v2qzB{Ic zVkeNF;!Z7;kk1kqke5<)D)nli4E@SDS2ha7Q(h0GSApM3^2_fJEYRu}s;Z#}!djrk zEmYS)9S~P_KTuXR#9c#Lw77*DANaLUTf8txHfqUkEvIUO=Dxj9hZeNMg?covF9UK@ z&v8Qz5L<(>lTst4!zYuVDBJ-F3xo$yGgu< zV(CGT9x@Tce-Kq^!3#mEGdKzqLoW&TQjNW2qL)nck};NV7W&*km_Fj6)h+a6?;q9z z&;Eshb}bB|=wM0ueupT&A<|}!ePNg|!zek7|6%eUBJUxpEkr&;q#L5Jm~&qk zB^{Q37DjO&B`?g4FEE$Az?|*^^RWw23Yur>0`rm!%tbCR@3_F6;sWz?3(UtY&~z4< zYg=HBX@R+vg;X~rwV2re%@EOIRsnQFKa4^`i#%8t8H_E`uUd3sXI*8H2kN3LA9}Tz z)2KxzFBWr4waBHu$fdrR*8yEXTzMfFff%H;$SlNSKK}BFD<8kiMDS*S7MW>Sq$w;i zv#`jF!eSxEG=aq;;^D!x=#FZ!I2#CGOxneT;D0?MG0d{t zP1@DvTBJEF)}l*o6A(w814y$Ded{Q@ddjCBof}A#hZ65TKs(@0Ls)Fg0`krSYOx7@ zoAAd&YOxtVE%Dt*r|v8x}mx^}ooIX3>wXe)7fV zR(Qt(NW)Lw{p2M;ngKTuXP^YgOCSt{Z6{uyN{j8J)j=6`lBZ7GI`P*@emhC0i|}2P zX*c=oCTur#N4r_<#ZMpp`zX^s((fleo;ZsGl#hmFPbEAvQg)TD3xyU17F-g5Jr?mlDh2b2JL@-+i~eUycdaBZaLC!C*r`6**&3YP+uPk_9(lec!lcMx9}adlDV zUF4H%ajBbg-T3Jy@7=`JP1qjd=^;-+%AcOoQZHrHKdPkx;$*pfX`l-5J4pEsl7~Tb z8bpsF!VQr}mg1LKieDO{yoZt8tCvFLE5xG3Qr@GuvktXHcV>yT zo+Z{ZmRQ4BVm)Gsb%-UpD@)9&FEN+C#60*CYx7IYeJ^MFAgpEXzst-PE@urwT+5CU zD1%BZGr_XVEWk43+GS@#%dCMfXE$h>`|vV@n`PFYml>cevo^e(n*&8a_+0$a?O4tu zem?KMoKL)kHBhVNqHHa@NwXxP<`J3K^d^%w0^WP?5djEZs;U&ngMaTq^y+@N4^ov_`K2Th5qGbXY~|;!Uu)N|v*$l& z&tLnVUw6;Hch7%+&#$%rx97~`U)#U&_}BOS>j(bp18s-DJpQ`;0srX__|HTC-b3BT z{xkf(_j!Ec@t^PazR%-7e|`GDqtBD?9{>7@zwQ(N-jjd-_2~!pPk+9Dp8KEigZk&W zU+4VMI*U&rtaEqwS}6Y@gntefl%{(~q~G|MA!A z-Pou9Tlw@}>eG9uPw$*Qy}$YN{^rxWm{0FuKD~n(2iJd%!^gkI?BlO7U;B%Y^L+7q z_5AJm=K1dV;rZ$L<@xQIc&45O&!T6^v+P;%ta|?6ueI*k@N9avJlmcf&#q_Bv+p_Z z9D0sC#~y!8|8?p)^PGDwJeQs;kH6Ocy7An4?mYLNnP={K@aXuj|MEO}o;@#~SI?X0 z-J{W`nDJl#G%KZ<4M2&e;-f!pY%WJ zf71V?-_K~{N&l1nC;dwnh&tp8d6v;JrO&-$PBKkN7V=6Kfstp8cR-$Tc<{%8Hq z`k(bb>wnh&tl#gi<5|DoW5=`pXZ_FmpY=cMf7bu3|5^XD{%8Hq`k(bb>wnh&tl#I3 z@vQ$@zn>|FpRI>au*2`$!|&VUMgNQb7yU2#{XRbYK0aRbn^g_7s^Mqx;b-yT`>*k$ z|3&|cem|cKzu%7+{V)3ctTJBozvzF_|Dyjz|BLVMVm=eO~y|5d-u(0JAVs{d90 ztNvI0ulis0zv_S0@8`Yos{d90tA5{?4xfj{tNvI0ulis0zv_S0|Em90|EqqVnZ~RB zH~l_44L>&y-(rk6{crl;^uOtU)BmR5oNk!Y4RgBjrvFX3`Gj zJJIo`|4sj!{x|)8E*)?B-}Jxff7Ab_-@DB5rr*!3!_NuhP5+yIe^!h){XW+XpX-Ki zSH_$EH~qe48O|IJKU0i1{crl;^uOtU)BmR5&lwnk(uK!*CyZ(3m@A}{Mzw3Y3|E~XC|GWNo{qOqU_50p>yz770 z|E~XC|GWNo{qOqU^}p-)Gx~Vf|E~XC|GWNo{qOqU^}p+X*Z;2nUH`lOcm41B-}S%i zf7kEZrs3zR;m_9buK!*CyZ(3m@A`ccHQx36xof=Z_qkxa>;KUIq5nhwhyD-!ANoJ^ z``j=-^nd98(C_E9@uB}i|A+n${U7>2^nd98(Ep+TL;r{V5B(qdKlJ+yK7124%rJ); z<}kw?ANoJ^`*v*jc5Hm;|IqJSvf*2@@uB}i|A&5`-^Yjk5B(qdKlJ~P z=>O2~bI$nC|Dpdw|A&5``-h)5#)tk7{l0k{zIhuT`h9Odd~ZK|HX6RWA2xZzCU4l} z4V%1SlQ(>38otjTzRw>vdBb=5!zORo`_;39-dBY}e*yIhHyy5%* 
z;d_T+lQ;fbzfIoov(NZ%{Wf{SCU4l}4V%1SlQ(SghW7=-CU5w5aM8KZ}=W#*yIhHyy2a~u*n-XdBY}ec)u`w&Kuq_4DT3*P2RA{8#Z~vw~fOlZ+Pc0 zZ1RTBg2N_n*yIhHyy0`LaNP2RA{8{TgWo4jF@H*E5TP2RA{8#Z~vCU4l}4V%1S zlQ(Sg#{cNI$s0C#!zORoF_h`u*n-XdBY}e*yIhHyx~2~u*n-XdBe}N!zORoH;sSk|CfG0?+)*D zhOOSP)f@lP@BPm3Gw|?r@37SywtB->Z~RNY&q%|&uwkn=yaO7xdc#-3!&YzD>J48F z4_m!qt2cZ_JZ$xb_h!RZZ}^UD__=!6>J3}HVXHT6^@evy!&YzD>J2}04_m!qt2b=* zhVR6Nt={le^YGbh_Z`kS$TfJecH*EEWufK<_-mujhwtB->Z}{vm{;mJt`v0xp=ZN8R#Q3*< z8@}PC+4zrs8@}Od^WpQwu;Cjve8Yxs{71hH-|%(%u;Cjve8cCD;q%9^;TvAK4I94U zv&itB;jrNwK9h|9=(pt?wtT~uZ+L$>Z25*Q-}sOI|LFI*W!Ur$-!l&1GY*@+VbeE! z1|Bwj<3IXs`i9TL!=`W8^bMQ7VbeEk`i4#4u<08%eZ!`2*z^sXzTtbFVbeE!h8{M3 z!=`Wed^BwOhE3n_ozJl88#aBzrf=Bv4WFHcP2aHT8~^D4NB=+iZTp7LRKvD!*!B(E zzVVO#fAo8;Fno73eBK&fFAN*MVdFRc(Qo57{?TveH~!IY>o@+*r zNerLyht1!x`5QKW!{%?;{0*DG;Z?=3`5RtW44c1U^EZ4SJO0sc^Edv{@4KtvRmQOW z8~^CH{~Q14w*ef!(;B|h8vp3G1swnA|408n`n@C_c7ek#aM%S7yTIYQui?e%unQb^ zfx|9v_%3XCO)|VD89rAIpDTx5;IIoEc7ek#aQv@+yTIW)vtbuFyiOT*fx|9v_zrFO z4rBN%I_v_6?=ptZq~rg6CbbJ3c7ek#aM%S7yTD-=IP3z4UEuIKX4nM|yTD-=IJ}w} zUg!?Hz~TFlVHY^;0*77TunQc%9~r(M8McALHgMPm4%@)t1@N#B9KOpN|Eu52;PFTQ zAN@9i!|R@5GdTX}_g(j}8yt3nMbtkN!XU|LC_v9DnrNAr7yqhArZ-MI5$>!)vSIyUX!Mzx}}Yqu+P< zdGQ%_1t;xJu}bT^Wb^({Fmp+ z^Xz%?yn5a|@176Oe|!GN^Doc8J^%6iMYsi+)#Z@%M%^Z{e3O`mOx(MZcwAzUa61%NP9?fBB-{ z_s036-|{bC^ncNB0hllPtpM{yzwh?)MZb3}`J&%HjwfIA`@T88Z;pjveD@bC!B`2# ztI1dj#=Dq&(QhpnV~2dv@B6`cKNH_k$9L5Es^4qOeAWL||5yEA^?%j>RsUE0zQ>MT zVSJaJulm30|Ek~jjQOhHca8a~|EqrAIp(W=o0NRj|5d+tL9s>5SN&i0f7Nf7n6LW3 z>i??$tNyS0%?)E$n6LW3>i??WU@!)QF%HaE{a^Ke)&Et$iCw-FSZ$Gq%{T#c(Zu z>;GH7?^W}+ej~NmrNxpf)@S)!zg1bx#qzg)uZ83NR}4&I)fLmN7;MFNvN5lU=~TQn zj%8GAcw!k9E2vmO#ULfNPqA@|?NbbzV$c*ruKcavdMP$b@!f80mSVG%zxDsE|8M=a zOR-&w?NV%)V!IUErT88=_DlI&KUWp5DqK~(p9oi#Z~D2aa8>a-J6u)1>F27#RfVex zR~4=*TvdFh9Ih(g^s`lAtMX0%H~ruAyVV!gD!z9PZx!Auyj8yGXRhMiV7}>RuJTR) zH~ruA^H=$%|C@fpoN!p-u)<-5!-~CAzUlv_pU28K{onL|)6Zt*oBnV5zv=&`|C|1A z`oHP-9wvr9G4#oI{onO}*YDMRyr;}}{ajbP#*gpH^IiXU{a)$E_vZ0hKi~C#*Z*Dr zcm3b>8w=&T{_pyEu<&5v!SY@Icm3b>f7kzA|9AbS0pY~LiG>r3p-~Kt!imM$C}smO zHj3}*V{jBsEJjE14mF%uII(bI;l#p;g%b-Wmhbuvlk#2vcm1XZ`L6%F{_py~>;JCb za4A3Z8!zRDe!eVxS$^pMp`SC$5B)#%|Iq(KzmZeS81h5EDMJj@WAqdQ^{{9$ev0u^ zjGyvDzxN9MUgTgwmqKlGbLMVrhyI`Xf9n6K|EK<+ z`hV*8E+m{>IJp>V<){9i`n?~C;eEWjj`t+-{yN^3 ztX|AV;vHgs>Ng^bDM^0n|Ed3{{-64P>i3Q@KlT6A|5LxYNxXB6-C&H);@x9@>HnqQ z04=5{0SfX<|1bUCOXipUU;2OP_wGKRK|q82(*H~UFa5vt8?VJX%>2^-OTTxS`K8~W zEx+`8ubE%^f9e0F|CfGPA;0tk3$bI2O+bv_Viyp*fc(<`OTSr6e(C?E|Cjz>`hV&7 z&MLM8u^q@S{oY^2ZO|Cc#g-ty^#9WTOTS$~e(NV4@>~CJ{lr6l>o>BC%|U+aryufL zzxhpm>o>m3Z~edZ+a%<-{@?mdbMjmNZ~edZ|JMIozs*8^>$h9TZ~f*w`K|xA{@?n4 z>$hjfZ~edZ|JMIo|8M=j_5arYTmNtUzx5Ls@jf`-2j{nb0wdlF=eK?mBP2$=vx{+H zyep1(#rduOw|?*O;vY|+-}-;+x1R{9k%|6^eqtlUMke|v`pJzL7-pj1`{qpaPxMdp zPxMdpdk39~{)zsHetV2e^iT9p^iT9p^cyK=qTl=KO!QCmPxMdpPxKowW}<(hf1-b) zf1-b)f1-b)-_9chNeGhIdt{>DyYEc&d;cBd$4vB3^iT9p^iT9p^iT9p^m|91iT;WH zss5?{ss5?{ss5?{seU8MO!b>sWvYLwf2x0~f2x0~f2x0~f2x0~-vldD{Zswkmu9Me zs(-3~s(-3~s^2IxQ~d-?2$&EsAz)mo^yv5gK2!Zu{Zsv9Ovsp!F(G3z)j!og)j!og z)j!og)o*w{U&0W>R-@rC!7WS3;Mm=jd5sNl^8 zact}>VqXy>*_hj9QU9X;Mg8{IF`Uh!e&gA=OPodhi~1M!FX~^^Z<~=t{fqh+^)Ko- z+l#Sn7WFUcU(~;-e^LLUek0s0>R;4vjGIOMc28Nsvzki&0?4V-)mqq=?x>?k3 zu$v|QOZp9WV;UG+sVwPV(r+`BCH;0&S<-Jil_mX4`j_-C>0i>nq<=~OlKv(AZZ2m@ z|C0VC{Y(0n^e^dO(!ZpCN&k|5Q^YLkU(&y%e@VZ6R+jWH>0i=ss+cAHOZu1e+plCv z|C0VC{Y(1oxw52xN&k|5BjhaUU(&y%e@Xw6{w4jkFIm#Rq<>levi@cLM$0jYj7emc z^)Kr;Vve0m>}0a6-&Q8e`j_=D>tEKttbbYmvi@cLw)R=pzpQ^*zukS7^)KtUv&pi4 z+p`!&XIcNU{$>3J(lN!1k#v^z+v8+e|FZsN{f5(7*1xQOS--7Lmi61~WLdv4b(Zxn 
z>tEKttbbX*oko`RFY8~{zpQ^*|FZrS{VV!c^sne&(Z8bKE-owj4YOk(mlgf&Kvwh{ zYG+0NivAV-EBaUTujpUVZxfOg{VV!c^sne&(Z8bK%rpkxS<%0ue?|X_e*3$u=wH#l zqJKsIivAV-EBaUTujpUVzoLId|BC(<{VV!c^sne&(Z8a9MgNL^L-H7sXGQ;t{uTYU zOuqJLHYs{U2|tNK^{#E^} z`d9U@>R;8rs()4gs($;ztm0i^orr)qXYx>vpujyaYzovgp|C;_a{cHNy^snh()4!&FP5+wyHT`S)*YvOH zU(>&)e@#E5lQsREPS*4T1X-x={v#x(#|GNHl{ptENuuHS|<>-yLAuj^mezpj5>|GNHl{p-8{&oGfd0E$QpO-qtStm|Lbzpj5>|GIwry{zkB z*T1fRL;r?;(l#6VH}r4lw{gve{tf*b`Zx4%=-<%4p?^dFhW-ux8~Qi&Z|L99&q2jT zFdO>qWV4}vL;r^U4gDMXH}u=jW<&pm{tf*b`t516p?^dFhJM@HZ0O(6&n#s_|Azhz z{Tur2ZnL4^_BOV+F;CBi{tf+hxY^LZp?^caO>Q>yZ|Jwp&4&I>{hRtX^>6Cm)W4~J zQ~##^P5qntH}!Ao-_*aUe^dXa{!RUx`Zx7&>fh84qr^NuoBB8P+W==%|EB&;{oGYH z^>6Cm)X!jLQ~##^P5t)5+0?(Oe^dXa{!RUx`c3Y$see=drv6R+oBB8PZ|dLFzo~yy zzg=E7^>6Cm)W4~JQ~##^P5qnt?e?;ze@p+Cet;%h`nU9(^=C`}mi{gM_I=sXzomam z|CatO{agC&nzN;UOaGSsE&WJ9w)A5G+0wtIe@j1@lP&#Q`nU9N>EF`7r5_*2mi{gM zTl%;3Z|UFCzomamzl~wG^l#}$4YH+wOF!t7E&W^ifuGnRW=lVckS+aN`nU9N>EF`7 zrGH!hwtg@u+xoZlV++~VzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2ma ze_Q{y{%!r+`nUCO>)+PDt$$npwtk!NAQIWuzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEH zZT;K&xAkx9-`2mae_Q{y{%!r+`nUCO>)+PDt$$npw*DRcJNkF@@95vrzoXx#H#_=w z^zZ23(Z8dANB@rg9sN7{?bx%U-~KmDM|Sk@=-<)5qkl*Lj{Y6}JNoTHWA~mN{kHF6 zK(eEMNB@rg9sN7{cl7V*-_gILe@Fk0{vG{0`fW?Iqkl*Lj{Y6}JNkF@GmP2MzoUOg z|Bn70{X6=1^zZ23(Qm7qUH!ZIZSb?Je^>vm{$2gM`giriDcRL;%bH#N_N>{}zpLNQ zIUref_3!H6)xWEMSO2d5UH!ZIclGb;w~5ZK{$2gM`gis3>fhDBtAAJjuKr#9w$lOB zVn3Z-{WjFu)xWEMSO2d5UH!ZIclGb;hiI~^pC!nyew*9u>fhDBtKa@Mj6rtw@9N*v zzo&ms|DOIm{d@XtbhD@5W;=WO_w?`S-_yURe^39O{yqJ`PWJTg>EF}8r+-iXp8h@k zd;0hE@9777vZsGf|DOIm{d@ZN^zZ54)4!*GPye3&J^eQ0+0(zLe^39OenujD`uFtj z>EF}8rym;1o_^cn*cQjOIJU*v)4!*GPye2N#x;yZ_Vw@U-`Bsde_#K;e()&!`uFwk z>)+SEuYX_vzW#mv`}+6w@9W>!zpsB^|Gxfx{rmd&_3!K7*T1iSU;n=Tef|6T_x11V zw|kD=bN2P`>)+SUe8dhq`}+6w@9W>!zptMK$-aI#D*O8P_3!KFL$a^mb~^j|p{eZa z-`Bsde_#K;{(b%X`hlwOBstK3p#MPsf&K&i2l`>Fz*afXf1v+B|ABt{{T%2&(0`!+ zK>vaM1N{g3A<6(+InaNg|3E*il>_|;`uUacD>={)aOFV1op%oOALu{OZ>tvY$Kr;vW?Zb1R|3LqN{sa97`VaIU=s(bZp#MPsf&K&i2l@~7AL>8U zf2jXZ|Dpav{fGK(&U2{$Q2(L+L;Z*PVY3|SKh$rNobFtPq5ebthx!lo zAL>8Uf2iMnJ%{=a^&jfDWzV7hL;Z*P5A`4FKh$sIoI>Oa(P$DKp{hx!lo zAL>8Uf2jXZ|Dpav{d`3Z^#gJK`6nLzhx!loAL(b+bEN-B|B-%ZE<8q#^dIRz(to6% zG0KttBmGDEkMtkuKhl4s|49Fl{v-WI`oX&#=|9qcr2k0&k^Uq7NBWQSAL&2Rf299N z|B?P9{YUzFrX1-%(to7?NdJ-kBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<=|9rXmgGqP zk^Uq7NBWQSAL~EXf2{vl|FQmK{m1%`^&jg$)_<&@Gsv<2WBovIj`bhwKh}S&|5*RA z{$u^e`j7P=>p#|ite;WHvHoNI$NG=;GYer}!ffSO|FQmK{m1%`^&jg$)_<)3SpTv9 zWBteakM$qxKh}S&pL+p#|itp8a5vHoNI$NG=;pXlc#a-#o4|A~HP zA}9J!^q=TI(SM?!qsWPV2s$VFPxPPYKhb}p|3v?Z{uBKt`eDm(7dg>?qW?tyiT)G) zC;AzmoajH%f1>|H|B3z+{U`cQ^q=VGHgcl>ME{BY6a6Rp-B-woejqzQc24x4=s(ea zqW?tyiT)G)C;HisoajH%f1>|HKVy_r{ipg*^`GiL)qkr0RR5{|Q~jsOa+gs{d5~ss2;_pmOa+gs{d5~ss2;_r}|IzpXxu=f2#jf z|CxU7HD~(I^q=WxU~{JbO#hkwGyP}!&-Al|In&R`=1l*Y{xkh&`p@*A=|9tdrvFU; znf^2VXZp|dpXoo-f2RLT|C#rWL zbN%dO&h?+`Ki7Y*|6KpM{&W53`p@;B>p$0juAfuOx&Cwg=lXf0oa;Z=f3BZB%DH}y zEeul5^`GlM*MF}6T>rWLbN%P~&-I_{Ki7Y*pIM6I0y)?3z(CIRpX)!@f3Dxbf$(uT z*U!j>`wZihbN%P~&-GvEztHb6K`!)P=)chKNI@?2U+BNkf1&?E|Aqbw{TKQ#^mBK) z(0`%-LO(MdhYfO}|3d$T{tNvM9ppm4V+Xm=f1&?E|Aqbw{SF}HLjQ&S3;h@R9YhF^ z6&@?RYA*C&=)cf^q5nevh5ifu7y2*sU+BNkf1&?E|Aqbw{j6&)^z*K{(0`%-LjR?H zZZMp8F7;pPztn%J|5E>@{!9Ir`Y-ig>c7-~ssB@{!9I?JLFP7zZm8=m-;XDU+TZqf2sdc|E2y*{g?VL^@{!9Ir`Y-ig>c7;_NQUdpm43E2SNgB?U+H&gE&ODz^k3<} z(to9&r_7c9EBzdBIN)6AztVrD|4RRr{ww|bT(0zA>F4NjrTA%u{ zrT%Z22t^Zp8wf<}U*ZQyZU+cftf35#o|F!;W{nz>#)m-bp)_<-4TK~2FYyH>y zuk~N+zt(@P|62dGel9WB`mgn0>%Z22t^Zp8wf<}U*ZQyZU+cft&oSm&KNFi9{caoN zM*ofe8~r!>Z}i{jztMlA|3?3f{u})_`fv2#=)ci_qu*_W+~~j2f203K|Be0|{Wtn= 
z^z-4l(SM`=M*ofe8~r!>Z}i{jztMlA-*uDR=)ci_qyI+#js6?`?kD9&|Be0|{Wtn= z^xx>e(SM`=M*ofe8~r!>Z}hvrog4i(`fv2J>bcc_tN&L2t^Qm6xB74O-|D~Bf2;pi z|E>O8{kQsW_225h)qku1R{yR3Tm85CZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2 zt^Qm6F1qAa|E>O8{mf}@_225h)qku1R{yR3Tm85CZ}s!`xz&HG|5pF4{#*SFes1;O z>c7>0tN&L2o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$JNA%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f z(|@P`PXC?$JNA%x|r~gj>z5aXs_xkVk-|N5E zf3N>u|GoZu{rCFs_228i*MG17UjM!Rd;RzN@Acp7zt?}S|6c#S{(JrR`tSAM>%Z53 zum4{Ez5aXs_xkVk-|N5Ef3N>u|GoZu{rCFs_228i*MG17UjM!Rd;RzN@Acp7zt?}S z|6V`S9=8^9um4{Ez5aXsoO|x|&-C-|ndzVDch4j<{WJYD{Vob+rr-VV%=Ehfo|%4k zz%$c7(?8SCKxd|Zrhle?rhle?rr&L|%=FLn&-Bmq&-Bmqy8#imQ!>*((?8SCQfH=r zrr$NA%=FLn&-Bmq&-Bmq^VXT^pXukW<2G4l`e*uQ`WffU^t1e#>36d{Y7VPL>!0hN>!0hN>!0hN>!0hN>!0g)f!0g) zLm+egJb32%=lbXR=lbXR=lbXR-Hpjy|6KoEzpG7|>!0hN>!0hN>!0hN>!0iA%QM$M z*FV=k*FV=k*FV?q{!Hfj-Jr=_|6KoE|6KoE|6ITO26@o`p#MStgMOEy@}U1g|AYPq z{ca%SLH~pP2mKHF-R{VP{s;XJ`XBWB#}nj1|AT(lrt+ZwLH~pP2mKHF-MGtxes^*5 zp#MStgZ>Bo5BeYUKj?qZ|DgXt|AYPq{SW#d^t-2%2mKHFAM`)ycV8zD`XBT^=zq}v zp#MR?yE}1rClC4`^grl-(C_w09`!%!cNr^>`XBYXN0LYVkNO|=Kk9e0Cy)9c^*`!= z)c>geQU9akgeQU9a< zNBxibAN4=#f7Jh||53loU3t|1sQ*#_qkfmX@~Hn&|D*m#{VsduQU9a5C0$j-#=5=qo4l| z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% 
zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C32Ozx;ps z|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-RfBFCN|KR;5)|Cj&oqW(qwi~1M!^Z(`l%m0`EFaKZuzx;ps|MLIk z|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-RfBFCN|Kp$1e|Cj$S|6l&U{D1lX^8e-k%m0`E zFaKZuzx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-RfBFCN|KI^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K=1V5>A%x|r{5WXIs?!< z{dfBB^xx^f(|@P`PXC?$JNI^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_$Po-|N5Ef3M#efI0(EX8`I9K=1Y6>%Z53um4{Ez5aXs z_xkVk-|N5Ef3N>u|GoZu{rCE3`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ z`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*uQ`e*u` z0jM(oo#~(HpXs0JcLt!&0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXS0IpX;CNpX;CNpX;CNpX;CNpX+x9pw0kvu79q7u79rI8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0CcW@u79q7u79q7u79q7u79q7u79q7u79q7 zu79q7u79q7u79q7u79q7u79q7u79q7u79q7u79q7u79q7uHPAeIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i(EnS-z3j%76<`?U*+KN6IcER} z!2pC4+vOy*xHKaW;(^frN&6941{#_{cB(9^?9Z~RPVLLW01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk| z!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{# z48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbsfy|8UxR zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e7YxB6)SqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt 
zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMK)_uo$%z-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)DL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn4nhx%y%(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoKhjSFhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoU+bpA&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf z{g?hr|E2%Zf9b#UU-~com;OutrT@}@>A&<}`Y-*L{#*a8|JHx&zxChxZ~eFaTmP;9 z)_?22_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt^d}4>%aBi`fvTWei}eDfM@{G0HOgz z1BeC?4ImmoG=OgXw|*KxG=OLT(Ez&j-}-O;xBgrIt^d}4>%aBi`fvTW{#*a8|JHx& zrvXF*hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLaw`k8(jKs1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh(9iYL0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz12{jmP-yt|`|vx1bsobX3}#;pm%(+s;cxH5-wj^c4euXw z=JWjK$G`ON_5H@L{_lo69_v3J^}omb-<>~x|IVMjf9G3wzWR$#eSgS*-*D&sk^Xz< z{gJ-<>vx~>uN&We%6E5s%9ACo^W^&1;IW=uO`az&OXtZ6{PW~^@Og5`?L0XRaz1z; za6WkQJ0JYzpASwWo)3=HoDVkPo)2~j8XiA5AFbIKerqrbc0PLgIUk*9H+*eyXy<%% zO5i+Ozdz3=L(j8Wr}J#8#^8H-HtAvbg~8j(^XxhJJUfPco*ni&&rT1W=WqR}pKaJZ z&vrVUXIod!v;80E#f0^FF%x-SOwF7Z*9zywAK!Ul0-P5+@z0BW;pfE$4}&lMYI*&< zTB|ht*%XutsnmChO0`OzU}=+B*;Lx6Z>Git}&|Hygapn_WlelgmAWAJ`{PMCX%37Uz@g zw&%08!}Hm~&iQP`<9s%4az1-6d_KD%J)ghvd-lcB@%dtn^?b32a=w^7KVM9roiDD% F{t5OOVI-G)t!B3i>S)>vbWRV!euajP}dxD7SdaEvupEpZ#RQsOqq z3=9keEg?vVK{jDehTF_A;4s5wn3-5(jdiBRV>pImI65WPShW+6;TUT;p5H=~?*IAo zJiq6C*ZS7C?(e#N-#0uK|5#+&n_FMcNEH5_&^;FSSVY>qy+1kC<-A|zJaTIvOL#15 zbLR35YqKAQk05;1zr1hQw{v^q6Tj~CbA24nzy9@}{9h%$ml*lUBaiJ{f4y(#Lyw0y z{LP-X9y((mi{yq~dw)N4Zr-kKIlu4m>pgkDdVS2}XG@;YO^^8x&7)j=qSbz;eemO- z=5wv)ORfBy_F_chyGPcPZJ6Di~M%H=o{fO@mQ7Bm{s9Tw4+i$vClL{y1H*Na3Q6&c?uGOkf1#xC-h zMI^3QB-SgE0P*M(Pl-%8Au=f_^0PA{i31{!pBG6Q68ZU{$mFXuYe;0uEs-a$izJVT zOua3VGAc6dp2*h4<0XfDZhJmn{`%W-vi<1?GOSGD&`YbCTpeJwf)pI6?A1pD2I#_ypPi z`OoF8?$h^qBl|LZoELWAgG# zkIBZ*$H`w#h>=aJ$H^-_P0S*<^u9jyh|%ZE45-+s1-e!cI%`x zODBdUw99s_JV_qk(L_l3fKJM8>7;VJh?RSv!T%8YA0x}>>!b=j@TJIR6(^j0QuOPn z8~t|TiX+Z+k>X^X1SV)DNV$q;6Mmmg%!VjAOIUp;d4LP(2Ci9X(@W@qZ*&rTO)FW% z)k3>%Ms9^2Fs#DvBA%}zq-df>Zs6XH{~e@mis)nDKDwJa3VakHLBa*bX`~mol{}Vm zPe|s-mA7ZfaM^0@zr-%qn(;SPX~ctcf__j#-H^to7}g7~YwPfvM7-hecW4b0qowG! 
zM!tQbR9l`!y%Fns?=dzeOKi5B9{`uHsIUjgR zKL0XRE`0NpbgtJ(2G^R0qU9v_{rCQ-Wbm_D^5qwQAeX+MB@0yEZbeH8@ddt*keRsW z5YBWaN}S}Qiu?MJbMW6wn5D=Hu2-tIz859&Nc}*R7*0fu)kiQQQi|47#=L0J+wt#> zlG26IVwxQ-#^oYa#L(NpkjFwvbDo;eqA=0WxzeQha&W@4}`lK0mE0~u@ z$y)B&40_VsfZPW5+$hOG{sVaod{4i4MklAx&Di*7$U~VTF79uppLmd+=}}@L-qIS8 zmSpl&E^-3@qE^~wK*X+zmLTpb`exBljhrQpU~D9PBvK6cnMyQL=GDjn(mYE&bduLY z^o>mPRV@YhFly`I!E~eS?Ir#!vOd%joUn>F>zEH(Y;OD|K8m zQ*L7<^@x6zYffZ~3wu@fk*)_B_<``B(BII{D*qClJWg2*S9Nlmdra4L^3SR+Ejqct z^-=unxG$-&N3rE0ogDH~_fL_x>7eZSS!|cF)I?v z3_mWPj-XIeeUZVuak1>!I)0^*rAd3QgfDmyc+97 zbW^FH&!Z*4I2(grRZh4Uqa+wXe}5CX5BYqw)XhzhKns1x7A>W;r70_c{+=MQv|*7Z zR*G@!X*&aBM>F}Vq>T^V5~)ESrVJJ2t890IlymRTxTjm)mn!m-Y6t4;FOgFy?#q`j z%b5f*EfM)2)O~qlf<)n8HBqFIYeV=Q!~NIj`rZWWIzcXx@8ZJ=au5GGDqZ^70^|(b zOObPthI-2y*R6i)jeEE;Kc^1w|`ZBy&751Jyn*6{mINd8*!JB4=Z|Il}I-3>x5OdKY)G)PQp1T zUP)ZUVF0~FBP(d5(p>V*H6wYeNECU8a+FioWrVFH|GT*6g+rv#%$VJ@Mk71Pdk1+k zZ(?p}rB8xEL0R-*&(+AU2xC$49;O~iXDyhi?`_DUnEHI zt%rWW#e}&^yF)P)!A0&hBGnpT74er_m_r7szcbOY4EIJ@r_y~+Bi|ENKSX_97x{%6 zFGtCH5cfS*htaZ!a66!!G6tSzy?~w%Uz2}z|NlhlvkAMMexo4|ThZxz`ZXep2$xJ< zwG&3IP3EC5ge~}+;56%^Uy^1~GWEBg`FnCSVt4CWfv2PM&>)&zlfY4|%y#w+$U~`7 z@|cpDA`bFZw3|9%9Z;lUeL_7otMViLN={^bVAn`Ddf+AU!y3;(yXntRmq<1K>qlw3 z0gW^e<`DBm`9hJuBYxG#)SOy6m-@obSiqix{1laNFS=gMEsPf__>GPKA(6MauL$Cx zEROo5otx?NwZ#4R_${SA&8Zq$j_laPSb@K?qti%xqDJ1se-ZJO;#WraS?KeinYPi# zksf`(G@trhLz_*0I5#ij`b6$KLU{Xn>irA)@%I{OS)mMi z7yJ#hf8dE^2~O9F+Bf|>e!(8*ep{3@bI&&_%xsZvbO)Gk5&n6?vljB>J`INP^nc`- z|L0n1VGKwjUn&hXo(5hc-Pfb!JK{=#i-fJ3ts(Q=8%&YQxc>wHF!Gv;=TpLyR|9pf z9~AimuBC#Sdx}@mXOU_SD$Ul&UFyn+3?qZ=DFffqUo=yte7{Db87C{Vm}9nTz;*k= z6lvZ%O`1vXW9~N+whd{<|Bp!5PW;AP+ImKcbf!;}PSX5<>(?kxBKP+}po{kIm?|6a z@1#7NkPY~!Q2r#C2eUxmOZlN_2KC6A%}Bj0!oRc}TcS^tVZ-IuHFBA_D@LietBmvH zCxI}PL)=51S22bd2&-2#qQeiQr6o;O2T7E%yhbB+2CfCU5!6{r@LTNSDrXUDF1v-@ zTGY6ReZI;*Jel(e;%K=iGK=efh`xpRw!=K!YW=kcX~u4z;@UD5$9nGnj{85?$g}vV z_0vZD{{(#%^3RkX_iaI62d3Ms=LbQp>#ML^{PKza&^>IP^}VSVfAlo&3%t%b0d{|g zuzS?K=dqgq4JC!cTy#J5`A{{(fp1e?UK5CX3fSE|T- z+EdNHYRzw6qLb%vFM?*mG;bna&Irn|CG&cnIPiOoxQchjNprnOlO2C#XB^>}AHU>U zvkQL+s5BWr3VbS2m|uTQnuby0quv|vuZ3>V&)_=!(oDPgai4~B5Mc2bd|oTvQ+4td_nO#S zU&j9;%%uKftR^AjNe}&tCmE|pAxR@RJ63< zeup*7U9Q!Uf78`y$tTYS=5IZu6RzkU=O3I0s5P*jGX&KaN<&d{kb8~v#Y)1z!<=7n zl=Z`Y=DT{vS?aEG6KjXv(ei+}1A4}pr?k?+dgpoKG;z-?@?VHF5s&^b^+g*8=v%>W zA|#q?hw(SBiI!?)0&cT6T9S}c$i}~sy%^-jmT0*VLHd!L1>tVlO8vxPKj@z^tW9IR zV&6sJxzukuHl0lyvldtO-@|=2Frk-W+vP5eyyMkKI3GsuQIk5K18 zA017UzvGzh7x)=RDEHIc|4fAZBYqCX@0J3_C(6;bOCwp7p^No`Yc~C3yhb94``@BO z|L<@-QX|fEjr0?TcT~hT$X-+<e>CC%o|NlJlRV0-bqj>1<17qW^#lnYX0K$3XZ;^9HHYXEuPOEx7)aXDuaG^$}`Jo}n>3FB{J?^Cjsy7=~SnaPr)_fyX7FzO(C#3Hk{B z{&yqC>bdhK9eITK@o%QCT=caCT<@k$^_0Otc>+&k&y+(S#CwFty-ex!jUvL9EELk?z7S`2TrQtq<16fR-#^$Bj^v`b@OFA_2OX_SJ*S0{b z(upqz^o-pG(%OrDK)Fd@otqWWM|R*2GWYI5=0hQrvBtL&$JqX?pY<>Mf-K@IWgTGV zx*i)gj8e}kUh-*JLte4@;8)ZE(n^?Sa6pjx_Xy)kHSq<>XCRik>g8V2C}#YqwxHXI zV}Lm7k*&-bgnfndR9orKfD3;E;|c!7yCOJ;qn*1LPgI=DyEejBYa$pwIBWWj_M@yS zUq_fzFOr8#a2dw-@2tDkI#r#aIk;cdwVw5z+NT9~(ua-^$As~mr~X_5uf(&@r(ZlC z$2k8O{eyEXHb#LfNq~ECj0C?MH`c#4lL!5J=2W;goqWAZ|DmnI=z%YZ=O$}%yJQ{hQ zFgh597?=c8AOWJmNu4DjQy|cTy|DIB6RJ9MQT=}={wl5#`av{d9jacK`wdKRs@!T{ zoPdAHCeB)s4+u}CjFq!m**k4xyv`XFdg)d6r|LRwsrs*3T~8KSho4pH)bB=hJ&WtR zAEsZul6EJovaMxYt0ml+{Sj}&qx;w5-~8zQ4eI{u)ZcCTF7Y{(zsf&s=GqE%eK*&s zQ`J|BEgG4m@=tkH`bp?9FpF@3&!~^Dv~rv>Ur^~TAzyF_eHbpnW#vy9RC%tU&r$K8 z!aYEnsC^piAU(PPzY^{(KEpUh{4Q0G%3tM;bQj>KqrFV(e*9FQj=^1hl=Am7r>ZvH zPS|wpi7?gKoDnNO!Uf-?pYMy5Bbqd+k}tD>X5O01J{rAX2^e4*ucLDXM zp83sV3NtXL>zN}|{S^g~)KQU|W9Vo4O~72Q+P{K0#`4epqKI;-Gr_Uzv(a;@Uu;>8 z2kQQ6!m2S{^{qY)cHmceWm#7GD#q85~RQ)?wL&< 
z4;Ok0V`!kpFfH(5KDf>VXG5~>loKNxUY-x=Tqb- zNd3cdISH?xO$0t*oOlWU>G-QOV%4|-Z$Sj*+9YynEI-uSry6+){VbeU_dm}4E>!o3G-9@>r!aMsRiEu>9;7>i1|Pwms<@hdex zEB{1W>p2%F9gUXXs&LFjW!TKe=((WuHs!yX__2|{MDKvwPa@@g=9HQzh@bYaeuX_T zX_k?vcfU`QQtIcoEsMu#2$SA|KcyrTJ!rl&QA(l6aXv&HWMLR^eXL z%4f)Z@Xuf$7a?CDt=~oPyoS6*^6V0K_c!cWr$&hDY5dv8Igu6nNxy(+>-!?4l)mQs znEgf`e#txspNRw~F-CD-$DS579M zj1#{>Kg`~}j5t5wIrh@W`)l-?8H_X7!(UEdul94cMoFUo z5~<=yM|XXxlP>noDvrTdwDKB$>fXVZiG%bP;l8qua}D<1{fX2K@`_$7hw(RjM7!VA z%EzB^Zv86P3E!Z?8@1~EL(J$Of3KA*-*IjepzNRX3=%)*6`eG3PcwOVk9FqX;^#Xd zl8dZGzJ)A=Qc(62=wBv27VK5H``?NcFM8j1vEq3-R(g;hD1YegiIq=~E*>}?|A^<5 zAH_;D?#}0R(oVm3VBFHo=40n2rLGxB5V&{9TUS*Vllbe+6MK3&M8 z+3fW*8H2E~4|j0}fIJ2bB^voHdppxHt@!LB?^{G_y|n*xoVy)bB!QmA%x{aOp++Na z`c*w)kBxBV(y5X5w>W1b-B00YI^#F8j=uBZc#;2xJ_uJ_FK5_UZR_b?)9NS8AT<@emIbYIB4dJJ%I2jMGP%lpUy_#R2#_z}L&!A6- zr$D7ydWiI~A(d7QcK<#$_bk`CzM)=;Q{{KD3j2jty1pY%DlZ>t<+(?GN4dY7Yn;zX z8h+2iJM_cu=dd^G^99_i!O8U(AIYuAW7JtaY(W1jP|whJBKN{?zyJ+v=+DSXcmv*t zdbkV&@Gbl+dEJD{Gr3Ln}j#~L+~gL@DB?kIb7WIh~# zLTD~$je;~m>l)VRNLwpwJY+T0f-{xAi);WhbhF=cFX8-XK658-EA-xD9Wasg`ee>8 za645v@PNL9xu=Zz2)&qj%J?#4<*N~L61S1(;rb!w)48lq7>9iHt+V)_0~5AXdMrYm z%)J+IUj!fJ^-YZwH){gdpiVB~=V1=+yUnx3S1xFbS2#ChjpE~)mw5ZZN5{8dyG?OA zxlDK?9dQ_W4b0qUV;;9dJ7FA?aeq8TYyav=Z5!!YQ@Q`e$>Ll6q}(99j}zdoD^F@& zUnFbY&rZ{ZX|pi?p^rH4T>PYVm@%tQI1hb&s@C`2RIOhoXsaSFXx{z$e`~J1!1L8{ z8QMF9zYF)_0r+fFMaOnL8e$*;CP5NR0Yk@bDv|TKslSw>FY~2j`iEkC*C(cN=dh7~ zmGUVnJA3x_!%^ZW z;XGfDoj;+IWu&tLR>4|W2OFS=bNt@CXz44677wx?x0iDTAI}E-(9s&j^SLNFzCT)= z(8Za%3*6b!(w!A8r>SG-i744fT#LY+93`8PTfl{X->xX}kgsjHcYv>zc%dJ>U?%Py z{P)0K$cLs))B*kn(5)KEHaAKN(Tl-xD_YvY4mM~Zjn=suX~T}J>6|Aa?YP^K4(!^* zJrz(1)ldsXUfKZk&{NNOx}E-I;hj;&*8V*DR{^$y^yBVeoah{7d^y3~09`I@(~FHF zyScB9_!__rPR4^~qzmZ=D|#t?%fz~}kGy#na-NS3d%;(r5q}=%{jX-PuX&L-w*vXIF?X0#s+&9Wref&or4Q-5iUW<#(5NS__Zh*MqYy(5PmsI!pJ+| z>v~e|BJaZkDBU?#OpNKeG;9T8AOU_n2T}cX67DIG0yAM2v?Mc6FV{&MSiuJNHQ0MD z_72AB$ufs<^I!oif~BwwRzS~n#K}ZJoqp&D@q7@DgA=;IbxXuO z$Xo-%vC$Hm4$m@%kBgR7#JLvM!3Nj}n_&xVgYXB@vICi;^fLN%xmNa|?**U!Ny$gL z@pHiebkBC$4hqqW!Ot4Qhucfr^+R`hl(@kKT{CzOcOkX{o%Gv|@loQZFZ8YN;n5Np&Q(z)5HZHbkE>4 z>EnF?Kl(}V?@AHh))es~`%g@hmfO>$723cGwuQvcGyQg?W9~FL%{^z~99)2la0xEM zF!Wqak=|P=(uefiBhKr@T}2w;Kb#^RS-c;RJxz`youetzMK~AX-N^1C_1-~>TqB+v z5QaO@SKQTcZY@X$IkeJbb1+o&|SpuuA%Q`Gky#t%PiuV z1M^@3G$pgmm@G??A=ZV!qPR`wtJ-6t8CG@|;^gm>OJ@$|E;r1gth_5q_@>8$QtMotG z-Gy{R_W<^9i4-ewI=~4YXljfUiz`z6=+-ll(snOWN_p6Cnu<-$z&4g(BlEG9^O15= z-FI6nr;%sD&2#p1$P3^KvEF75_w3inMckLbH(w|Ibk_dZq!(IJvA>1%(RAz&X`4&j zRm}h3fXm!74AZ({=&vBq_dlN^N1JQ_aXhbJ8tWw^B(UuoL~%q zu1>}VFXsR*_MfdfxlbGqK(`DVfEY-CNf2&%QhdCa9Xi6hd$@hQf>0d+pa}Ya=>-lg13ZV%>c98gkAxTl6Yx98c!R=iw!yU zozr;g1DmeodiaU)QjM&IIv7rilLlnyl~^$&OF1ht^>ZeTZiN=&X(gVvGx1{OAl1eJ zsr>|LfMX8pPfWpNiYRcU?$9hIna}f{lS=>Bl`!izai`o z=?`Lm=b8WDI5?qeJ@yA~=!SX3;k>Su1xTMhg7Z18EJ9xjW8=p}t$1J=?iJvxB3|g% zXvGVw@LLOJ_Q30q7Ut{?$c@mnUn`rDTfj;=+rXYl8Nopw+o5HE@(*!sF8i%i=EQX7 zMr1o~2XY(t?tmQF1AE~B6hbldlra95Gyc|NUp2IUC+!cu{p>%WgZ|KYmGxJM_18Vd z-|LLOL)hm4LO3b@JI3DM|LyL8NX-DMn4T_!O5JHk30t#;3By3b20Z`LiaQ> zhcUliMjr-0dj_AKxeNMTyjQ=Q@tF79e(Zm~S9NlYaHZeV-`?Q+E%Y$lfxB=YhF^$~ z2guMN_V1P`(Jg1}g&0VHCj2bKJqg{)T&VUdN$69+L0;PDGDm`qJf`615gmLw<{%B?WK{?G{ z-WMgF4PZ2KPWmq21I1DPM9x9*vm)D|bR6}`^Ik^*dCMh_`^l>&O12QjX{VnecYwQ< zIRLo_T=@6xrVr$4WH0V~@MSYDEu^1rr=MoA{=oLf>>p_b8`v2G+Uv2A8uqZ)zmaZ<~ zOpdh)`USWMm!PGd@{_N&6T}ZTbbFBaZxcUUCd@EggBx&0tv`@=;4bu>!2Y2RJVVrL zka``#{zs|jdfIdTKBs=h&Am6GKNCp0tPz|-vlvyAas6)4w9F#U_KsSQ}y&dc|2gL?z z{Bh1d_|y2~zFjCro=Y?n)-{8)gN5QbU-)nw{X(6~6*4XtvQ8*uolqz(R|}Y^l-Euj 
z*x(v|s{A*QVQ`<)$Q|TeaGj@Lo}pjDecTVgNB{J9(oga8Ld#GAv#Qx`F|IiIS&X>%&1!K=3)coI6qLnGcp8}RD?6MrYgi@aCnqrx=4?na^8-8|} zNjT$Yymu6cWPhWRS-4$D_br{wL7xW;pl>((gYB#v3K*Y|K2YxjFXGx#@Joc4Q(0%A zTaZn2Iom*A1#6+rPChJ*4_?wmwu2fU*KutFY=q6Q1%?AU*@jf-s5_8h_Dwm+J+K$@ z;Q$mu=*>desO&ES8^K+n{Mc_)B8Okm@UW2a42n{*;ja%$HU6~_zH(6NkWLNHDv_n% z@Xg&blmXqnkTE8bvt)F2CfXO`y-aim_?fqS=Q;l%&VKxp@H2rEx}i}MAs;j-1#hwZS*#{keOkDto8Ao3&G_2lvT)nY*O^>RWOw`G7Q# zx8L@@B}bDB<%6s@rLOU9`S9M`Qnd9ADK6P17l`{JlupbO6Bv1@S#o$M-|p^{ic`Oq zOI*7Qe?N3kD(4r-q4ZoiT%RlNO)ijX?{8$7YvFgt%lp(#?Rws?%ib?Fmj?8OL|KeBaVKUYfk)rPY!sEj+8S zLfdGf*n206t#hI{c&^jlI6*o`r%K1wWN}`fEXOrZh)eU7bQMe%BhMgxF!udK4gH64 z@;>R5zQi}$uk-yly6y$+Vt{rTqMc6APT-p#A%5tmp1p9~PP@8jXJn^^cCDvfM`&ks zR}F2mE1GjN)=|(3?ce|#sP8MHNh1alU=k$36i9)Y&@wSnT9Y|rSi(63*ycuxo%(BE z6Df}6kur-gb6_4UfJLwrmca_>$!7f7#rSgq`-c8|*f)&r6I`r6#`BFS91k)6Kvy2) zPXXf(vU@jvoe^T>8Lp2H6IK!5T5z9_5SJxF)}ecX5zHYGvH^V~_o^fnE*Hg*rmi(zl86|5lu|j>bvbNSs)AvoFkJUs%BY7wN#ggL|973J!2WH+aAg zJtORYN7agH<|q}(ivp`JHXy{i2X0J8@IV2PEHcfX*dh# zpo#b_Z~@&q$R74Q=Kw8Qxro~_nY^*@wIgjO*!N~qUs==_IM?$#1%z>tt{X0+mtJ8V z6T$v8oAUvFx1oO_`CCH%(EWUu(UQgfKbtga*eBbG(?#4~(m*~VxB+3f19#y* zJb<6hf3ar|^zWjM!8b%553v3lp!^ofPh8GM%8%|sch^&X>ff9kFS?b?v2Yigj6pV~ zQ+`N5w{9hGly?$(5;%5|H~j2*%z6;_GFSmd z-cL~D!|>w?vKDvfN}L#(i%UD`tB-Teir)s<2%BLGY=iJ839eGc*2m8GGCAK{$@$)L&KK5@KDN~jJu^6;fIjd*KX}0he$cZGAYCW;Ch$DsGoCA=hd!r0ktg9aoP~4HM&7J2cK-evJ`3X8|#G~xO2dl$GW3{b%=ra4cS6ITFF-%`Lcoy-45;hDSuv+?BSlh zkPio-5Q@PB70@$4`G+V!`SOsT{;QP#7Uf5(vyzTD%0H3vgA@O*5z0?EH?kYIxsx*# z_GbocRJ~)niu!J9rTmRNGa!B|@wc6cl4^9{PWJ!gv3)*yMA}Y8Nge*q+q`SE^Wph` zJDc?&er9lG(e`T?m!KK96@2OR;~BL568gYG;=|7gM#e~=eyVh%hdAf!HLF2W@+GpAlgS_ZW;jJyU-=e2SJ83yYqY#8jl*oF%mVLh$R7+ZHo zO3NnZ{y464P3`T~9>fvJb^LrgljRQg--Y`y_FngR?DZ(mS8?}-=!3J_dm#I+W7|lz z22f**8e=*$*@J+Sy~**p%>AjX0hTcLPh<^%-h#chlFv4_{um+-*oZ?slXjk>4dCj2W823J!2WH+Y~Yj_3cAdHz3x=l{^pzDDhBd>qvH zPjLR4%Wnk0ad58W`TugB|7Y_2Kb`0Q{GNc5cMSZ*a}rL&S#aa$f^+B|7w<2?1@w#H z=h?omWSV%Zrb)kr-wl|}`TH*Ff%#S0a1;6^!d(Un`j7854C6Llp; z-hZGCTCehs&MnTLkv7`Oj%>&6K>9x7_d)3A*SP-%lup3T_wm~f=y#x}K%GCa#{&=a zgO{@VZc~2fsHgv}IWA+}+5##Bm=UfNnMG5D4=QX87hbi9rv2n=Hdu zu<2)~Nyw;?1pFpJ5=;RnHgtqDu@v-~FbkAjss1+y-GhDg(MRW@F91Jo-ymlM*jqnx z5q?Wy8JMrrmywox?7z`hp|6E?&^Ca5G9K6xAC3`Y=YP2MTd8+Y{ci)|HiJGyUu&4ZmNS1LTe#l3i7>=(1sm9*9UL%)ds1K~ z%z}sK?|i@1$@&I&PbT};Eav%a=6Pg4Zf_p@*aGJHT=uP}*tZ7R$0D6W>|2p8+-_v| ze*CUS%RJ&)0LD*fw=Zd%5Ml6Ngx^>>HJrbfVRxM8_$jv!w->4EWhr6Ic7EFlxdNIj zyhDau3l{v_R#L7^&iHX}fOg#KxxOunaw1zOd+T<}zngH#|F`_={hy88w;8s;HrN3< zum^go=zq+GeXQ#|(4WiL4nFWh$8Gu_93P?o(J#AB(f>LrBXooFYNYHXo_sg}h2RcF zQvOIOM)zRHePBYb0Dm6#tHFN9W4|+aR~tWbBhROhwa`SrsY5n^#m>Gy7rO#8Zr{_a z+gCET&&Q6pW5=1;?*R6z_PZg@s0gFp|8XGI`#(;k8jHG-WA7jAWc|-rBnR$><*T1$}T4{Sx@mec*+DXjzW^laDs?rQVgXts!3ptUV~F zqgSmz;^Z>-4Z}4sz7Z=okYTt3J=k+EcHIZ+UB7$e za1*XO5BqQmpJdk1JM0BHU%+|0F&S*oYS z@0~aHZ0ngu&9)3@t{}lV{Ap2{if0X?{adxtoJ`Teb@x@r}u9Rv_JHh(Ef*Me{4&gvv+L5{^0m6Woy{qDE5c^@%@_$;!w}* z)H}bG=*~6qQjIKK%y`51i2nJ+MLxVUh#To4ZuPEDGyS8MaOTPJQip6%Iy6JmJ>LJl z&3Xdeip{ry1KkOZt(0v)Wz40l$Zq_?U(t3*Kb(Zqa2C$N1-J;8;4%y=zKNG>$dLLd zjeYY^`{(Cb@8chaJK$4mKsEoqz1pD7j`*ZE5_x}h}I-U9%#~4gp`EURVp%|2%tMZ3lL zvJ$GH7R*^ZLq#?y9hzb6{mo>`gKiy9*_KcibSM1u{@*>?eCTeI0ro=?0@gE7C6kFv4lC1`@BdW_+9LOZ?XR+%<)n7D#$L} zuJi1Fp&OjH+5fU`Hn=0Gi-rvumf^n_*>3MkfCSU zt4f;eMbC!=Pzc3P@?1LaPP2Z1a0h4B$ZDvCI%ohh_&!aOW@J%4HV5i#t|bop+)f`Z zpg-qfpPR5xXy**j0Uc_O569W_JE05R1@3Il{}>y3c60v6S!5r0I3MiCU!C2Ly@xoP zGey#KfD=3rdQB(c@Aw|`1)g8xJ_*BV%5RZ`e&F|PaG!;9Z~;nRUnZuVeCz+*4Br1{ zoq^wv?-yP|zYN3B$2!9U{on;3_~9DYZa^6Bz+Jcx46NNZ|_w9U;B zE7F#pA$DXtZU?fPd=!oFjX%E?podw+u>i*2OFBmV60UcChV+34`p;*ux62S8_=6eJ 
zk)0u(Gcx4(k_^5T$dInt8R7!>Wa3W5&ypdFh;Jz@gB9S$&*jRHRp_3p#CP3T*Y+rV<0at%=~emi0Z?i^@urCeYGJ5)xdij#8g;o4qs z&(4s1?@#LD>I}JcQN?Lm)}d;l*@~3C|E4N-M&QXZ!eZ(>z|XlO$+72^$X?b z?PukKxcO4MeZIVZ;#sNLG*^yf&XwxWGxFZxY^mBbTi(6>v~*b}i)-i!F%h?M9Q*aE zMCsHdjy=nBdM7_zf7r22i%u#CV`h!_yX16fURKdw zjUyWQ-F41(hIpRFH(&Ns6QzBSXNF5ANXzzd(yED-wn3ivjE|Fd=KqmYG%k=z;;x3j z??s>ZCvqtHkL7UYAM@P#zesWG9QqGq#A^{!+Bi>4iGL)<$$uy%RevZSZq1N7+M)C_ z=CQAIQb!!2S2N^TIW{C2^4_jLk?Q@6qyhi%6B+V;=2EHcUB)xprBajqr_x%VE-mHh zV(m?rHhVg4k}kH!bk5MzdA^x0zHcJL%)QNE1qV2x8$8gvDP4Njr}OM3UHZ1C^Xw&^ zbz!>rsUsh<(~vG5#O=iYczHT=Z@P5lrb{<&H_|*KP5i`j5>CTeXquZQ7C47)U7p6= zm?js{FM^{tjdRO1&N0&%i_+v0ewV>nKzWhsyT@zD8=$^}3?uJ==M-ZQczYRxT%1qR zrs_H3U9R1S2M~Uhu~^N8oF%F6EyIU09?t(iWQguBl>KK&3{su{B`EiF*8L+onS^el zo=V?{kRolHTuvd3wI6!e+kNM#Sk_+V#zu%+vymUN@>p9OPZ9xQ-GuoPOD zYg*Mja+rCfgn5Jc&W`-?`@oJ-=J)07ufYjj;9A4}n!R^7^vqy?4t=TY&zG=2pUwU} zk^Olx`}0xmNo8&#ofWVO*20hPAFf09u%A}%A8tV32!8g|KK#5c;)2cinYZ#D9C8~p zq3=NEfQ7ubWs~m^@ABi`3+?C*_V%{hJfDV^PRi#cT##@>ln<$%Ewqm?|Fe(NvraOw z1}r+on(lS>Q~ai)N%%&ExC)`?JnO$f)_)<^f6!mW`j2_bSHt?RocW(MUuPcc{{q(k z$S&M2*5+>L1}F1%KJgTT2`a!{!2FMYCAz1RxLf&+7xY?C@9z0Y|{j?aVxYLSYLn*)O$rgz5o11oFu4p zUW=1S$RwBoDKHa$+W%+M{#mq1CT&AIkA3HX>}X{DZ)g2)p?-SV|F^RLuV?>X!?^}~ zfe_zRhQ8nofcF50SsTqF&N(m-7J!l8Ra}Hz3d>*xtb#DVT^RnF_h;5Q)FXuN7SF+d7PLM97E0g^{ zvYU9FsqD>(Cm#+#A-M5#E#&(HbWaI!PiFsvUIG4c;x>>5vcD=pD)BRKWzUSPg(mbm zWCK`oDOWb-I>G-Mz}*b(rxL^wWdBco?Z}q%?EeP|w}kyN*k)rF(2fnO?|-e_;{Ydg z!`OKM`xNyJum^V!VR{MspXYC!zhfgl@Iwc7);aR%{EacBYY6*y{pb0cpEyo}vzI>e z34H^bK8^b^1D-0`ej>U1$H#Sab>9 zimj{P3cZXz437DfZ4+f&Pg%EWhSaiaL%W?wRn^~F@aL+73qunN{f&xrbsBc6XCJ%`!T8>oNqfgd`q z^8N2EzW+r!+4FayyUz3d?*QNb2KhZX;@AKi!Fhu31(0LsbGNY{^lh*M{y*Pu(hj~< z+JLrbYULY=)%;!zVbuGIdywX%{O%PpAKDA(N6V?}?bJDK)|N+~!tVgr3ZWQGPyv+? z{+i!>LDoVY3{Qxc24sleBQzuRb03|zBvQr>-g#8<;ct4JZ?u^I9q6iTF4A+NdmhzI zx6;Ai$bIx(?+|qbEzC8oF2-Fi<1W%>XB=jJZfAaWAe+@aa1u_#SvUt5;3D)er}skN z8OB}cA7mT`UyyP66zA`(0Xp}yK3KzC4_$4SCX8^FJW zzBivVN*D*gGMDE!_-_Vf_gj$Lz#Pds2x%vOHW)kiV;soAZxMB64bm535AMBS{FM1v zzLR|P-><&|xC@~eeC&-Ykd;skwNQG6@BVpjD9m^Cb+{YA49#E#2RNY{JP`VXb0OsL zGyIP#N4IP~QOi84H}ukJm`&287`bxMwg9<8OQ^Sv=Uc`nKUd{tv*9 zZTs-^LVqW}BbG@&gYYr-XxV(P^DXu9Rx)d!WYPVS{s=LU0FyvHn@d6tQ>!>T7<>PR-yrHa zHBEYXp52G^4Dn1H>Ba3s`Zw|YeLCOYNAmrB9N*s~yOMdf&ieyy^zKpodZ)=A;@b=P zZ~)xg>*CpXA-ad><$d?2Nin(!{9N-5lE!(`LRR2sCZ0-UH8dG`o{FpkOBLnvQm&F| z(tz6x?e&z4FgB#UhUf3IDL=Gj5pFmBdH5Glex!rv_s!g61qV2x8$95LlhAXW@{f?m zQOb|(zeV0i*9U&+NT&Q(DL?*BWEbIFNH^|oq;rVhI3S+0a1JiOkH4Q_e1$P(260nP z-_tz1<(eP2ZwYD4CoSZU{m)#(y9o3@i))%(B-~|aLO0QV!{}DZ(nfyl`zd4Y!?Lv_ z$Ifv$pLZIt1H!qGH;`d)qnGi#=?=PwKGk=Net(sIe~+`P^Vk9UU9Q~+BXRin{f`iH z;RD>m!aD>%q=;?&_=x0tBgORj6cNN4vqo#>uZ>6vRDs-nK8ll zK?LV9tP5ZR_m%RydnOqBzL2roHj#BeB=bugV={6hVK&1S2!F-zs3CVij&hgr{|Fc- zLpQMz+{06+$zG(||K}_Bvy5Mqt>m?Tl>@j7p%~nZ>n<>%dt49KrWNRwp!TFb1M>$` zeJj2SziOz3I%ohhG=mjdG`!ahZF#)s&EDI_Tx@4fZf|9M03FQro$SGm-(!9sVSYc# z{0{DN=63`8Fk~};P&LwH7cT}CoZt#E~TH+$5wSfA| z7}|T;?}F_N3v>oR8cRjxchqQbDFKPE5WOIHe z{{Ai68lk5%5fN+XD4CV{%B=W`uk{tvnUX0{qO1~ETyez}*UmMrSh3dEm}a!@ofRu$ zM8sH;PPgtpefnspPyagI=S0brRWfB=tE>_wT^(1Z#O)|iqQtn5_c3JhW54^y{o`4$ z*ZX}w=l%Kl{_%Rff0?J7@k}nud;W6=_wWGyzobsVV3eR;|8EC6+2UDr(YukmED!iT zkJfAd;+Qnk(rodpGiY6;{fpeE^uI}GI7XrrV=*2RP-t&X_I*#c-js0Vx7d-tFm8uF z4ROqE894(pF&l;TH^k4SXP=WVF`vE=`BCyE;uzd6b#vlvbvqXOZ7Jf|_$DNe%WJ31 z4XQDANBl~x!Wz^byY=!u#7VJA`*(4=U8TmH+DcKg~}mdp_+!v$PIt2`dt-&g3_>GI!Xb3N$S zkiRegA&wE~!gX;yzqPIs*=uisTjU+w!vpkR;2MoN7)+L6D28Js`aYunW{LcR>Z^QI z@>E>+snl;{QM+FmP&Xu0@*yOY0}b8Eo`cFDq}D5gP{+2eM+2KX_LDb0$Nrzk{znS? 
zT*E+Vjl+0Mz$8q;G_>C^{{QxS6$#|+HGY|O=cEW~28@8*Bl&qr{W{{da| z_y(5n5zOL$z|ptut-#hlcA2k%Y!S{h^FN>sasK^M=`6=etU{~n%nW-ntf6P0a&Lq9 zDCia6XY;LhZ^GS<=^|f~*9!CR<)yg)PVI|+S8s1{e2wSVFvoLUVE(;vl}t0xwRUh&a?aGmT!?F+@BW=wIoMNjP3E_hu# zgMJSwb#{|*QrjfX0obHpiSMzxRCY!=cwY9KBBr4srfn|Nljr)-CM&*hj-a zdgWhRpXCev!(e&|3jP1m`aS=jaMN~ekWtoqcg%2%L@CB%9LA%feL!gUtUET#<79S_ zJgqD*tj|Wz_w#;~1I@~aV`SQKEiZXLOT3>2?7unI1eeYvOu;miVFvm>!>0JrlOgu~ z_k6L$`jW;7z4>IA>9^Nny}4Pw$86!b$o*LT`=a@}^bFZLhcAO(`8oIdEpwQB7g=RQ z*L~MV=6YP;d1J-*J`w(Tz!Tx%<>K(>^daHE#3A8(w~NF68y^bao&KS)@BCBYJI%ww z-T@y8Z(JP~_GCtcZ%_PK*gf@Q;akN|hh58_4%a;QH2ZmJJh!@WA-No_>gAQ>DrEPn zN7222j{te(u*Pre@JIa%Th)^peady#-u3$?R6jEyl#?}N?Q_Pb(W|fnyRZlQZ~%u; zjUz}Pg*2+`2ZSG-EeeM(n&-H}{>5WPg>92Zg*LxsQMp%N+~tqx-x?9N;~$HMg&oZw z33TXNciU49|}81Jr%yatT^mhUu+#p>%98?J8um#7qKY3v2%$2twG^? zFBlJp%71wf`@jo(<8Ik#4c0M5;Wg#O;kVQu{#V`ax8VmD+4IBhhaYC{ha(p4tnK$3 zdH*+|Ztq>~2vHRbHe!>)+XOy6q<2t7GD)wjP=i;mEMNxQAHv5li!Bn z(i(|U)GjItHIs|NSbAcKYr{DDc%%-RV}m3bF+p6KKYbE84Xw(XGI9no9S_T%?1Je3 zz8|W7^xKeQQ{>qKUGhjbS$Em}&(a3bUzS*{&B5=O+^Y?ujgjimzTc_;M}B!tewiuF z*_ey@ScrDVb~v_^%qF#IGTJuX+Bjt1d5>ztG@s|6K-%vu;vbCaIwbfq8V0zwwXSiHYa^RBvbR2IUoOA*{@i+(WX~t9MHR#5ifkjR zumgQxwmuKJ2m509U(6jL>*n0&U%4L|&fX6RG~T?gU+jKpLJCtH6X!;49ihBA`yd<= zu13W-+#}l8zpMOvSNZp@_y4Z<|E~A{F8lx8kiY$|{_OkO=l8=gq|vhAUEk=t%D;D& zfA5+T?pv2$Wuy9$4Vj>~uDu^J8=aq?U2s2iB28~Ye!sMLJI9Ony~FzC+gmQ5bE7m0}^AzBZ(%YaKV2r;RfP8cI%^}&qaQb??zth zCcD(_^`qp$VeJ2B9`;kku|19QO-lXMBroT_rtjDP7Gg1$VmVe~6*}(v25-uHbL2s? z`vvJrJFY#Fqc;zd2j%m$e0Z#%>$)fpo;5Z|IQGv+f7}}B^#02H6>0;l$m)0466B-#e~0J=`(!`cg&yZDHYg8{L>&eHoBJx<=Wh8}TpDf2B99X| zi9YSfitiWcBYY4}3-?y2E68)WfJ;cLGp>-^e=@@SVc#cW-*V~c z2YmS4cYo<5~nG#idKj;1@JI8eQ@A?Yk@?Ufg#QA?y{AU`j$K?RoyAy+<*0qj{kzBEo=aiLOMg8ylj_AmP*&hJ^{n8N%X`Z{{$=aj2o;~#id`|la=e7bi(#y5bt z{^$nBY(hnm{1S$QUiC-c&#X)LA@zv(zHjkoXg_bG_xu}slH7q^*n@pIfa>EP2#3fT zvYI@CT6%&^A#qs#PWtX^22P}7Vl$gjrW(--Zu{5mNf6+UQA!zp8J&b2hIO`Alz&3;Qs%s_rU)*knA1en^Jxb zrdNI5`T}1xR)OAn$~i8xUFcbPzw@j<2l`0lyS@8n-#~|N;kflAk>(_PD4ar2S6$}g~yUto^03&M+$o6Ilp3_rmveu5W`3t)>c6~7!Su?n@%I{z5< zKRw}E8oJs4^bJV$ymx-*ZsgzCB(5CWP=y`Xg+17Z12}|g96_N!{Ji&lnZHlC?gjq8 zd3-ZStl*p3%ty0ZJ&jbvF=-U)Z*_R3`l_nJvlN#{yJI@gi7dLBJzwMy*Lpd6lmG87 z|KD-t16sso(26$1IVmTkll!{-uMR&+Z+%J|^|rPC>DlS()ydY)qMt*4z2ne5Og)PW z;;Ij_@5w8uU0@s>c?~tq^3_^xx{Yjm;XX7?ROg{_p0+INrt06rf9U`8pF6mR2k5_8 zJ%Pa}!GGxg^xfz2pZPzBN@F-iq7;3fw~rlJ`CWedpYtP_n-kZb==qKLX5zO@)y_hk zvpqpLmbsJ2DVT;bMBhs+gJ;mQ@_8IPFq1wT`BCy89?j>NE3W6S&2=GrKkGXs7h@@w zViuwOX@c*yo|5tA|syCBl6H@H=qtEgGzs3Kreeg%~ z>zt$AZynkXofnM{z@v5ha|ihV_WF?~ojv#?JH!pKu`toPHGUurq*py5?|;);;Pf{6poAQX^ekmNITEE9i&k+N zjH73t_5D5L+oexH9J7-1J$56`RnPr<|1e2>-*2>a$Z05xVQqjJWUOmulD)?N%#LCC zVr~rU@1IXD#A3wyze4|CtpAq^FGsBZSCXsnAJzYL%7S`jOv7a5#4P2)BIN>_=qc^@ z!v2VZG|LG{C9f(dp-ZHoF+HS}S3AheW&ShmfnD@H*oRK{66Z2^slU6YdTyR;9<@8ozw!Ga z)NI%GA!DEX5i)_MwZ0=XuJ&Cmi}_PK11Z0y(T1K1ZD+Fg6WWY1?3tg4VKzqY7o);S z`f2p>Ay@p?*aBk=&cyH+28MIw1zf@vJo=7$>2XeU<>!n?`0>->x^N$E;STQM0qXP- z*AFoN-aPsQ+1Stg`$6X4Pc{D@*BsM-i82?1QG%fuj*)0TW&Ztn^Y71^e{b#muDjOX zzhV9T{np>d(PzxRf6n}SX{F7#Z=q+LtJS&Mj3-EMFAb&A8H;fkkJi^6g9-GQW@k@n zm_(m~{3&UR>lWALJk!KgA1@7ME4@n{r>U46<@W#qcj$yihrf@P3zyC)&9FEkM(F@j@tnrDrbAgfSotiTR(7ZR?q;ka+`ly8wtZC1vSd;GQ!2XF}0ID!OHNTUr| z4pBsXRI?!pM< z0fq6u^~N4_yVi`c2kf2X4N z!WzW!zw5{i*o1OC8vnbEo_Nl^@vT(RcOW&4-G$^BcG$CgFKpj>wr|4%_U{Vz?`F0o zS(qocOr79AyRZlQP-wquH^%jE4+t0RUp8>(-S_N&c5nB=_u0Qa>|Y#1`ab)Y&6{ES z#&z=BE{eNs?7DQSaRdp(-!iw^-}LM=(nh6rSQQ^(o?jw2#yQ9?*PeDv^dYpxV^Mop z8A_f&4L`!8bp=iepGMOP>8^I4YuzVVu>Yq!?sfN%MrjrHKcc5_#(&P?0xsbSdT|Zc 
zekak!ll~|DPx_zqKk4^--FVXP6V`ar?^D+BDQi6G_cP#l(*LCYN&l1nC;dG!vJ8BhA3^!s&cJn4VZ@AuQ;Z-FzO^gro;(*LagS^u;CXZ_Fm zpY=cMf7b73(DAJQS^u;CXZ=3;jc5JO`k(bb>-YQgc-H@{|5^XD{%8Hq`k(bb>wnh& ztlzJj<5~Z+{%8Gu4IR(=pY=cMf7bu3|5^XDe!sqsXZ?PS9nboo^*`%>*8i;kS^u;C zXZ_FmpY=cMf7bu3|5^XDexEzWv;JrOex?|HwjMsg4!>>>ziy8g{V)1o^uOr$>-g~N z_;}H8RyEA3hM&cUpT&pozs8IH7yU2#{d_X~dOu$Dzv%a~%6QTLqW?wzi~bk=FZy5f zzvzF_|Dyjz|BHS<+l&|eeytcU`u&_U{G2mh^uOqT(f^{~_ip1w|BLD7{V)1o^uOr$>(F@7|DxZ|Pvcd;?ap}B|Em90|EvC2{eJ%%ulis0 zzv_S0|Em90|EqpKdyQB9ulis0zv_S0?=#A<0~)XTeM2x__4|Er*aZ!{py9L2c-8-^ z|5d-A-^Q!{SN%3a<5mBw{#X64`d{_G>VMV$s{d8LpZCVA{#X64`h8zId>$IF`d{_G z>VMV$s{d90tNvI0uljvv8n60)>G#=b__=ZT7GwO<|4aWb{lE18(*H}pIo&X)8|HN5 zm;PV+f9dzF$M~iHm;PV+f9e0F-*=+pm;PV+f9e0F-_NDvm;PV+f9e0F|CfI6GRH6d zer6qhP8h%R|I+XGit$Uo&vnD+y5ZZE@k{?N{k~-x&KwUvQ;c8wf9e0F|Cjz>`hV&7 zv&Qf_aQxE$OTQh~@XgHlrT>@yU;5wlzv+L||EB*3`Gzrr-D0<4ymY{x|(^`rq`w>3`GzrvFX9pV7yg{x|(^`rq`w z>3`GzrvFXl4ZpXJH~nw=-}Jxff79=qsPU%X z&t2n9zt08ZP5-<8cm41B-}S%if7k!6-{*$$uK!*CyM8~fjd%U;`rq}x>wnk(uK!*C zyZ(3m@A}{Mzw3Y3|E}L>@Zp=VVTL)(FozlDc-Q}~-?wAKw`1d7|GR$Qk`3RIjd%U; z`rq~Y{660Gzw3Y3|E}N9j>FH6!?$VUUH`j&pL52${&)TF`rq~Y+&}!hG2ZpR>-Wvu z@XgzJ*YA7#;d}exv(fO~{jkX!HhIG)Z`kAwo4nyO)9`)%@O}QU$s4}YA2xZzCU4l} z4V%1SlQ(Sg#{cTK$s0C#!zORobJ=oHhIG)Z`kAw zo4jF@H@q(xHhIIhgTp3o*yIhHykV0!yh9i^dBgV@!zORohE3kE$s0C#!~2Ed zbKdZdVR*+dZ1RRp-mu9VzHJ;fdBZ!0VUss}792Kt!zORo`!zORoZ+M3^Z1sk%-taT`u+J48t51-A3@5hGsO~cRS!_VczR&Utq z4L_d`UqcUHLl2+dhOOSP)f={Y!&YzjYI@k}4d1H`TfJecH*EEWt=_QJ8@77m-}-I! zhId)RR&Utq4O_kOZ~eAJ49U4_m!qt2b=*hOOSP)f={Y!&Yzj`g_>w4O_io zt2b=*hR+V;-}?Wp|KIw3ju<{ijDPF5;Tv9>jsNJk;Tyg-A3k3U8@^$~H*EOEfArh% z4PU1Z8@^$~H+=pWK7R}wzTt)2u;Cj%iwxfx4jaDVGs*amep|j_%QtNKhWD4lmT%bd zjsNKXkA9z9hE3n_J>&2_bsZ2E>x->~T$HhsgUZ`kw= zo4#SwH*ETbP2aHT8@|^WHhsfq=wZ`0Z2E@JN5iIX*z^tG`3#%BVbeEk`i4#4@Y!kD z^bMQ7@mv3I{lE3w_6?t@hHc-l?Hjgz(hK=8_@f*MO+xU&& z`tAJ2Z~eA@%e8wL(f5YZ)*!&HfzhUz?Z2pE<6~pFlcwI4U z{)Wxp@O|v~t>5Nv{MPTgtKn6~u>Bjq_1piA-}-F;hwrq8@3h8m{kDMPxBlPyf9v;> zbl3$ByTD-=IP3z4@4ki?r^7CA*aZ%|z~Q^F;Wf$dnq>G~Iee}hc7ek#aM%S7yTI{Z z{dR%FcV@#baCn_E>;i{f;P4&V@EykRS#;P14&P-ApGn96`%G#VIP3z4UEr__9Cm@j zE^ycd4!gkNbN%i!@x{~!G}gTw2dVKX@X==WXsup1n9gX52W+rjZizy09w zN@)Di|3|+q;jkqfwuIx4{y+Nv=(j5zUK#PAN{t5!`5)v z8je5u?G1;$;jlLx_J+gWaM&9Td&A+o_hEB5{^Ir>gBUW(^`{`P$FeD!?ueE0nD{Pavb zQ_q5D(X-@P_N;hTJ!_tI&xU8yv*p?L?09xPd!BvIf#=Y3`7cuqZMo^#KI=hAcK zx%S+6Zap*4ooDX3_dIwWJ^%7Nd7eElo>$K=&ztAn^S_?|^Zeuax92~e-=6<^^ncR- zN&hGPpY*$8lTZ46r<+guKk5IZ-*;>=wTm@hKIymU%P0L-efgx{vM-wW@=3q7Uq0!#_{%5#zBkS%{g!|Er2mtC3&4ERZv~i7`hB;TPx`%M$tV5( zc0Bo{-}lY&eRC`XDd z=Cl6K`akRctpBtA&-y>>_dRy(3gf%%eAfS2|7ZQaXUu2)zH7{9{h#&w&M}|$+oa^P z{?Gco3yLjbKI{Li|FeF(#C+EOS^sDKpY?y%Z*Ca7!hF{MS^sDK27@sejB#K->;J6( zv;NQeP3-bnzwg&$Hy7i$7{|r_E%tA*riyJ&E-5n6brvErx6PTmRqseXp9o^&6?hE-jW^u|CV+`mM@hE|$Obdo3LA zzhYn#tFD-C#b7JGlZ|;*OsC?taV(=^!xPJ>SV6@KDh4UBeTt1!Y@cG#6oaN1a^-LR z)=RNjitlz~vlN@9{H_0Q{eSDXU5f2eY?orY6x*fPF2(o2v0uvH`njrbRpF}Q{Y1E` ze9_NUg{z9!+2N}4ML$;+t}0wrxTRzv%y>->tr|R`I=a zc&qSM;jQvTKXVoD2J=NfbCoapzv%y>pTEi%{a^GO=7hrvhZPPh99Ha|@MYsi+(mMU-W;`|3&{7{a^Hd(f>ui_b@T^iJ?!v>i??$tA4NU<2_}*>gT%RHGX_o zp0E19>i0@NzBiB8`uVE=tNyS0zv}<0-&iPL^?%jRgM|kR50RsUE0hD-UT-*_qC^z&um%koYCH~pMhzUlv_|C|1A`i-1o z#*lCNO&MaK9;2risE0+1@l%YSV*HeE`n_L>;d%_$F3q*P5(Fj+*-cr|EAw8 zBH#3X)BjEXH~ruAb8Ru8iUC#FwtUlXJ`u((j9bho!nnnIj2Kwun|>p!eAE9;|2O^L z^ncU;P5(Fj-}Hai|6Tug{onO}*Z*C=cOl{A!pX%@E8q2h*YEvE4DaLJb-X8u_t){R zB;WOObos9TyZ-O`nYwt_7p^Yf^?%oI>=ni?CLiJKV)T{o`oHV;_|W7VjSOL;nx`252!w2~dz9`hV#6UNS%Q|Iq(KzjyZm4FVeEhyEY> zf9U_A-*_$FVdjVaANsw^%n$trZTX?!d(HgN|3m)|{Xg`>3i+WQScn~CYyx8Z7Q2Ah z1>}eRANtK=@4(Q 
zke~X0>L(uZQ@@d2Y!32MKmCxO`ps|hQ@`h$xr=1_5alWQ@=e!e(L|J|EK<+`hV*GssE?`pZb65|EZtAi1)$q zJ~%)16BzMcI6w817$Gs@on4Fr<6UvQE6z{-KlOW$7k_*D{M7$bzx_l=jZE}U^b;E) zHZsva(NAu~z%Ud2-Zy8Wf1-b)f1-b)-#h3`^iT9p^xI=(qJN@)qJN@)qTfg{6aC&- zXQF?if1-b)f1=-jF%$h0{S*BY{S*BY{S*BY{dOK9NJ5ar-XjzJ-hF4H-}~YwWOzBE()Q~gu@Q~gu@Q~gGnnd&ECLcoN82?66urANQ__nGRS z>YwT-V?xG+j0qW&ss5?{ss5?{ss5?{seVJxO!ZIoPxTvp#^^Is{kAcg>NgR~RR4m0 zJK-$oU(oN}Zj3`?92yg{Ea+d*zo36X|APJn{R{dR^e^aN(7&L6LH~k&qtYzsH!RJ9 z{ssL8rdiOxpx-Vb3;GxIFX&&;zo6e%Aq)B!^e^aN(7&L6LBBmu7W6OZU(mmxe?k9( z{ssLD`WN)uJ7+<^%|jOS8?|OZ|AKzw)-32Zi_3z3L)R?oH+IdU{zd&puQ7VfqW(qw zi~0><<9&A)^)KpQ)W4{IQNMXzjALV85&Mc5$;R9+i~1M!FY33yj^S(;^&8K|UE(b2 zU(~;-e^LLUe%p*J>R;5qsDDwv* z{zd(Z`u*+9V+R%UzbxuE*3F`RgWW9YU(#>58`HqpN@Yp^l75@1Ea|tK%94KDsVwPV z(!ZpCN&k}mCH+hKm-H{`cXK&Q`j_-C>0i>nq<=~OlKv(AOZu1en<8dO|C0VC{Y(1o zv$CXrN&k|5Q^hRlU(&y%-+m=a`j_-C>0i=s&y^+pOZu1e8zE;&|C0VC{Y(0n^e^eR zeaVvkCH>3#m-R2}H(HKKWK1HntbbX*5p(QhVkeVj{kAe$*1xQOS^u*BW&O+gm-R2} zx3$l*{$>5k`t9zstbbX*olTbY+n&WJI?MW(^)Kr;kd7&4jHI)y-ySE+`j_=D>o=Uv zvi@cL%ld6~vaH`;C(HVcsk5wqS^u*BW&O+g?KHBie_8*s{$>5k`j_>u=wH#lqJKsI zivAV-c5zwJZI>bHZ+s{U2| ztNK^x>H{#E^}`d9U@>R;8r zs()4gs{U2|hVQXQ%&LBFBdhvX^{?t*)xWCWRxzvkSM{&zU)8^=--I`-`d9U@>R;8r zs()3#X>V5bujyaYzovgp|C)Zg$E@jJ)4!(Q1~O~<*Yq3fW=;Q^{x$t;`q%WY>0i^o zrhiTUn*KHYwv$=YZ$_Lo{cHNy^c(YMP5+wyHT{PDS<}C!e@*|I{x$t;`q%WY>0i^o zrhiTUn*KHYYx>vpujyaYzovgp|C;_a{cHLeovi8Sbh4%&Ajq2jHT~=Q*Y&UKU)OKu zoOS){`q%ZZ>tEMz|Cx3D>-yLAuj^mezpj5>|GNHl{ptEMzXPR~W>-yLAuj^mezpj5>zfE4&^{?x<&C9xe`@F2{ zU)K*vWL^Kd{&oH9`q%Z_?`2*8y8dfhAAsee=drv6R+oBB8PZ|dLFzo~yy|EB&;{hRtX^>6Cm)W4~JQ~#!Z z7$xTM+0?(O-v&6F`Zx7&>gTSqsee=drhW!1oBB8PZ|b)f&ZhoN{hRtX^>6Cm)NgX1 zP5qntH}!Ao-_*aUe^dXa{!RUx`t9EF_C*PJc=Tl%;3Z|O$@vZWsj$d>*s{agCM zoNVde(!ZsDOaGSsE&cdFw)Ai5-_pOOe@p+C{w@7m`fUuerGHC5YLG4cTlzttZ0X<9 z5B$UqF)+PDt$$np zw*GDX+xoZlZ|mRIzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAohE2a(9O{%!r+ z`nUCO>)+PDt$$npw*GDX+xoZlZ|mRIzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K& zxApJn-_gILe@Fk0{vG``z1h*fqkl*Lj{Y6}JNkF@@95vrZ^xb;{r10MI4?tAAJjuKr#9yZU$a@9N*xzpH;&zfE*@_3!H6 z)xWEMSO2d5UH!ZIclGb;x1A247W?V!>bIfJuKr#9yZU$a@9N*xzpH;&KSYyV{VYLt z_1oNLSO2d5UH$gAVGOdXe^>vW{yqJB`uFtj>EF|DqnkbbHrv_Lzo&ms|DOIm{d@ZN z^zZ2hcCx2`Pye3&J^g$7_w?`S-_yURe@{QylRf==`uFtj>EF}8r+-iXp8h@kd;0hE z@9DQ0&z}B0{d@ZN^fMCK)4!*GPye3&J^j#7_Vn8p$F?}O#j!2Up8h@kd;0hEGp=DQ zvaf$%|Gxfx{rmd&^@B&**T1iSU;n=Tef|6T_x11V-`Bsde_#K;{(b%X`uFwk>)+SE zuYX_vzW#mv`}+6w@9W>!zpsB^zuj}}p0lriU;n;-<|B5{+1J0Xe_#K;{(b!{NcQ!^ zQQ6nOuYX@ZACi6jw$s_y4^3rX|Gxfx{rmd&_3!K7*AG;MC&_{S1N{g35A+}CKhO_b z1-8n8{sa97`VaKm@8>}Of&K&i2l@~6ALu{O4^alt%7Oj^{RjGCtsLk-(9f@gU&(=f zfGY?3?YwiK|3LqNep}@*0y)rsp#MPsf&K&i2l@~61DZL|Zy%lm{RjFF^dIOy(0`!+ zK>vaM1N{g35A+}CKhS@u|4{#-{zLtT`VaLV>Oa(PbDl%}hx!loAL>8U51Zvs|Dk@H z^c?Dk&vK~$P`{0O4)q`EKh%Gy|4{#-{zLut>p9easQ*yEEqf02AL>8Uf2jXZ|Dk>x z_Z;eHC32|$Q2(KRJMJ9nKh%Gy|4{#-{zLtT`VaLV>gOwRs2_;)_doIIKh%Gy|42Wp zo+JH7`j7NObKx;^r2k0&k^Uq7j8TsCAL&2Rf299N|B?P9{YUzb^dIRz(huI{NdJ-k zBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<=|9rXGv!GCk^Uq7NBWQSAL&2Rf299N|B?P9 z{YUzb^dIRz(to7?NdJ+3wj@XTkMtkuKhl4s|5*RA{$u^e`j7P=>p#|itp8a5vHoNI zoI#HDAL|E#bFBYZ|FQmK{m1%`^&jg$)_<)3SpTv9WBrUmj`bhwKh}S&pIHd=5@svM z`j7P=>p#|itp8a5vHoNI$NG=;AL~EXf2{vl|FQmK{oF$!|H|B3z+{U`cQ z^q=TI(GOdOyU2Oa+gs{d5~ zss2;_r}|IzpXxu=f2yBH%Bg;~E~olW^`GkJM{}zGRR5{|Q~jsOa-bIOSCTss2;_r}|IzpXxu=f2#jf|Ec~{{ipg*^`GiL)qkr0RR5{| zQ~jsu1YyuK!&Bx&Cwg=laj}pX+BQbFTkf|GEBi{pb46^`GlM*MF}6T>rWLbN!rJ z&h?+`KiAJ2LLjQ&S3;h@RFZ5sNztDf7|3d$T{tNvV z`Y-ff=x1GXp`Ulnh5ifu7y2*tbA#c$bE*GQ|E2y*{g?VL^@{!9Ir`Y-ig>c7-~ssB>{rT$C(u0!Nf|E2y*{g?VL^H(ztVrD|4RRr{ww{QeXjIh>A%v?-sMXFmHsRJSNd7JT*xD&t^Zp8wf<}U*ZQyZ 
zU+ZU_bFKec|F!;W{nz@h^yuk~N+zt(@P|62dG{%if$`mgn0 z>%Z2|sODP#wf<}U*ZQyZU+cftf35#o|F!;W{nz@h^>c~2)_<-4TK~2FYyH>yuk~N+ zzt(@P|62dGevUEM`kC0==y%&7H~Me%-{`;5f203K|Be0|{Wtn=^xx>e(SM`=M*ofe z8~tu0Z}i{jcRwjN`fv2#=)ci_qyI+#js6?`H~Me%-{`;5f1}_1?cC_U(SM_#RnM*d zTm85CZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t^Qm6xB74O-|D~Bf2;pi|E>O8 z{kQsW_225h)qku1R{yR3Tm85CZ}s2mchM!c`fv5$>Ss=KtN&L2t^Qm6xB74O-|D~B zf2*Ij&#nGj{kQsW_224e@N=vGR{yR3Tm85CXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQ zXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQ zXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQXZmOQ z@ATj4ztexG|4#p%{yY75`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{dfBB z^xx^f(|@P`PXC?$JNA%x|r~gj>o&G!hclz)2 z-|4^8f2aRW|DFCj{dfAA_PDi>JN!0hN>vvHo zbN%jrXRhB3@XYnQ1D?75x&FC+20C;7bNzGubNzGubNz0cWv+j&f3AP7f3AP7-wlYk zoszl!x&FC+mO69&bN#LvWv+j&f3AP7f3AP7pSR9j|6D(J9kriPd;RzN@Acp7zt?}S|6c#S{(JrR`kmm96a2Z?f3N>u|GoZu z{Va9v^}7L+d;RzN@AbPOkbC_+c<%Mz>%Z53um4{Ez5aXs?#AR^|GoZu{jN6UUjM!R zd;RzN@Acp7zt?}SpD)k7{(JrR`tSAM>%Z53uiyQd-0OFPCinX9_228i*MG17UcdVW zdC>o$|3UwQewU*1p#MStgZ>BoZXo1A|AYPq{SW%x?#P4w2mKHFAN2d%6XZevgMQbh z@}U1g|AYPq{SW%xxXXincX9Hd|3UwQ{s;XJ`XBT^=zq}vp#MStgZ>Bo5BeYUyQh-} z{SW#d^grl#UndXxAM`)yf6)J+|3SaIJ8^d>5BeYUKj?qZ@AgO@^*`!&87q(aAN9LO zl1KfI`XBW_>UXmzkNO|=Kk9$f|ET{_|D*m#{qFhXQU9ageQNPPwdDQ=?|55*=ewV!RsQ*#_ zqy9(zE_>xs|D*m#{g3(|^*`!=)X)Ei{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!L}-&5D4pZ^d4AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` 
z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%|6l&U{D1lX^8e-k%m0`EFaKZuzx;ps|MLIk|I7cE|1bYv{=fWx z`Tz3&<^Rk7m;W#SU;e-RfBFCN|KLqo{;B?{e*VAwf2aES|4#K!^-uLr^-uLr^-uLr^-uLr^-uLr z^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr z^-uLr^-uLr^-uLr^-uLr^-uLr^)KjO(7&L6LH~mO1^o;97xXXa=l{$9m;djA{ssLD z`WN)`|KI^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?P&h*dp&-BmqI|EQ>06Nn@(?8Qc(?8Qc(?8Qc(?8Qc(?8Qc(?8Qc(?8Qc(?8Qc z)9(yGodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8?Mq|4#p%{yY870Mr?PIs;H= z0D7nYPXC?$JNA%xI*FV=k*FV=k*FV=k*FV=k z*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k*FV=k z*FV=k*FV=k*FV=k*FV?q3_zU$=v@C?|6KoEzcT=J2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todM{*{(JrR`tSAM>%Z53um4{Ez5aXs&H&UIfZprB*MG17 zUcWN{bq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D{Tz5aXs_xkVk z-|N5Ef3N>u|GoZu{rCF+zbfuzH?pjNqA1@Eq6Wo#13(A{ATrZUHdN3QBM{<)(Am=c z1eSqpvmW{%`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq z`XBlq`XBl+0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{# z48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b z127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A z01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&*{ljVN(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR 
z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifp-|D9Uj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H-hMx60HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQt5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rLP zAL^$8L<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT{YXCzAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLTeXXAc5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W^b`FwfM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks10Z{g?hr|D~S> z5Dg$2Ks10Z{g?hr|E2%Zf9b#UU-~com;OutrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@F zFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#UU-~com;OutrT@}@>A&<}`fvTW{#*a8 z|JHx&zxChxZ~eFaTmP;9)_?22_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt^d}4>%aBi z`e^{s0HOgz1BeC?4ImmoG=OLT(Ez&j-}-3)(Ey?WL<8v7f9t>X-}-O;xBgrIt^d}4 z>%aBi`fvTW{#*a8p9T;OAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$=x6$A0MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2KtI<{1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4dDFLLZRW;Z^Q2l)_DwnFqnNYTn5+i zhQGZHe>Zq(H@tn!na}f^AOF(7m$w_g`oA0Qc&z_?)c+pye|P@){X2j9{+(~#`RXq| z_3a`5eZ!r%NBZxbw@3Qwuit&jzixc@Dc{}kDeo+Cop-K(4Ib;AtI6}u%hGx01payF zc<_1WklT6ZG{|}HeZYC|$?v@Pmw(A)={(uGa-Qt}IL{`m&$F4x^K5G7JiAsn&;Iz%GZWxE+lhak z?F&E8Hh36(@fXYM=fzs3;m-yugNCTG~Zo?)<5v`kJtZ^G_tXctMja#ju#%-vvhGVR;YKhxWLy6lU zGcYg^q=Xp0(cfuKTlY@B0pu;wMF><^3inBT@K!TstXtk}mDQ4=&xLxJeOd3;)BJFloZS zyl42Uzulkzo5aWew#z>$eo{0Zf4%?BUp{g_3Xq$(cW2HckGM%u1bAc5u5stjA|BVx zihjcMar~qSzuEW3_TT(<&d#6R_s2`OR>Y`sAbJf1zSl}`q)6o-pnrv)E@Dvntw;XV zN>39B4v742qUf98BPdSh=4ph5)ybk?0h?h1=ojG+ErcsaroW_iNtzE5+ELZ%4w0wCquJc>U?^kb1V>qMS=MP%(eQSvLJM%F(WCF@?($cAOn@@L;fOXlm* z^89nrvhkY;`SU=8v{SDyJQXRMKb$1Lo*XGzGD%*1VUle5Y=Zp7n2C0fygX`fC?vvguuPP^>V$`j=AZB2xf59*}s zwoa-hideb#Dg2M1{}Hl$kxr`717C=2Q*pwnCq%!Iy3y|qZF(6!@U>2YuV^KUxLRqqZO9$48%9*vy~OingcMED z$W7e)@V|@HO&5I(JV19-M}ZF`BuKcx1da6JwvxwE?g`0!x%$>z87W(*{g>EfS~LEp zYK?f1PS6i&s2kGwL@WRH+^gDp{H79b_`BU&!<1+#x}%YA9xv6FXHl=$G%}rQrjUk= z;Etor2SpB&Cj(MntC7GmjRYUl$dQv8Da7ATUd)LaIdwEi{^9sza@v?ApS+MHXKp3Q zKk9!bHKbQTxO4QQf4cf}?)|weqt4F{%#wdTK20tJej%TIkt!Fz{)KdH(nto^T85+L z6!-o2fnUhbr*q|t&;LL!e>YbatGwNgmJ;F%d>0{eaL*^4>1>oZ$wxK!4It;^e}FJ6 zkQH37Qf+-dO5&0F!6-4Dj2f?xU__)8ZK8~M(W1BG-xnpNOQOXzFItSNM5>9S1-n~K zJY_ecr92odb@)}CjFuy$)qsARzSNQ(C7tw1GwwDpuZofl+_Mezq`4Wn6YRNBl7svQ z@)-Dze({t}PNSQ#@lTOQGDTe6-$Fm}AiL6|#6-NMwIZ#_qKhw9TN1T@x)q z+|~5WqGK94M;yV}Ncu>m81OTdXr#=mkwc_;j(X@KuSe(`&ylA;gNyj-N9iAgD>7)L zxIsf28aYFprEMB9Ws8*256a2c0pd`7{|J6XaUvH9Q?*{C^9J?DwY$V^o=3k)CB7_; zRN&XLg#Pjo{e}C62w#Jr>Wf#IsQ;XkIop`TO!B|3SGvKX%E!Lj5GEIUnnfI)*nfdeKF8lY zMJIQuPs0L@bd51zFs9g7>EtGUX2vAlOl%qbF7dSB)>99LWRWiPvIQdbl%b^-KjPMt zelgNKikm#y4I&S?uWO=C%BcrqI_YDFM%qiwS^9};tP|1Apng7!mH^{y40?4r;hv9@ zUth!XQQQlL4pKY={vS)DWxq1P{O7B5>1T_PvvIFLEtTYE{)6*iG30q}IkR#;@qU?4t_pQcnC9>-*_Y&_m+#A3|JMBciK|Ypq-5g0? 
z?$ai%I>}LC@H?`ecD{ps;r=b**5If5K{4(_5U|nDJ{7r2nM`{{Mvw+CHb}kwtMcp9 z$R_Towmj-jX71U7yNrBT(d(;4vT@%atg`(<^s{gZ&O`AU;vxkH-R+RgZ`XWiZeyd5#BUJ+btk%%4nb;6_HjR zX#4EUKZ(AM}Lf81jf4jOv<;_=S1@MXl)Qzi;Db#7^Ep#^9a`fzRS4 z@D$~dND00iDPhw3Ul4ddo_UV@1N7xI!mw@#9!GwjAi*~u`308|<{Iq|#ZUy7xYvkO zYk;-HUv6Oz8KVBqM$1avTVSI~_Zf|RM_BzZ^>st!S8BW*Bkw`n_f;K6%YMS`hH}an zc#`!3dOmzb{?+~e6RFQ8>@NC^hCJ**r|;?4i!3EvGIiBK7_~N8h`t22<8Oj9tc!k4 znnlUf-$CZ@Y0->@jA!&G(>&_0I$B1_-`jiXOG8mohAbat?a4h=%-u(ZfcPE|W+CzY zzsTw);#oyJpzQcOX%rF9CFB(_*Rnn+i59gc2)skPUyYQm8|0mOeu;ShE4&JS0~)aG zik9o#7wFYVYgV*`(O<{^9x|HoJOLh}N8o3+vu8jaNsW?8N@j{U$XC%m>V$Pbk%sjN z_0*!ukMt`!h4q15BR%MW7swB5JOk~fKTBOA)%dR;qwNMY(ny#i%opWLME;KWRUcDx zYUu*%3qNB4dk*qbRKmUJdNsE&UZmhRKK_S9-r~L@h=Z~?>XUYEq0iS5_uu2Ug8DS4 zYGf6%b1P#7{>qNdAnl17c^Chs#8--68R6%mFN7A_MjuCd^a0Z%>T^A9Hto^eyprov zxbG<8?VG6g&*{hCX{2=(dkn7qXwQb*I{APwY$NnX$lEvcn`U&wY3v_cx1krIZ{!~1 zS>1RaDJ8A#xK$k&lg1Ww(+-i{$o(*#e5$*R2^6{GR;_ejRp^FS)l4J@7H}`e&>^z0}t`(ef|&8)*N)(OtTRl%h=D9bRB3E$#Klq1{*Ht{95T3jmsB`_0$RBVm71Z2QyoNrDRC7>i zwnpwzS4LzQ8Dvix_=f(XnIYu|H4@D@S(U{cvqK{fxo#MqDTc%pX(SCpc8VM^q=;!% z3gxHW_AobOrij6kB2SW5aU0hOd*s4QnUCA#N?~r|`Z1uMioLi?(92Wg9`UJl*6%2D zIoBWJR&(k^$`{Spt&gL<#@NTBn}<@wd~l|iX}bij+n1zB%Z{1SLV6!@zmc%*NHhMw zBwai48*^#v*(uVMK2y3#^L?&gr#y+=-w%Oq+PiaxY{tKf@@z#m;-5nKlVBmt1$`go zhoafkBWpGz^|BQI(sFExK2e4Zm*3FH72>WKqu#DD&Xb=6!c+}&4|QJ67-Ar-UeSaO z-;G*ulWQO5FGjno^s7UV`yXDz{Ru#0P)MX0&#Hg;=K<0AI?GW+mk&L@bY^}fhl zuKywWcH-Lw3vsLU*HWYzyZHszR;oBQasRj6|CvUf#!s!Ew&4HA=xdRGs{FWbJNia2 z-C;dH1ZrJhjospxPy9#jWAm)>6 zyv4Y%i}Fxcf6hJmgexGvb?W}dsmtZqBz}buc$v6TMHbPXYW`Jge)DpjJcD~Fv=F9c zEAethP=+m;H|fNI-z&sbyf02#8bq4y_#?aG2*>>R1=m_!_(MRY$@pOyjFx{?I@kXj zaxK;W*GPN0$ZJS_kTA4k;5jwF&lKaRMm|>IU&8%7?TKw0#%O2a%u#hi*j!{_!b~}G zhcSWmP>}V|7UC_(1`Lr=vJrhZY=`|&OgrdjbB*%r!GOLJ|0-1vA>OWYf`V#w&;mC2j znx^Xcs#dOZy^K0BWRe$nk$c1V{Ux$`d9?fq;mS@&ODpcTS+m^ZT0QwUU5l1{@@!!K z)sg);rt8wA&oRvLusstvq2{(%5~xa2wT6qb!_Eog_n{+aP z^!|q`50x7HDwe(*E9I-0YqHq)&?nx85*O=wFx7GfXwXRnd5?xk@cvVr+tMD*?9YNn z==-fvQnHuv62C=&stF!R-k9VuV58C19^mPCcml3dh$PK zHs@?vI$6)P6!ht^dlq%^n#j>zaq_Nc8y=q_0U~Hrx=tHDCoFIpYVKBkRJDi zIJ4uL8P-y#D$U)^6|r0rW%4P5SEG ztcX6c8+VYocRw;83ZaZOzLhw}_h$pFf7usg5nn0m05jM1*sx)YdRFn0Ps4ihip>YV zqz;f)!nA+`g3P~18CPnEFGxNEvD8%`_mW02<427J-A){X#L<9kW6mJ#OQfgTN`Dqy z_!}5c@GssQ!8si5+|78R;$+^n5w=DX!T7;h)3>xAWmWk)%A9(MJY0q=Fus3h-L2NC z>I}`n{i?3@tnbu5Ex3n1bd)$IPvkuH=Ms1+o_#+3;;}f!`APH-&av1S1+FFm?qx9& z{C2{4|Jp_#^qZJd;rcA{^$z`qwhE&Mz961ktjU$X;7^)Co+|}8QwY9D{Ci0IHP)A0 ze-&FhdW^B<;}xRE-VH#MP$<}~|w z?!QT#WwRgk$Ga+h)?n%xO|V<&_vDu~LnC!lGLiFWdZy;|2g=pxJu{;(S&uVdSUK2Fu|#Et9@|-{v}&EYehaJJe4wD&KhO! 
zw2kowXH@8=*Vvz`>$Iinzh-qkS!5%AR;5$FTh#R|uJ3!4e$5)%ov_NbR&uS5aO3tz zyp2EHzXAW2AMW3*?!Q6(-J$OipF{bp{KGb`tyb6fah*C{**zL=Q{d)75{16gS3g-r?C#wqZ{xm;ojo2 zjAO*_QuV0(Ro+NP|lcBU@`2bkwM>knL2^F8*m2g$QowO0?mi9culLC*dI zUn4(d{hPT`ipOX-o}C(QXk`m=ZwGy3x|9^KFCf2*sXz71Zy{5dfjM2z9HHv3D2Swv ziqsrKKhtjo=6cos6~r-~fA$weluMlnj$faLo=g2=%W6DO_ty|sjp?dyRiW2GKIx7> zqxmYGaxmU}%{{Ti&m5)BH;U*h#n)C!0%4ON1(tHpJo0!5xmvl&_gds1;ispZhE{0AL@C9wjxz7Q@)MSx2)lbQvhc+$7Pla(3$N0G4 z^eFGD9~)^u5B{nhPa(@Fd(j2%9U@E+QqidcWv}PB-UEI(12tXrf8scgK5l1;KZIXE z_;is%m0#6Qh)>Ob#Rl}x(q+Xg?01&3U&cz-^>u_>LOB|d<(z|zsQ8FO%`4;O*vB(1 zl_vL9ljb}0wVL4-@~$RAYRUJ}+7)t_>+ex6HI~#+*7JmYfSO+0P-z&#i{Z z&o#ybu7_2a8}cX}(o^Bj5$>Myr~WRYKl~w{%jjJvBc+r1^c4CDq-!Wrx`WKsf??KVzwWE&j!zr<}phx&Bp# zuyo`30D5l*>o(Q{tPzSxr*tpxE^w`cIbZef3>EHc>NRbpluL` zSYxne3A{>P0$TaAAL0wdlAj6K>J;KS&h<|2>t_7<1o<&i|EOF}!mDQ!f%h3FUci4A z{wj@FHEzJ05J9=Niku$L5B2tmMqWTa2N%@+k8%G5Z1hv|ui70ymYaID*5vJ*@iX93 z%B4?c-x#EP^oc+kYpx~4#oD-(^_Jx0hs!|CBW0=7*DmgRopQZF zdQT(Sm;M#u-UT%e?Lr1PYv;8V(k4HQMWx)Q_G$6>l^UOxf1<7RoC}nWMa%D0IOd`< zY~~~MTu^$u@?S^%*vMa?cS7CAk@6mMO6}vsPy5%r#GaTm%gEC^-=#?@_4B*dNHMMv z`Hi~&bFF;PP1tOa_id5Vaw|g0R9k#Oyv(2Gfe3M{aIa|PQ{)@)&tRVrA)h0y-$wAf zhP*}c>=Jj+*X&tmM2PE2{MpAjkrf9?zkp}!Z$wBbea-g~`;9#Ol6eluJ^jzq?(7wM zzo(D15*PLO5BRHRQzo?rT&9&A+<^$KRFJ1CuIJ;fnnpeuCw_}w$=3XQ-Sve|y4gFcIEG%* z$}9M(dxu^m4$@zW`|2B6Zuis!{x=|#S;{Gq2eRz60$c;Iy6L!MK97%MHfyPnla2mRiGjrT_K zzQ8)%Djc*vN4-)9EZ6ld$d9N)OBsD-iB5Xbb@DFxbRmyrv)9jL48q1f*vlCJ@;Ee> zXykY7?M%nD;2v2)W#;=YvuYSQeV${id#Nn4%Y1k)n zd@0V>VXBczWFskCab`}eT9 zr@7YsHT6oID!KXbTfkm0^Y zPN4U}$8ZMz8LXi1e$;P56R2PEbq_z@k&;T<`T=@K=k)xAE?LX?uSwlR=IPxZA ztVGC`KjGOtY=@n&8ycvq@R2=oyh$TDxc9^FkFiHb=EETEtqg9_HZwJ3L!_>7vGXm2*SZC_b)viFW{ebbJf8+Z?BpD}*=F5l4{M!OVR&=5af8 z5XLbL_eayU_Aj5%wv(on@oOpiB40{oeIUknd}5k#9vk^rDSvILrtIodnu^B{Yu^51 zs;2U)MRVkvztmKHe^c}KFI8ze2a}{rvqw%;=5Pj^Bi-wB#8tarQi%KN>zpAY=fZqg z2zu;DohK>#T#S1KG+DIrd(OkntD@zjRZ(IYoG#77)1~F)6Vl3azqU|{w69MQt7fv; zMyHD1K2v;pX$w}JC? zJN?VTJEM%P19|kX0&E58$KA;|(KW{Sa+0|Lx?R|&7aK+Pa9=&~HG&zOj0Y`97t#$@ z^iukkiFIW^dGjpcJRckOg0Da${yfh8Z<9}TFZ6&1n(?!MAKiM8vgPu=ANm<^ET?RY z4fZU`3g_@U4;SGQT!xU1^C;xVD^W6nybd=Z{9=@Zk$1t@{e;{@K7fZ%x@U%%7}IrW z*b2lz0{nOmqWbGp+|wZi=D=KNO=g~6rIU8Bf(`8JvG-i;9gMS*Wj^5+!eUqoD_|w8 zhTa>DZKERnW9%F6Q2*7`Klm6s{m>cW`5>GCCv=1BwupO(xdujJqa`#8o@NZ65G`wo za|3LI&9DWw!FJdQ;rF9uH!?@*W%TKCt?WlX06zT_l8NFHe}~M8(0n4!!i1Nb(FLslL%viMlhdF7BkXiVHmGo}ro2&-(&?^i$y9n?E$z zGUoK>u{FxwF`GQ9x-hVbUe8znMO*1#$7ad{?s1jR6!#|HuRzzOV-L{30Q*g)&9P%I z(g*%Tp66v!e)85%9<4|lZhHeZ$~wYfU_QncW4J#7ra}@-hZL9tz4WENeUJK>hcRIQ zw->kSV}AN~r-kvy&iI3LdKrJvUBvIMrSD}kehendT;iDz3t=%dC$r|E{gE+u=Xk|4!UFupdmU-AiAN5aaU^asYQe z7$YKOWESsRqla3t8D#kA6H<&cK?PJn4b(wBIGc3Rh%`eBSixPu`U4#3o~^7uHnC?w z?*TvKgb%zhz*y6|Nh@uXsh#quvf5mf%}W`P4m*4CR^~NzADja9?)Vv`9;u#J^kUC_ z&_9v(g`dV}bU$;gA>&knvn=!MTyWMmn{PM34UeI72tWw-(( za2;-fFKwcPkzwO>QJ9;wau;{$*AZf3b9N8Cxq$YA2k2H+Hp=)AUB~f=V~oB9cCf7? 
zPt22-xE9T|7)StjIpa}fluSkUFi!O2PC}m!{!zvw++N0!0b~k(b6_sahlQ{hmO|K= zEGv*9)`csPBi~2xG8*-k{Am5M8o$;%j788M$Ji9f7zOr;j9H9_4#vT?gxLV&_BWaL z&(Jr+7T5;8Qy2@Ukw?Z(4-Aa5F5nEqhxFfI{gKI7n#Gu!&De_U#_d|fm)5*GUZQwv_`QaZIs(iIXcL%gS^&pPdzk(8CsyIk2QH0 z=kCbf+w{K@`d=me4>{0){Ud$2{m4$@>x!fN)T{Fv{f~BcA>Gh3i2Yk4#Y&tGaDoS# zn7NU{kZPjpf+LB5dVCq?}Us-O?7Vh;>GrTNI&k*J38sQ&$|sL83UlZi?PAWIe?4(XPZtQ5XVE% zt;7Z(1`=Q@gj=5wA1`Kyj`Hpv?vWYHw{i5}T>39e$Im>flN96}FtVSXk8H-?EZ20h z5Z#I$w&$^5{*?NADoz~KT?gf~XHixNy}+|*!i_w}_bDI8%2M810L`U-@I(2ax^r1y3gZBa8l#P5(gp*VFzS#Lne< zJ{*EVXvWWy6)(l;Rzo~{hj=leSAe4=UOJA&(?;=PLymvvG?DtirmMIfete?TAnTwW zM$+P>5gB?ZR?Ns!&WcO}oJpfwp_O>rh^PH*yjVF%wQ)ddKS>(k7>ySr&n+B;b3zYz zzz?V34D_B%kUr=K4-9}8eBd8VkWSt^=vosmCpb`cBD=HW#YOqt&;zZTNPjEo6VBQ| z`jpR(-hp&@B7NfSK{_MjG%(O1Cu_%TH*9$1NcHTbHD7X~z1@xogCHh`Hu@J6JCIeRm5 z3p5|p$~NS7uu{%;uxC<6aFE9iXdR^d!(3ayek+wZF`c;)*@4@E+{wMWAqV!u0XPJO zPz=2#jKAfKzYW+|E$!b$`-ATw`w!@(KXhGV{S{*Ub)WI~2IKEA_BqJ->t*i$2HPT@ zk;met0{P>38(E*Z6)5Jq&l@9z1}N=OW}GGIWIfyCq6=s~CGB1`?nd zKMQeBMYl2+s{Kk5`gCxRmyQL@kzgZ_Dfl@#XPASW3-0yNG9S4RT)dCb4<1;Idnx#q zur656`N1023F++riKC4Zgyo z7@z0K^8or!<7+1Ne}VZIP7E^ta;=*%u0GZuZLB{ir`gNM zIS77MWIL2jpgwut>nI>^x#aO6dDTS8cEULA^i$++aJMlBAoqg{|NedSfjo^Iz?~1i zY{sP}^wVAR(=65>*#5ZvBduTqJ7Yjc12$629+rCwp%_e10aZ`~bTcS_=kM-p#{Cv0Gl5@z?bp@Qsu{J@!2$$e8v^G$F^3{Hl z_`!y54-)?!;)g4Q8G-9?6V9sj2l6i5gWi+aKlFoVn0gITucO%i81>vh`>(?O;RHCL z8(iRq9tbUqk_W^y!g~e}k-9&{79a+UM+?MfdrK0~r$Q3=dJAPbGCY}YOOSJ5F3g98 z5TgGqMvgqkyOBtBcB_Z5o<6&r_hRr{4bj-&T4boRP|m3IKYvR$;9d$Fp|qF&#`*ka z^ewOrw!==?4dGdDNeBiqI=dHmVPKkH-R7B2i~m1tVIs< ztxADZ;9mtbPzTMK1!94EbZg0BX@^F1GdR#Yz+QV;Y@o&;=c2&`AdP=lIi~%`u}qJKj>D|S0DxwVEh~p zIlhmZ!v1#(_CF2#pN;*)z##MTFyk1~ALRMqdi9Po_P-VThwe=5e-ZW%J>cVf$(&m- z{tQCR|IH;@nNIvEV5!C~%dtx+<+-jYmN~ofkzLyGv%?(189(K{qd+A48=cI>?LxY5 z>tsIqLRbv_``91sV%<={_=NOT_!%~lQYIs(OEd82q?w+Cy=BE6Z!q=2x(=$YGaOSzrp-C#+-AT zxo40$Xqfqt`LmOGv@0u`^EJ*w_i?_T$N7FD=ljW=?<0E`@O)qi^Y9|(;W*~qY0SIe zo5(y2>RW#g>H6UmoPp*%=D#)kE(E%@i~V;~v{aqlC+BfH?y&!^X8(PR{Wt!#!+CP_ zA=1ybYvt&DIj{LVyqkGiSb#QSyG2c>r6LHQszQtG{ta-HipRTv2IKE&~ZoHz5k z98D$ID}H8P+WkH6e_J#Y_->c)TiPZ{vv;DjSrVm{XEj!6A4?Q_-xRTRO%Vsrbvl|R zOV`*8>AaRK&KuL@gywN^X?`Ky1=GaHGe{qde?L)6|6!bbKsu!_@QwDXd_Rt^dk(u8 zq+Nz-r<1f3_!dQo9|ovrFPyN`t}fac*=3QeY0WPKlJZWX>3ta}EKv1(9N>{yNr2iepuz%q7fxSO|+@DXf5% zuo`-^8GrUN{+z_VVc*K?O zwZyjp+!rFmWr>iD=$>E%b4Y}2M&APdKH}~ojU}W7+we1wM#y&LPH3);kR0TGu!JHQ zzakjF7`s~Iq%;sK9iyCUjPaZ!m**Uy_Wz-0qJ_1!jJ(AAyx68;nD!dvc`$M0!yzby zVlY7kgpV^Ge2QJ5*MM^g&)ktN7tfTCji8<}`EIfAM)y=R7aU{E-^AEo&K!XBZDstw z9VcyLane2-C)R!J3-j0)7O?+CI&kmi-WITe1DwzU9`Hl&DEr?r_NYkDD)!&tUC;hI zm;E<%CbIucX8((H2HF1(vbP;(|BLLwZ7ztDQ^a!y&cS(TCO!*XM7IvHhrPf#K&w_R z;dV?TZ|r;RNZU#Fy_wWk7WDm$QlTqr1@E4V0hyHz&u7ZVhuR+`}egkj?3o9}>{5JIEX5or<0Wj=kg! 
zKYJc|Js2<3@k@a@Fc;>-LRbt-VSK-mNc|+U9>l#8R)dlE6V&)H@>qgwz#Y08Cr0Mt z(oXv7W1O?%w;8s;HrNh3A^dTI>_+CmemDU6a0s0HbW(^^XYcMj-mOA6K?SHYcn=KF z|Go5o-wo#95Z@4Ttp@6#9vZ<6EntP#T-N6Y)jELZgJ3&`-9QJr13Izw@$A zLf(Wh+=YAa03Jfv$XSmqLPDR>M}EaUkMaF3G6oW0Dmdw9NyzDt0&~DU#{3WS(bb-$ zABdzt@XExMKK?XUrTBRE3jZHzSt*kCh$PV%<}xeeU6InPJ#1Q+@3uVj8>y|5d1 z4*2p|cNDM=F)+U&TggWo`D!O$R?=`3dJn_TV;mafZVF%z%xmcWl>E-_325zlmoC z#BU}3_Onq^gYMhI{+~Q{EFzCc+vzB&$KQE}ca8QuIv;RnvmV6H46ZENem&z7wBWXa zFP(loo3>w0A6PJ_AANGl`A>(G2bD>sp0u%5<-!QO{$xUdn{)9Q?|ZC|9cZe{L|<2u*W-d^oN9Fbhd z&$lO8?sESQ#Wz^F5Oz}4H94~B*An@fjQ92I=gQo z^-OpV_YEwfUSWLyKg#&eobc2BpSI~`qVV`Dd=lq1C-o)in*0ESPyK(?Kw@}#`p#T zeGB-{lDCuO5jj9O^H8!xUoGF&kWZ8{wjpW%h&*O8@_k&|1&%ZT1qej{Y zr{4KBfZ7)x;CenB0u#^bO5b8WKrdEqsDLV{fjX#%R!fSs*?Go}wDQc|hP31EKspln zT>`?Gp#`kq04MZ-2YTap{y&Z9|Fe1i4+HFL)ZWI&L5=?;=dZc^MgW`u=Ng{>uj2WC zCeQ!VdH&Du2{?Jjz)w7<;0&AtH-0WSkM42t{sLS?zXX1s?fXh*inn^E3|RQxfO(w1 z@1-7?UzH6vqhBW66|kWH_+G;ZZsS$@74JMag4A7zIvnKv2il@b=KL9Hqpj@7 z4%`l;??ZkcgnoXV`)@+&WbFJ6e%k^4F7y_t^C$Lr;DG_~Qg+`R$`73l^uINnKhfu$ z$nI?V2GWhY2YHV;9>7D;tz#VmVcx+E-EOhM zj&dfJf<6c4g0d^s|K_86u&;jl=tA_x;K%J7;*0=$8$d3_Zw0Ib^9}kk(sG~uH~L!i z4X_c~2eD7a16$&wF=G7u54U~?^$x24Z6@3{FiHe#8`k9Lr5{I#=>_)V=(`~Y_QL_l zhgLiG@5TOIDt_uek@`=j{*lW5)$@--ge!z%FhK=WK@Id0wr>G8mWurcAK8?5n0CXy z{McD%0rj6n{by7E$nH$)Kb`ttLj7}J=<%6SM?53TW=cJ>5zNp6R&an5e6K`E57Lmw zJOSaWDdT%O4}N|aUw^#C**>~D13ZTu`H=5UkQd<+guD434S5Ad;5xV>dG`)^2Hfa3 zkzsIJBIPd9JB>MQ3iH4Y<}~Cz{2qX3B6Hqs=C83xd5BxLp1uPy(45Wu1qtZZP0U{h zIm<^+g7NulJ@eNp<}YL`*W0!dhWM>u13Pqp1EzCN3e17I@aX)V@0Yq*-{9`eWZ#;_ zJfF=xj~u}5&0`;1z&xMJzV$Tw)*$;>q;r^kE7FDAjqEvy-;HQlNIZ+d_%ZGF1#J@| z4E{^;8!xAZ^Y=3Bj`JKp<@VwBB2~StAdK10Z#yAZL$if<$dDVrf`9uO%9Y6(Kkm)Y zfm=P-w`EaIWGiKF+eP{J5f1r(mS4U9vxWP%!FJdQyCDbmLvJRgvgK|3h)cPY%u5jN7TnFQ8v2qg` zhP%*K{7Ka{dV?80(zSjqUd@fFyCZ^zs5FjtGQQ=f#wq4*+*K+wUUDW9B6j2FL{>t z4nBz$E9FyVw1W*CpxUCflKpS>5BuLX_LS^T?d(rGkPi0VNyIZ3=EFi*3`=1Jtc2b? zp1&9H{2l4x_XGw`v%e0qzef7U*#8q}7klXw;4ESP%f8peKHUvH;AH>1nt0a22G|Jh z1%!dk=$>587WT!<7W8f4uO;rSJpbOr^KbUS+wn8w-ih1|&FqhJko&=c-ae0g{gQY& zfIA;LNY8O0L2Rj%Gm+=tUdn%gaA2Ls_`jU-A3D$-@Spe3hq%8Gioy2=Hi7)<{Tl=A z4+AB%eY1H-=eG*oxjtTMkfqBQZ}=Y3 zzlgZVhj%t{BR#~e-t}prf7B7qJS|@8k&Q}+7HGcD`@eTsPoP_|`F3!iJHfGovK^$1 zxs(;zgJ1Yd+79W5Q*Z{(!Fjj{m*6s7ff2>m@p2s*QXi$UZ~kfj{4DEz{KIe;d}hNUpVcS$Rdp=GRrkd`|rXw2CA7M&oQ9lzHgNZ924nZLlgR*l~{?N<3BY=B^`mR7$K@HS_Ig4kg$VR0@3yi|14grDC3yHER1!T$F$|A3mKtzbj9Lq`MiRBe>(=AIt#fFDl588`>$ zp?8%1?|s$+mFyWym=n3ri}Zot#s2p;`(MJG7-O%3?8fc7!2TC{z|AiPxfL_Ww{`{Y` z{;&3@eSdoXugVJJ&;PxYpSXL-t8*f2BjQPd>5u|$4dusw4!S3i^21#8`QWFGe7TfA zlJdhs{LGoWLx#MEeKoIPuZX?^EcmxmR<#COiF-ASpX;Pk&Q+8X4D6BBH$KKPzL{mu zT-uo?CcX#q@g2|x!f%Alum!fkcGwBKAqPgj;fw?sdYZkeq{#vFd^iM!Pz)u{r1S1H z>jwyTa%PRJfjX#%Mlgf#lQd~T7BygVpw8x6fa~}4&75jt^&Hx?IsrLAA zf<3!2$f$+5Px?i1Rp8Bs~W>!2_XJbQ1oS?=hd_ z`6cdCFp{SHmP+V*e$NK?IXDj&q4d?2V%o#E{?E+j{cqM8`2F~P;brtIFarIoGdwT= zUhsh*u5;}sgyAmSg9q>s!r!s~*pnuqG~SWPBp%LXqLDF>08_!7sFNh*bVz|Y(7aqH zmPI`OM7J`AwS(`~3|WX7JJ% zE^tpH?nL}78M2i4R=`SF4Q~8gt_)d=?zu+X$1-FC`bO~ICT{S;0Bpw39LkU_$ZgP! 
zz8$#}EO#i^Fy-R6BX;A?fsQuH1vaolRb;9-Dd&E!9RT;d49Q0x0+rAH0?M@}Lke*h zgO7aqy`)XKyvW9aW%9dS%cbGYGC98K8L8j8L_XNGM2_8gTHcRaBz3zM$$KZCmfEcg zaFwSojXrTw`H2Rh94IbaT_PFU$0J-E=}V2vplDF+N1S{9ow|% zq=GPJ)_A{9PM0P=Ed4MyO+Gr!ySl3)q**gTT0#?LE$!7bs*&H{;B04@=V^TNWj{Sd zI)->=xO}p-?wTNNnpkNc;(5=+IC*=~FQuYsu~ZRv4g7r{`jkJGBguaxm6?CUbLame z#clKHKa3HtL`Z4VLNO)&QjF97P)e%*P(Ij^A@#IF>8H$NU+ScuI6^OH$nkP)NHXNz zy?-n<2bW4C{^7?nCR1;9^7uEd3KukiRToYfpgHjAWbZA9^JYsjkz&RE}~xoM_(G}mT8=0rZEC9zI#BQK`EYxiNDEnypIH+njY>i@Ync|L6)gCVpDT{C{>SZv z@pHvo)_%9y1F#PXJxMFg!+aNCbddi=fZqrh zKV^MO|GtUtLb~D6`k(#hbFq|x`PV|<_hJK0TmyeCeQ^r)O#7;L8SN^+7TN*7)-}te zEqA$uiSI7lg9q>sbQ|fj5Cgv3^rKAr!Ug)oHTnu?Jf4&EyCtm42I+^?M^7BmQm9=pjv>RAofDP1pML)g&{92qOsB~V5lc~rgm<}m02Y%ZBXVd;! zv`HpyLpzUu=Yi~OV*PJt{coXu`q=-svHx#i|6j|w277@J-&BS^=L~@N07qCG%_YwH zun-o5k>6EZid+FJVKuCUFuz?G{)+c!)@x-0?v1b+wm~y~7TAt%ok!VH>9^}>2Y!d& zF^#eHsw0W{~EyE0v)Fl z#1UlwPk!yl)(hwzq^$oBGcQ0Z32>T!B zZ=AnlBR=p$CwA5~`osAfV@UTf_V4L_c>7C0RQ;?uZO(# z&!%rpqmPk4|3Sh}VUA}EIZxQ@*u_QUC1}3E{-3evGP)I8SHBf{1$_h@izwSx%D9QL z?$F3}{BA-R?t(gZyN4WMFaH4Pe=<=XBEy__=w86?AO;d(DkQCQZzR_7dohGj?~bp z>mcic_009qO;}ed<1%Z49xyXbUnZU_Fap=1xt8zm7_V=lTfLm2yZG&Y^t<3-{_bGi zU~ga!hkN)vfQO*lME?Z!TVE{uL=Q2z6JRPBKV;tdi2V}!`1c7{DgSB8KS=q(dX4gr zQGR73ploG2VN+lZ%!T=|5EjEy=*6DYX8&xU&;87ovHuJbOM~vJ&OI0 zVgJY;?sI1I41su7!fIFx>TKSX$2$+`>YI{&*oeLv{LATki%6q{aR4j}cz%QbHc)oI z9k~eG&HKJ^;o~n2+UK$w&Xs^>+w&Arym;y>SJy z3TmJZO0V+WKkp5N`HsFGcO#gg1+3rzC-i^^LLYN3gdBN_|51fJ1Lxp8T!c$-8Lq$x zv~rH$2JK)48`z-(9MH+Q(Z!f?0_kM@=tlqX{i7bnmR|PweM?w-r>nCh)*l+yAB+|1 z8(=?Ufs^_1I_cbmFx&JR2`W_wc;D|NcxV zMmK?Zv# z$KQ$UCY%fD#@&N-4)Yra#B&bL!$tV<_Y;gSF{aEWZp!IJ#$SK>p?IxCivcu;5>$P0c_^JQhs;O1moWq zGIrahunvf1eu-mDMs6X@HrNi~FZmrcbceC2+c z@r$yRyz;Md2zMbAgPU>P1txTl>(Sb@0=){?ww z-3`P6=g>Xn+)KRY(Jz93jC*1H_YW$0E`gtU8fzHj6=)t}ABVgS7WDS}tSf@7WpIa~ zqmTVA*v?Xp)9gQ4L$*PCCC|Sb_&qqJ-OG2nF6v|$+vA>l@Bkiy?nUMlh=By?<@s+P z^s^WDzyP`ze8boP{j;-;^)H+tPA748Q`atVZ)N@a|B-h8K{n@i;_u(0tr2=UA|lq% zQ8Fv@m09r>U+XKTGbK}^L|G-S%oSH$aqV2=iWO^pjcG>f-dV9CMnsGi>2&Mv)2EMi z`t+~UeNL22StV1}waO|{($#TgO5BbTB}$C@cppP1KlZzS+&`Z6dcEK0bKaky?;o$% z`xkPb(*Gu%5g3J1jKc&>M4`Po#rHkKdQ-xc-(pAp!nhs!bi^^YW#mlE!Wk$g9q}u%3Tse%kbg`3 zI{F4|LOHgf3h!^f>=53CJ*ZIUR{pE&|1>|P@FC>*`K!qzNT5e~mm+)ZMUW;djGu2K z`~FoKL*{V;Cvh6FPwxzQ4i|69t~{n*iYX0Ec<^x`yVOna}5KfH69Z%5tA_$)6ssz z`2XAQjsM@NPkyhy`GdwSlX>Bzli2^$^#AwM|BsftYRHk#QFD2rL!C>u?nrOGc)|ju!f!;>fQ$NQP3;C&*oe2-h{gy(?z}} zuNCIs%S&7(Y(o`xU>C-F z?t92O=cq@+Na>)Fo?PbrAo?dB`xksu>D%lJ??26J)gKg*=rq_#<%1Gq)m z7T4i@e((9(heNM*IC`HL9OC@D{{M?Kty|dlv5$s<^vb`sKFb&Shr#p`6#DecuYV=`+(5yS$Axf$I0vNkhW-ZWo9BH$D`;JL5xP-}#~8JI%wx-T@y8Z(JQ7_GCteZ%_JI z*gfrI;akN|g@3fDaMH2ZmJJh!@W5xE?#>gAQ>DrEPnN722Ij{te(u*Pre@JIa% zTh)^peady#-u3$?R6ji+l#?}N?X$+G(W|fnyRZlQZ~%u;jUz}Pg*2+`2ZSG-EeeM( zn&-H}{>5WQhiy|vhc>@uQMp%N+~tqx-x?XV;~$HMhaJry33>Q$hYf$*!^TxxW@?RdrKJdccxLY<_ zgLQ0Ccuje6_$~E^|5f+-{!CQXm5t*o*Ut4zh5d_e-Vz||D|v19X^J4!Z~SPz$IKkFRtM_`fv+(P`9%v z)bA|{4M@}!g~slpko0Vt?iPjA%_8mZ--dgRdw~9n)CCxf5)4E8#sTJk3^4y=K**9^ z`W(B-oN)ezwaNDvg=QR^%~ypMWBoH|rMIDabWzCtRzs!>69L&Q4EJC|uI~?0dW|P`9 z8Eu$yKNwZLc+Q9TI#Q z4Fg=;TGu$pwUJF5*;}8qFPGnYe{Q`?vgec5qKaX2MYfSu*nz$;Tc3yAgMBglFXoPr zb#w3YuiOs}XYYpu8gJg$FLpmPA%&@qiF2d2j#S>9eGm=_SEJ$^?h)vQhoWhD^{~ z*WM4Ajm}TcF1#N)k*2pHzhBzBo#Tc3-r@a_6<0m_e#nz2P&?&*I7yyH%?$T7)P3QM z@HsS{e>bFDV`D$}iMqERgnIYYfW%nwNTLZTT=1VuxPo3>!*%rG7TR~Zf7jEw*Zn8m zf5x?SxW;<-kE1>B%A@b{KfBkv?;4-t{#`?>YiM&^`o#y~j&x4SU-!rdXg%y0|Ly-} z_y4rCF_1nO`JvK&);Z8M>D|J(wCd3h>>Kg$9o0HdiQ|T$=El3BVcEMOfr_sw^Ys@u zz2v?Yy3h6QlRnCEy`OnEl#)HK+a zc260Z`@DSoxdCA&y;Xk9@RQA=XVuZ2-TJ8L^N@ehcO$QLlU?fe`qA>>aQ6Sx5BsU& 
z*q%oDCZ+yrl9zK|)A#Fti?A3=u^cP03LSTSgE!^9x$+>{{k(Lg9oHVo(VGX!gYtP= zK0MaXbzPJP&l(#f9Q)^^KW>e5dVgj93b_IAw;whMmtz}tU>EkF?^D`NWc5323G&hW zzeDtbeX^hJLXUG68 zec12XI4SzS+1Jmgf1lQN@SoRCng4fuVA!I+{EqNF6#Rd`_3Ur3uWzf{)%#hpOSpUf z!@528%N||AJ|D|IpT?e&Zp%ovHCivT&(E7bu;02T((C`Cx&?z#+id&T;?l0GlpKdvK8Nw-1VleXVgBC=WBi3DqcAT(xNEb1iuL;c zpLPFJoMVRjcYTF%`7byJ;{3m<{xcnAn2A}KgLznh_ILio{qN=@*vrqolYgSv{kzs< zt~E`z+;{(Et8g0`$1X0C&SEUZa@4-%{@vpU&!w>b5nsVd@vD&XESsi1d`FFB!T&T{ z`bIR4P@ee$s{r9wYKEt~o>l;8^e{_RmHldY|kT@)VCw=!d z^0#_9CGW>IH2zcnANw`!|82-3j}thF)A;}E|0~S@Iq%x=c>a&Jd!hg5S^3R5&fyZS zpcmJ09err`96Rtw?Q8Ar{8jCL?fm8?+W%x)dM#7r&1u^I^fvEfi}$g$#`{ZZ?;8hj zOPY6ZFQ%_<&wa}JgXaG|5bm{iaR2|+d*J^YNcIl#O({PI)2lvjeSt3;t3Yo(8@p-xCO{uwcWP2m@Knx9}cKfw#e1+c}JieHYEScTeWoPR9)pPq0n4c+X2`Ua$W-aEf@ zH}Y?65?79GsKO5H!XE6y0USa#j-b#Ve%|}O%-<(m_dNgKe7+eZR`AVi=A+rIo<=I- zm^2FYw>rF1eN|QAS&GY}-7y{LL>67mo-cBUYrP!3$^Unk|L?f+0WIP(Xhj?1oRkyN z$$ee^SBIaZw+_`ty=|?3dUl3-b&7Sf=;x4M?>KZ1SI^>txax!Kd-4iu7a9jgUPDc@ ze6?1aZX?@XxDQQ})Ol!}uPuwZY5Mo@ANoK2=ML`S0s1diPhc=g@E`g=efN3%Xa3J& z(inkJC`I4r?PEt)ewW|==llrf=ESupdVXWRnfNW!w6hTBY)=%9W$t8hDyE|h(f1O| z;F+RR%%aahezg3DNAo%6iR<}mb6v>Z&-zZu#aN2vScz5W<(FJTuEPdY{MuRt zKld%x^W`D>^pbmhNBez8*yNaU)G7~zV*dY;{Qs-@|J9p~>dhqCgcSSz=(GI)Z}IxRpJTRk@<(1_&g%Kv816&L*H60V>Z*Ki$uxP^A@ z?hby}PBP0M(?xa*=g7P~e)O>NKdJn$QT}%*|Glf2vAx8R=e1N8rj zJQ>@6jUNaD=~Yk2``@${IK53iC?SU-JzLpMjzTHMp;cT40wP`xlUluo$uauh9P& z>;I*~%Mt7UmEDgp<~`m>$x~ zs~u$KGXI(Oz%Ke8>_ew}iF28|)Zg9HJU7oZkJ_E)-}wCyYPM_pkg-qx2$?|BTHg^G zSNpD(#r&zAft26UXhTnhwlmrL32nw0_RLSjFdHNHi_zgE{WSXckSl&`Y=JQbXJYsZ z1H(D;0xsbS9(_l>^f)KF@^i)`{P?MGUAPaoa0mDB0CoC^>j#*BZytSuZ0u+L{UG!2 zr&1^Zzc;FX0M$(f4uR7I{7DxD};z?vVHJ0R8_r=P<5)AUPN% z7>2?a8nRbEWLh~fie8Fw7>`zQaSfme^g{pR)7BZLPexvwC5K1-k9AYnAX~Ll@TbQA zZn^)v@!!+^zmhM#$~r-1^op++h4$IrKRRFZ{?SG6MsB0>0fq6u^~N4_yVi`c2kf2< zTKyKs|IU=oEX=_?EWjcxM&G}VDa?Q9`HF8uxYyY8<>X4N!WzW!zw5{i*o1OC8vnbE zo_N;1@vT(RcOW&K-G$^>cGxp~FKpj>wr|5i_U{Vz?`F0oS(qocOr79AyRZlQP-wqu zH^%jE4+t0RUp8>(-S_N&c5nB=_u0Qa>|Y#1`ab)Y&6{ES#&z=BE{eNs?7DQSaRdp( z-!iw^-}LO$(nh6rSQQ^(o?jw2);Y*7*PeDv^dYpxV^Mop8A_f&4L`!8bp=iepGMOP z>8^I4YuzVVu>WT`?sfN%MrjrHKcc5_#(&P?0xsbSdT|Zc(Y}-Y@1A4dMz+Q^b-T8; z-aiWS?>(31x7~ldSG1gRZ)kN~TSiI(EG2}{vdnIFPKbDMc=0!_w%R2bb4HiJ30TMuP?R2_t;N<9ORpQ(fJl?Q~a_x zlsRrDW+C^R;xLDthXp9?BcQ#ITgK;y_8a{1?21S0jm5oCGw0cb?yrqJ%3dz`yulCLZyz7(D)DQu4jZru+duOcA+AMG zPS5OjKKeF#rS;OPY`ECDJ^CoMAKX*-5`IlGFTBGsyRZlQZ~%v}>+K54_l-1!{Iw`4-4BC{@lLpKMz&XN}y`z=uo-)V`2NW zk68nAWY{tEDSPx8H|V!Cc5NRX{%OCyuW26&-`qPo?3w?O@a-2q659Nh#lB_Mz+C@C z*n4nLc;oJHeP8+$lf@zLw-Y#t(>Q}SZw?8ER~LtK^y+>^;RoA&v-^v}4|f-ZBP0LH z9z@3e4En23_sn0(o91ElGw-V3r$giD&xX`9pVmk7X=Tf2^nZOuKKf)hcKC8V$w_Bo^{-@D(K2@^7r?-&kD|ZaL;2 z9_~M4%_(~SugZ%UjCTF_9rKM5Ko(sqj15?=-<-^kHvVBW|HyIv5v0-b7XQfG{3B%B zMq?t*mxL1O48sVFLhDV(_`Q^#9l(dd$1;vS9(lf(TtB{x;d~gFAg+3MNtj4ZMs21f zOeLqIW~=);=)RsejzV}QnqF{U?y+&c`$XMV>p!9a2{a;!re=Fp9JVH=|IEQWEWjeX z*5MgyTW>-86zgx!;Nw?bWcT{Mcly3FzHjwio;u0nKNrRk+*J4ZZ38x;`i0_9 zPS%jMbBn_^dgag5*FV&UcvX4wmOAgU^2W6l_V;nUb*`y?it@&F{cisezpJC8kFrV{ zJFp9T@RPvrPaeP_w7Zs$q-(2j|JwLn>(&3-Ryi^+eAG2IyXIrZ-9K8I-M{m;-A`TIcTw+4F1TF31$pXhX%v^{bM3oWM!+{nEY^x@zwkT(|;x%@5dZ)F_9TUgS#XW=`(EBfwSI{|^t^NX=eF@wBc{V)q z80@za+>x(_ks~k)r5J}o`$Qet@RE8Ajq0;P`y|%md3?keK79ceVKJ6sIqKBc^{bu3{roBa!%D}j!Wyi@ z25dq(wxOL((ZRMTtUq&#{mZ8Pz4gzU*&iM3kJs5BXwfdopw;);=KD1 zhG7KyULF`mk)=pKtBvyc8DSiK0w$tWTn3Zr*;DeLd_R>w9eLrLa5vd?mR~Wh|MMeb zd(8if>;F6+&ve#W{f?iBS;+nHvtbT74}DMRE6^{qfL^%&q8VWkJ?*_NCNm?w-$CYr z(3fK+I{UFt(1mW~5XUFgZGSG*o73Ka#9PlPAD;_Jvg!D9A$9+`u*!eeU>!DK6Uwm- zRcN1Irv58a|COo#%9MX)%D*z@Uzzf+EF68^{C^xXhd+%LdIqf<%R-xD+C1y(Y0rfn 
z(i)H2=gr9{_n>CJ>zeJluuu2^n&>GsB8fu(*L>dx(!0yTA;)CQFK>OJEL77gKgVv+ zx78_}UE=!Yy1p5%?-|!Owk*`mb^i;U2Z1O6N4r;2dfPJ!k#J=fVYgVyJ5q zzC^!*)N0q}I+JT%Yrp40uefWtjy~MN9o)kMJoZ-)2%-OS-%-2r34`e+_`Uuwd9HDa z{3j1KktyL}jv0YbD8)F8#{^77`#aj(=zLxK7G2uD-N@1NIJ%JkZ3+JynSMv#`(5pE zw9?yp#3@52OQ-Ua>gk_YkDp#xzpJ}{m`=}1t8=$8#`Kwpdxz!FjjpZM{SO}r)yk1s zew%|@Y0o1UpoZRXo?r2@^?!sHBaWF#q46#EN%nr}xzO{|GHdL5mY=nDgkxgW{}|Zi-S^MDpd-gAT{epBZ;R<@u+ToaHW9I2u zZHZ1?qYp%WB%8=}73_c48S{PhRpsXM+N7v;jy}iTLd|XU>P7d3!v0q;>sM3Prk1$R z&F+(4Cym1TgD<_WeX-H~3#V{LI`{AZ{lDfKFc>8mhX48goAUoi_CJnob^mBV2CZmA z+&gfDbVi{R!e@qq^_fVQjPDfljxs05NxQ9|4 zJCVRF;W=pP=e`izE=d&jzflhr*1!D|`G21OEI?fQU@^HA%drxx@O$~6joWol-jpwM z$Y=iZ{J&*2|3i)ZlT?0dldh3wrdj!bb@Ym#eIRTgJEfc5EB&3)pCoT~xzKwSCaDHmOwYVclAcZvAQ1N;7`j?AC!9O;|{mjrGAwG{2DD40DmEv%keg@}o z0he$Ey|{+!=)m6Bb)RkwkdKW^|Rw>jq(5W#{Wwz-E92-TDC3X`uuIE(YK!a2W7pq zYT3(E$ud+Y_4$)=FM_xyK`O~6$0FA=OMF6HL$seetY0r&`8VqMAOae|605KV>#zZv(C#^P>}3Cw z*@NtVdUw314Evvd_vlUck7G!`<^GSGTYy%yq5AHVpqT=v_Vk$Is3x=+*pJ7U!>87Eb^#(o&CF8 z{@<&e-l1K7Ub~!~+=SF^V*?y>5~pznaW4Kj@&YcQT|VlNk2;Y>7rIZ$pEu=C9No$O zeL?*{S6xrGEK&cHt-@_&9KUi!I=#4t>u7yJ`&ql7kDlEs?b-IYrQbnbIJe(9b~_h& zPh9nj<{Xm!{}0cupZ-#EFlv+y4aM&3t^T1zco^cIe<|TcGO2B_g&%0^4gKQsOJBS7 z3mnt)<7mU_)A%>*8Vl!sSro>ReNTiizVJK03)%ZM^Ev|i>2Hif7EX6-%aEBQ`@Tk- zpFSOB=zL!L5MAQ~H69>jxt1-ckP}?(v#HBRz>Y z#w@PYJHmfnd;8&7fjDNMN_ZD4KlX5ZK>Jz#?~Cdv^;i~NyVXqx)lqx-;a@lYZMOOk zzqkMHP4yqzaI}a0j$?z^?`Rpxen%_44TbduirMetvgi`mjYsSI?r|+gy!(CR0USbg zj6(u-H;rFl&o{916PNY-pL*Z?#lz0|jQez48g0m;b&2|Stm}_qb!{h3&`%<-e$Am9 z(bs=kT(x$@8S)%zM{3WImr%3fq3ABIhejk(=fCxA^oH5SANVF3+44!U z$v2WBuldjW#|K;&?!ztQzSJ+YZ`J>;-@9|W`er}-|8-@1w=zDXuSA>l=nVFMkMTc9 zqtO5Tj{a}s(#~{y)s(6peW@2mVK?t8oO0822ykn@poEhNYP$wd9bEtTEwC_Xtv){9M-7}i+*&vVVNn^rF$6UZATtV%N*8iVBCiK$d zp7af~%@cn@et+4!Jvb)BIrK?1zAz?Sb4+?rX}C`Ip_P7%yn_tAbAWLH#iik%@B`$8 z^WwXP>%$j+^s4azH;fM;)Ax-JAT#e6A28DR0JJ}2e8Cjy%#aS*HPrt`8v`JXytMlN zjdl?3_&x`dB^ZVg7=^mC_7^~596@RSz)Y@`mLaF1%VLT?FR$L7x(i85z;rTIP zGJPsi;+uq%;u}4q_t(GE{a%KdsA%zwk^e=%Fh{uZ3(C&Vuo1hg1unb*xfc00?UL=!#rmigh1Ta2Yxj+I!2HCTtj`t!%-e`L{x-&_CR`)EDe%&93I3!Ukz=!sGS%FY`|!&YLe6zYSHW@GZ7(l>gW2yAp0t*TglZcG35suy5#g z?TG!_5!ffL+VKa-L#UNrHF*RzQ>-DjdVJWn^S^}3I`ydUt8)KehN`zGhKiwo8Mf2^ zaqNe~j%Pj`5{^&dpLTyV>>B;i@Xetg4m*c_G<jU9`=qL6Tau#4k*L2e$V3sdOrT)@a9eR`^P^T4vj4h|9reO9Na%Dy!q@X`@-o* z>mCvIPa6@w+dLxddwp2=PW`a37jLZp%dltKMDyP!hTY8*!?&hP2)mMh5qkf=H2mKo z@ZVXJUmiQ@99wQ`S0Gn6Fq{@XgL8=Qzwc9};R5{3n?p;#zYm$=e;;N^w+-X`C-+J1oFUrR-?YD?@B~c6 zWK6|j<>?O=vcorr@Iz}$)(jsKYHtq-N32O%H`aQSZ#}_>{*90r^bJ0%Z-nIN^`U9w z*ZE<-9_p@Yf85mmK;l{Lk15(8GqgX*l<;)vmtiJmVGibD0T!YCzV^pXb@X;+Wm28o zp>9Upqc~rqeX-E~Z%gd|Mm`>Yf7UmQ!urD-i^5{*EX8uHL~D1EKANJiik{UU*{MHr z4SgN*!?iu8X@gAC7TH=9Hi)a9Q4}_j<*2<|6tU`$nWRd+G~!VHUGQn&G8dgohb?@$djm@HXxiP&!C3hFxq>)Js_MDzJR8) z-s@%WS-(RvzWWY+aM}ThQ`+ieQkl?*N9%uF@}Dc{#Wh?<3-l38WJY@|77%3^b1qT-k<81BFj+m zqvDW0#h##N)E{vUz%2S4%tNQTB%4(Kcc}l7qvvbbr|kdc7ucuIvro}N&&*|?u4SL1 zeJlGGoyejK-N+%2fzn=pMOch|p5sz-IaZ?XqW)zxtY%-YV6(F4lVlT83)#Qh&7ptR zGZ-EU<6l?#Pp!C`y~f|u6Fux-WAN9}Hz4(n|B6fAmKHXNOK&&+KidCKd8hPpaodpD z&Gz4E{J*jORl+-vo5U}HN8|tN*wKac4`#c*7o@pF+Rr-&QtR2^{<8=BZ~%u;jUz~) z{jleQ&SKAZfag2f^X=#PqA>rv*z-N*`CjyV&wIYjo-bPKJzxK;KJGlyNuv!})QYQd zY@VL*{|5g*K|hIjO>u9=WIxyHx6|Uz;2bWX&>orMds6OS625|7RQP6csQk2c$ZtZp zF0K!C$F;%FzUTkbt}pC=HHD8%zDSX`9CHWv@BsZ+scSG8C203;cgTM+?`6@o`=M=K z*ng9Ll%3zako~_zULygKZN5Npz0QMB^mwMDmch zIBuYt?ET1-R&dh}c}3kD!+-t3kRVgYDMR``t&E_zq2e>bjmD=;Q$L{KpRQLws5>9; zPk-3@Iq#@n(0NMz@|ODLvijw?`UUwJ_T7`#`|Z=La2_Xc&G%BLzNkmTY<_4ozNmh~ zqj6|;N%dbw{nxGjBO8U2?DQt>l@y!(q;yWBl8-R%DRzc_4i|6e%!O{sr}KW*Pqi;`o5{6t=l^dT|ZcQLz7?R_@Xt+5flb 
z=wH#lqJKsIivAV-EBaUTujpUVzoLId|B8M?@)(k5MgNNa75%nNS<%0ue^vjg{#E^} z`d9U@>R;7w513W`c2HT>zp8&#|Em5~{j2&{^{?tTo6V~JRsE~_SM{&zU)8^=e^vjg ze*40#>R;8rs()4gs{U2|tNK^x>H z{#E^}`d9Uv@Mcy2s{U2|tNK^0i^orhiTUn*KHYYx>vpujyaYzoy@IGHd$Hh_j}DP5+vHWB#n^U(>&)->^Sx z`q%WY>0i^orhiTUn*KHYYx>vpujyaYzovgp|C;_a{cHNy^snh()4!&FO+TZPHT|4U z*7O4eS<}C!e_j8&{&oH9`pulPu76$sy8d-z0Kv#x(#|GNHl{ptENuu76$sy8d-z0Xv#x(#|GNHl{pco>tENuu76$sx_^xMy7L;r^U4gDMX?P;^2e?$L6Cm z)W4~JQ~##^P5qntH}!Ao-_#GI#5_Km`Zx940B2MGrv6R++*LO9Z|dLF&tPR!|EB&; z{r1Ay)W4~JQ~##^P5qntP42U)e^dXa{!RUx`Zx7&>fhAAseeEF`7rGHDm zjbXO*Z|O%3vZa4ZKj@P!{agBhpV%R0OFxQ`E&W^ixAbr6-_pOOe_Q{yelRH8`nUCC z3)$Act$$npw*GDX+xoZlZ|mRIzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9 z-`2mae_Q{yew*+h64}6Fn*1xTP zTmQEHZT;K&xAkx9-`2mae_Q{y{vG{0`gip2=-<)5qu-`CJNkF@@95vrzoUOg|Bn70 z{X6>Y*t4VG{x?iVcJ%M)-_gILe@Fk0{vG{0`t3qv_nsa7w(ns;vZH@T|Bn70{X6=1 z^zZ23(Z8dANB@rg9sN7{ZA-JGe@Fk0{vG{0`gim*jM>q@qkl*Lj{Y6}JNkF@@95vr zZ>yYL{k!^Y@UyFbSO2d5UH!ZIclE<5+0}2$nqB?&tl8DStKZH!AX#?x@9N*xzpH;& z|E~UB{k!^i_3!GpiO#P6UH!ZIclGb;-_^gXe^>vm{$2gH(*e|CKb>9uHq_bGzpH;& z|E~UB{k!^i_3!G3XtJxHCCILRo7?Q_-_^gX-~Kj?L3Z`;>fh79r+-iXp8h@kd-`p3 zv!~x?JA3-~^zZ54)4!*GPye3&J^jE=_Vn-R-_yURe^39O{yqJB`uFtj=?8nVr+-iX zp8h@kd;0hE@9E#uzo&ms|DOIm{Wjy-)4!*GPye2NMk0Iq_w?`S-_yUR9~#P@e%s>M z7RR)+SEuYX_vzW#mv`}+6w@9W>!zpsB^|Gxfx{rmd&_3!Jqdyd_6_Vw@U-`CH4 z#11<9`uFwk>)+SEub&0UzJ53=`}+6w@9XD7vajEEI{W&esqE|D*T1iSU;n=Tef|6T zfvWH%InaNg|3LqN{sa97`eCcURyojrp#MPsfqwh_9OysLf1v+B|AGDk{RjFX$^cq9 z(0`!+KtHUN1N{g3`IYc1InWPqvaM1N{g35A+}CKhS@m|3LqN{sa97`VaLV>Oa(fsQ*y^q5ebthx%>K zbEyAN|Dpav{fGKtvmEL_)NhlXL;dhs4)q`Ew^7fb{zLtT`VaLV>Oa(fsNa4)hx!lo zAL_Sd&!PT9{fGJw^&jd#)NkXSL;b8o4)q`EKh$r>okRVH`VaLV>Oa(fsQ*y^q5ebt zd_@lR19ATTCm#KW`VaLV>1Wk*r2k0&k$z|{JVuW6AL&2Rf25x=%8~vf{YUzb^dIRz z(to7?NdJ-kBmGDE!MhykKhl4s|49Fl{v-WI`j7M<=|9qcr2k0&k^Uq7NBViD9O*yO zf299N|B?P9{YUzb^dIRz(to7?NdJ-kBmGDEkMtkuKhn>Zp#|i ztp8a5vHoNI$NG=;AL~EXf2^M~$g%!o{XlSz^&jg$)_<)3SpTv9WBteakM$qxKh}S& zpHaxM{$u^e`j7Q93t?WuY~@)0vHoNI$NG=;AL~EXf2{vl|FQmK{m1%`^&jg$)_<&@ zdkCbQWBteakM$qxKh}S&|5*RA{$u^e`j7RW=;tJIqW?tyiGF4xC;CtHpXfi)f1;nG z$ccUkIw$&1^q=TI(SM@Oa-bpyX6P+muuNr}`PEoa#T-f2#jf|Ec~{{ipg*^`GiL z)qkr0RR5{|Q~jsOa+gs{d5~nSSmyXZp|dpXp~{ zbEf}H|C#rWLbN%P~&-I_{Ki7Y*pHs`Z{&W53`gxp$0juAe>1xqgl<3{uYZpX)!@f3E*r z|GEBi{pb46^`GlM*MF{`S&HKVIoI#NK+g4_>p$0juHV6d@NqfU&&Y-Q4C9n@{pb46 z^HOAWcy&(P*l|E2y*{g?VL^c7-~ssB>{rT$C(m-;XDU+TZqf2rSfh+OKw)PJe} zQvap?OZ}JnFZEyQztqo2hU?9hezrGP`mgj~>33-@{A8~5U+KTnf2E(N%$5Et{Ty&O z;9Tjy(toA@O8=GqEB*XjuJm8&=jd{!|4RRr{ww`g`mgj~>A%u{rJu9UmHsRJSNhqz zTH(ztVrD|4P5(%(>FfE$2%A zwf<}Ud|$5hU+cftf35#o|F!;W{fu+2^%Z22t^Zp8wf<}U*ZLXNT%Z2|G3Hu76Pp|TZX4uA|Be0|{Wtn=^xx>e z(SM`=M*ofe8~r!>Z}i{jztMlA-))54=)ci_qyI+#js6?`H~Me%^WnMCf203K|Be0| z{Wtn=^xx>e(SM`gb(7raztMlA|3?3f{u}-7C*?-}js6?`H~Me%-{`;5f203K|Be0| z{Wtn=^t->E8~r!>Z}hY3xz&HG|5pF4{#*UG`fv5$>c7>0tN&L2t^Qm6xB74O-|D~B zf2;pi|E>O8{kQsW_225h)qku1R{yR3Tm85CZ}s2mztw-M|5pF4{#*Sny5v^>t^Qm6 z%xP}*-|D~Bf2;pi|E>O8{kQsW_4D?*)qku1R{yR3Tm1}vZuQ^lztw-M|5pE<{yY75 z`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$JNA%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{dfBB z^xx^f(|@P`PXC?$JN%Z53um4{Ez5aXs_xkVk z-|N5Ef3N>u|GoZu{rCFs_228i*MG17UjM!Rd;RzN@Acp7zt?}S|6c#S{(JrR`tSAM z>%Z53um4{Ez5aXs_xkVk-|N5Ef3N>u|GoZu{rCFs_228i*MG17UO&?ww-$1*|6c#S z{(JqLd+znm^z-hS>7VI$&m=SbGyOCDE(&F)-~I2*^t%C`nSOV`Gt)oQKhw`ZXQqFq zf2Mz?f2Mz?-)*zZ^w0Fq^w0Fq^w0FW0TH)TGSffPKhw`rXQqFq-!-Gm^w0Fq^w0Fq z^w0G3)|u&_>F2KFHd$u+XZmOQ8RyLOv;3Lqce6ZfcHAwG3rU&jpX;CNpX;CNpX;CN zpX;CNpX;CNpX+ymKThyxu79q7u79q7uAimOT)!JInd_hHpX+x+AangZc;@=&`se!R z`se!R`se!Hjmcd9T>o6Zt4*2fpX;CNpX;CNpX;CNpX=w#GuJ=YKi5CkKi5CkKiBX6 
zOy>ICpvheST>o7ET>o7ET)+DUdC>o$|3UwQewU*1p#MStgZ>BoZXo1A|AYPq{SW%x z?#P4w2mKHFAN2d%6XZevgMQbh@}U1g|AYPq{SW%xxXXincX9Hd|3UwQ{s;XJ`XBT^ z=zq}vp#MStgZ>Bo5BeYUyQh-}{SW#d^grl#UndXxAM`)yf6)J+|3SaIJ8^d>5BeYU zKj?qZ@AgO@^*`!&87q(aAN9LOl1KfI`XBW_>UXmzkNO|=Kk9$f|ET{_|D*m#{qFhX zQU9age zQNPPwdDQ=?|55*=ewV!RsQ*#_qy9(zE_>xs|D*m#{g3(|^*`!=)X)Ei{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!L}-&5D4pZ^d4AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%|6l&U{D1lX^8e-k%m0`EFaKZu zzx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-RfBFCN|KLqo{;B?{e*VAwf2aES|4#K! 
z^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr z^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^-uLr^)KjO(7&L6LH~mO z1^o;97xXXa=l{$9m;djA{ssLD`WN)`|KI^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?P-s!*7f2aRWzcT=J2B3HP@ATj4ztexG|4#p% z{yY75`tS7L>A%x|r~gj>o&G!hclz)2I|EQ>0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfZprB*MG17UcWN{bq1i$0Mr?P-s`{Df3N>u|GoZu{rCFs_228i*MG17UjM!R zd;RzN@Ac30&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq z&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-Bmq&-6P3P-g%-(?8Qc(?8Si z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=06Nz{*FV=k*FV=k z*FV=k*FV=k*Y6BKodM`v|6KoE|6IQ_0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$=v@C?|6KoE|6KoE|6KoE|6KoE|6KoE|6KoE|6KoE|6KoE|6KoE z|6KoE|6KoE|6KoE|6KoE|6KoE|6KoEzcT=J2B6LW)ER&}1MvSaD*LnSs#E*2h5;A`U>Ja50EPh= z24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ z0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja5 z0EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV% zfMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A` zU>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2! 
z7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_ zh5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~ zVE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50Q~!h)7GN_j0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o<8QPXibYU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=RPTe$oI& z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!4pAKs1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCkPPXmYs5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(E$39ei}eDfM@{G0HOgz 
z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(E$2dKMf!n zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<8t2`e^{s0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180A2bo{g?hrKMf!nKs1180A2bo{g?hr|E2%Zf9b#UU-~com;Out zrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#U zU-~com;OutrT@}@>A&>f`fvTW{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q${kQ&G z|E>Slf9t>X-}-O;xBgrIt^d}4>%aBW0HOgz1BeC?4ImmoG=OLT(Ey?WbnCzM(*U9Y zL<5Kh(5?U0f9t>X-}-O;xBgrIt^d}4>%aBi`fvTW{#!o{AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz8Kl^wR*M0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs11UuAc@F4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Imo8 z`Kg6M!>`|m-x;j)82(@|`(n5ZuHy}VdmsL8@X~I0|ClqM=Qlt8rGKyQH-7bhH{9`9 z|M{r@J?8)J{PFvD{`CDj-@5bFUwrEOL;m}QJMWM5-#hP*^wnR#`;>p(`0i7_yW>-y zEODJD*S`jj_2g>uJb76 z-^;T}55q4E-d3Jx&%x)}G3@i~u-AEZdgwfV>qq@;!|r*u)9F0hx^kZF|2Quutj~*? z$n#=q=DfI8I4}PA&I=RZyx56eDPPy>*v*4rQy#8D}#pjTjto$tBHa0 z>XrYzdbv2So`=t?bF1gofw1%Xdtc+#e)aQe)Af0^L*+aytDJ{5g7Yw0e;#I9&%@N- zd3e8d9^O!#hbzeQ081ErYmaX{-sAmSd)TIZ9`@9phfPK2VHe4Hvy6M*%=Mf%FTUr^ zne+4JMDBUB5BwEn|58 literal 0 HcmV?d00001 diff --git a/keras_nlp/tests/test_data/xlm_roberta_test_vocab.spm b/keras_nlp/tests/test_data/xlm_roberta_test_vocab.spm new file mode 100644 index 0000000000000000000000000000000000000000..8520ca4919b4344d5643f1e99b2c03993b901e80 GIT binary patch literal 237831 zcmZU+4Oo%9fPN_rC2p#yu9z4ZHXKe(1dX-P?12-{aSN^MCc~n8#0+KB1ct_aB-^ zx%fz{`9!PzRIB@3EB~f_Sd{;zeOQG5K&q@4>mC;QNz+FOkB$4)p8Z>Y^*6cOe{wI7 zAl+IKqpAtfYasZ&R(hgEj{E`om+0vt2Bm-f(7#&g%SA%{BEKCk`X=}gN>aFa3SnVY zis+X?7OV&T0{o$ca23e(mvpjxtxi@wCi;wBI?y?jB65^U>AAJA`;BX_1gaq_Sr$r{76qy_n`N>(4qydq~FNh=$iTrd>WXd(# zIV>{umdKMgL{dgXrrj1v9Tk~=Ph{JY@sdkEw?CI4fBnrk+3{4OysA%-ou4Pl-+YrO zyI+r!UC+cx?l*Drx1Yz!p0)Av+O&AtJMky-tJmUX9p!jEZnEraoh-lpda~p{HBt7z zFi{FVog{zv_(VDI=}+a2Zzsxj()i7{Kb3>XeNa@Wk%~CcuheTLJVErUo)G!7k3?3l z6?y&@ku`6}$j^-$S@%?otbI)*>zBmJpMMi8nXkvn3(v>OhHs+eFM?6hPQAYPbhKoB z@R(V|G#8SOlHFP< zUab|wBCY7bv_~gp**Y;SrCoMtOD9$1MXcQWH2#Ot{}@@ZKquAc z!Oumus5s%&lcL{1-RO4^S3Gg1i68aufF+{O=%jQ$;@k?xVY@qu>Wo z5+Yo1oJM+aTghV?_k?AxTzzx4441Ff{!82vtr>q)wMM*1C+G(?)D3BTjDtRW+~<0Guao5;^*@ms(yJugIr`B*UHvKd{#2Gw z=jZ!p$Uh&SA{T-`lTSZSlZ#*dOu9B|B!g=$L$Pv-`~G|X&t&kE+4A{ke;}8?n=K1f z-fqQ8De(ori;`Km=Mv6zHb$J}qni8rk#q6iN0?>EO0HL_w!Rl52}u1wj2KSFjMYad zDq4y+QpWsP(cAIwjghiNv0|DNE5?-~)x^<)-K`>?@*A;I5sH;M{HjjI%3;!KK)*#_ zYRQR_PWq%7cN>^j#>jf^*#dgf%tCGldtQv>BL9Ir2EU_UJgt+{=w@vE6XfAc5f}Hj z&`-R`uJjl&5pP+oNNWmtst`Gee{maaGazEu#7YQvHGQ-Am`2VKM<_0uJ`ybk{7j`9 zDfemQAZebX9=gcuVfx1N$& zUkT+qOh35Ey~A*UcnpPv$-|yz5JxBWU#OGM@HbD=$sOv`Fi#_0qs$kKDfX2*xrv{d zF-bQaTSmV_JT18O)Po^Kqzk=#o=81qXsN}Ixb>u8f;5ldCQo*Q$bIhX8n2TI>cN;! 
z`q-h7_EK|}e!Lp%M0C@rpHE{Y$T&Lzy}E*MFT_YFivIpOazFC9SgD_vD8W|xjxAQo zXiHOeBK-Z$yfQFM5*B3pKy;y-Ipfv zqG|`~>o1X0Deq*M3$tJ$OaT2H$^;L%XEJ#-9$;=m8ZzlSO^iSIn@7pt6554%c9yzE zeT3fB(wE|8HrHP!{?@aJVp=NlKdAePrbLOszj~5L6W50DJC6IW(e=HF*ma^@Cf_AT z66GHLb5*+ZvxUf+xR)X4Aq@@0Ig|Qusrn)PuSl~yj&V0mnnSedX^||#Rv8lIa7Cgh zyIshAtMFTa>^jT6#JdIedN9#W+mZXp$5O7FqshxX+N4z{xhf2Phu6{0x3MqWzaiXe z{8T?E!F>>dHu~8oB3CJsX}8EQ(%{1eskeVse!Uvm$UW7ThyBUSJ)3ZslMgF;eYHpq z?i++vwm*P=7EZx=C|ONh#9;uvMI)ua)%b(Q5#kmS89O#+LO^64Hqie=nSZU_avo^K=E{L2#Pt%d8Jh z)27%>a5?Nke_ksknIh*1ZyKcS=8-mKG|-QVNh_Z;FLUi8468WlljYP$dH7)-)fyvc zB%T*FVqUEi<5I>n!rf6}zo6aLYGoh!|C+i#NL-cZcIrTt@jhV;_?a)z*7zTUV0n}n z@`)Ra>Y9uAg?ay%TG7#e-@?y`oxF{lfO|3oKTVL})09V|CG>K%L`ds@LGZl<<~i;U z(wCPLhIK>eIP$YZ3BB>qFSLX(*JyVrfnvDCy+)*31FRwb3JY_{AoX`PR#xEN1RGSk z&uZj5!s>^puNxviSL5Xfr|K|P_7ZL+)z3*bxgukQb!NPP}rchGM% zAszKZuyC#(aCHLOplrxsOyWI)MDtPkuO@t_A^BtNY2 z478j6EOm)geHO2k(J2K&5RZJD?2)av?pog9sCy) zUm1SogrAK*A6jS|eLU&W2TTj7&vmrfl!tTk3a(G$zN3VcXaa+%Jev6Q!niSEv4)g317T}b|IWf!`yF+krwXxN`;vt;z4(S`4-`y zBRp%N0PZtj7*GF4j`@G8l~%@pWb&obP~&Ov71Dh*M!qGkRJcUg>Ny%R&%L2kxq|yY z@Q)y`t9U*pJb5)x=lVgBKj2y#sJW+PHGLMT=Ag11johWKjK~Nw#GW$v4gEzkO)3s( zB$jcqDw{cGn?@dR-7qv=3`wcdNE(KmR5@%&71PR8%1^uPVs6Mx6@w*Jo+7Q1Hm(!) z@P+9z7q`ik%G|{DV?aHX_;8n^SER~a;#2Fa-%{oZu0O!7=G5_&FP5=eA5VLYvX4hM z52lLwz;rRwc8OfKFG`h`ZPTTN^giT%BVpT-X8iw%bnV1%%%iPmrb<`(bm=0^_qcwY z@+5J89|XH;@6KtGg?|_2*^F$&Kb7()!+e+x`d-Qp#WSf#)@(-VWikF`71$Deq8uBp zxS^3N#9cW`yb#mU#6VcRq6rpISd{!v9au*C78)`ElP?^bKIT&3b+i)VjVJyTz}7_z&O1=2_pH zdhthJ&V9jGIVZsG4-z zE9`|5+Ce{)Ym{FP2J{v9SE+g+Uk2vaUz4U`l=!IkM*Qo*1Nxa9dqXwHIg zw{D|;;;|p}PZ-vwvtF_9Ch$D!HyxYKp^aIKEBo)^J{y?O%dzbWmqy<5X{2gSlJpbb z4AuT`V85SpPC$Q9Yp(0$rFfM4r-=L)uAAsLvsAcev5RM-$L}Xy2`oY|7Bh zdcie^{xM!7(Zv04F{1x>I1#N8XSzoEiNiN4;vZx$KpZ_9&cC1!mXpRRD2o$uHt6I; zg-+arQTu_{@ZSqbr12&3U56!Ht0=|RzKoSXlTOBw-v3bLp;AL%#?g1SA3FrdrMb4LXS;@3HV0y!SNcwzNkx`?Jtt`hIJSlVx0tBo?^{ zJ|nHJT9LWj>qMS_W#}33b65$_LOx-e$!{96p8U_6$vIoLPS$ZP6@4n~oIzc@CUSH~ zyu2e?IZit_STyo}1>-clcT6M4kii+uyK4Xc9P(u(l@{VGHrUTOno3B+o-{sD_7Yzo z_moD96rgVfe<^)q4rc*riIR)EcxIxM&JpP*ol?e%efT-q2cIZSl&-TH>E!Id!`|3U z-*G`V7hN=e=%*EM;&Fr9c*qd@;>r6$h*jHuGhrVKhPh5aUf1g zZzRZ38pkx0AYS~A@f_h4@*H?z73l_$jr%zZLsmaQ!bjLE84_6kCrIsK`b0&7)IHCf zLOrzg2!})1`7PE=gx^5$o5<45iBeL*nFNh$Se?MWCP588A z4P!~CMt(t^ZRgrnXj3}z<$|8E+dx|T&<`p%>8o?IV*1EV+#%-Py~qM6f^ydQR^k}j zpY^l;WnYj@d}XWy%v{%F!-i4nS;b2}4eQ7&HXr(eIzU+zEk_O&@TGWQR0|5 zp7Ye7O7Nuw_WAUS$Kx62AESS8j>X0(cr_VtFPR{rZ^w=GuPx+3zmYi=uFoJ}Z_|Hh zs|b4VbK?1$HM#N^{7Ey!bEObx3ZY*T|1Q#gjrAqhU&Ypr9%F3zXqo7-cf%lkfxL!Z zCEOc?dlNr9eHlu3(1s1@8}WmqVa6ra`f6;bIn6$v`)?9w`OJs?@s3KLHJExv6Y3WF zJ^5wL&`8~sj^{iYd7m&k7={Tj8Ky!a#DbGLOGc(bum^i#?V%=Ab>^b_|1A7fT&47b zSi(9~y)gG1nBY{o)xJ0p|I*EzwIUx7o=O=jXN|IV+QxW;Gb;45YwS2rXGI!Cx*H!-eHi(~u#-wS-l5$k?96^`pHR@?TZCo45~(q5Q~l*}^q7$Bm6+ zGf8V;h163v{VnR@Hs>)Gk@dKZZIqR@v?^N`?(%yXQhIxZY(d`urKeZ0F5~({_*TsW zv@?An)XzN6S%2`Vn(wifT}YnAs=a#X4g5Lt4{`P%{0jLA>)*^3QZh=r@$A%aLo1tz zdn@Rp)1|bKeF6DhNd2j2e)E~a49w|z<_J}P#UUhhRIKJ0`k8(+FxRX0uOyDK{IkC( zrd;YwaP0aV^gQYpTUO(Ny1$07YD`yss|vji3P^YC8O@jJl!Ni+EAELSe5zEMnH zDY>>n5(%3Osj!%P=8(sO$W_WszSkiC7(YGbG_0hoxc741gv>?i)9|0OLTakHRw(i` zVUJ?}!Ov++&VBX~rY4)bseVfSytHWvekzQUIL5~PriXb~{n$wRdGS~6cnVog*^4i5 z?;v49kcLhjD0@A}bq@sK4AgYd|B2%~`k0+1JqkaM@aZB&D!;0q5TBa=N(|_qrpvM! 
z*zXKwzl@cv>+1-&h;lR{D>w%kR`C&snpeikv4>|`DoyUICe63$Yc)g5T#u+QH{@YDq^H84Bivo( zPyJm)fAA=t%jjJvqotGi^c4CDq-!u*x)@`f@SR)jZPT6kW zUEo?NbH3`|87kaY)a&vUQcn5Q{8NQrIrXs;SqQ5C)+5#U)`TnzW5bj`Q(Yq;ElOge z<+NG(9QqaGPnPmSs(jkfOXsmC`GEYqMt%m!59Ke`u*P7`5`2}s1hw+#kKzl)k)Ltc z>LlVi&h<|2>t_7<82J%W|FB$6!mDQ!!S@&^Uc`R}{wj?)HEzHg5JkB*i<}$@@)jxpgdvgP?`*@dj650`_QN6OQvuN~a?I_26=de0!)m;N>3-T^fa?LY=OYv;8V z(k4HQMP=Nl_Gt#J1IOd{qZ01AsJWzVO@?T5**vMa^cS7Aq z(ef^HO6?QGPy5%r#GaTm%gNK*-z}Fi>gTtu(PCUJ@+)=!XIgo`o3J?|@7bcI^*(GkzSL|7*MTzSv{MpAj zk(CEXzmR9^`=g|czUKds{YE~1DLe<{p1v1oclHWB-_u80iHmyt2mIBuDU(_QF40OZ z?qHNwD#=q7*9&l0O(CC*6Td+}!rr}{IN#$r_OYjRQj6dF%9dWxQXjmeZi6bC!(y&M5_%@Q`dKdl4`Jz^82|rE6$#{5` zdU4Y4)%^Mtem`7)8hr-*3{;wBhe;nBQfbv<_wQnJ&v32#E9#XvReqPKu%BtA`&;s) z^74UJo_*wZjQc%Y<9tq*~j=aGbD^arPPkA;ETVXrwga+y=a(I^< zZ_-FE?!EB4W9-q91#l3GprwK}3ep5^>sX^BZEdXakTp;T&NTWivJuSSVZY~I%K6a( z=1$yJ=)K1}U=r)~DV$&6cB*jT1$`%TPdW1udI|HC@t2I1FGtBK+(w><>xY<6=dnIv z9P-n*&f$L^OxRM{@hEXJ_g=((3H+4TKP_6^tO;C$I=PIWmpQoaHqRDcx~MT;<=l`p zil1vf;_U}N9p8fOHplDa3gL}(#9`!hFms=cdE5>igmFy4{oz!t{fj5H?WAi> zh=1*qa+C0WPJp|wKB;wmmZEh(GhG{@%_8`RKj6G`$&=b)#;iW!JoMFRTK~7xv;mo@ zt&X~=dHc)%t-1O<&sWD~XzvjIF5HI);I~Z^9oz9(m;i||8IoZt7&`Y*iJZ?(`-K#L zULd72-WTKBeld+ZkB$7RRJ^uWQ-1YnP304ZG;e)AS##v7MRWL@ztU8Fe^c}KFI8ze z2a=^rvrA4K$>j_-SGw2bimP_7q!Rbl*EvH*&W5=#AN1IfI!{vexe)gGWNBRxBW=*0 z86(!!F=9j7*|T>Xi4jLB=lOc<{0W_`Ae~jP2G+v{$bufu@q6=QrLQnnyvTmsKF$^V zJR1l=XIl)<=VIimw@9|taN}KY|u&?ZSyqJ zjvZOkIZr~`ad#jc*tLm!DxnH$pbm z(KX8Wa+0|Lx?R|&4;w{#xUZi08o>-s#)B553+V_kM=X%%xS+EJVz*g7}k@sR{Co)&* z<@D(at?WhL2Y&sNQh;>h=YoUi-W{|Z6rqF2W_a3|C+ndak8P@2ynnLwfHK=MCbnCJhK2NtMoQ-Vex`E+>%A(NyUs zoQv>oq-RLIcaSRAiRUIn;0`qVQpEyy(XGcAJBh2ToH_jkY>jev%p{MhE(~m<*D)4A z@n-thvFUQ3dt4RM#l4aDE6{c6*aP&Vn2)i=3EZCulOY+VLMqIH9{N)6o`-$R%b3uQ+lO2Au>k$M)57>;XZ%4reT+Zo zF5-9B()V&0KL%1{Hu21b`LGb0Q&{uR{)^GAyS383O)JaLSAb(P>mdB>V9V3WD*V>K zde{J2unD%nR``AU-;O&M_JWDEd)doTVtgS=_Ter7V^p*Z&)|J)^l&RSgNz(~Qc92} zsDvu0fjX!MXOm7Ek!ENCE4T|;e}DtsyP5UJM)nNo9tbc__`wJLj5VzrwbDkJ+9{7J ztIb8(e3TLCu(KC$WnNSF!6{Jhj-Ns5k?MIx5BA&(edB3g_;Gwj4?yPt&v=J87e+dF zQ~!*|E^woJw(u?kN%g0Lu88J>3g}F&9cW{?| z6(uG%XLr$?3u!;Nk8V|Eql^#GbsUd4M(JB%2ir>W#5{S4Yq4CL0EytPU_3e!Ba_j+ zj1zshlhLO_V1)4qw~uk8ADN2ZESL>*VLmK`#Sk&3$TDP@b>Rx+@b^)?j7GhsJY2u5 z!msrM^2pffh5ixN z1)O2{k%1emKQb9hvl&x!7+aCuxLpevbHM{<-euTIJlkO>#^!Bov%i))Wc+VU zq5NqN*ClysfR`|Lkkr5vL^51+#T6-i~d(i|2sneL-sdd|42XX z0J4+#y5cE6_3FGv|D)YqNH=%}uzyRmSc%gCPVho=Q?yuI(Gozno{g6Fd(l$H!+z5= zY-%R9u@oCwfUR7JmQ(7!+gdq;JO^%`v!6#^1Xq~#HgmZ5fKD#qz6|~aItiq+_QxiD z(3*z*EuxR6V}D57JmRiq{s#wK;htf*4mZKZ`aOcY19ze4n2z-qHf|xEJ;a+&yx6`U z8Nl6nTPI!jc(>ssV*qq_F*f)(2XL|fY}3ho;&=eM71#hwfJB%Kk=7^0&x_gNqrAI^ zdw3f2Z9M%qkNyi&@iUL;Bo#RejO?f9BAc-{%Qc;xxmo{MxH zqkYl$Dt~NyAF_|W;LW6eWYa%#=pV?yI@+Iu*x6h!fP+v3&G=ce6Ql&)YDi%3kRT@X zN^q1WNXM}R+9*M6$g%I7##0~IbQRYlPmGruWF6GQ@bY+RM226A6Em`mvm#SJXVU0a zXeFLD;%PsdAXW}iZ5)u=Pm%^WMiRuxa|;LIoZta31mF~$fu56z(hGgyg?{jX9|9wZ z(#d-VU8@u11P97aWOq)2xG0|+JkYw4^f!|};j9g$PxGus>uVg#BG${(}?Xgzk;lAGpB-^NGWGLn{lBeti_@b6Q!9z6{33 zk4akb!V280z+X+g(67;o57ywf9?a~4Hy|y{*;&X<(0o8ETaa79N;%uXo=F+OK^{Ax zb%62@acv&^tu*Gubmm562W|&)JNNE{T-Xcy;2;!13G|dQ{#G#lHeg@1w0{@v5B>w} zKcJKT&~=UVSD5wJJ;vV~jK4$J=K$lckGcOFY>Rk?A5V}<*5mG>etM~kv3Z_6_oM$fzGhSg`W#`=SDnti-4N<3L$G;&V*Hs1rqQ~o5*LGZI8+o5b6^~v*IM>vCPfX-Xk{|NRE&ePaGw&rqa z#ofibt58oIjbK(>9b3D zF9yF=5R3h-L54ev4Oq<69mxx;LAS5TI3MlsuW5k{#8%|bE$kx0W7~ zc4$O5g9E(-?6rr)25S6qE;#h#_~X7^Bu1V~v=G)cle9xc;=NGxa2)+ioy!$5E*G&* zC}N#ZB(2wqq-~@~+DD7TinQG-68j?3NIxVFq*bK}PVj&i0&oh>K+oVI=?#+(c%lCq zWxGKcj~$W#bY>pnyX-@90-Vr2a!6eF4vCvE9x#)ibHsBVF2W^fb{!JSjv~2?Zmp&K 
z6^G;s`Y<@iR|n;_QwKJ|4Zrrpj$;>feDZZW9N9tv3=Ym_P>*`|0&r2Oza=}2bhQ|IOGxbZ27!3$TCifS>avb6(-tGYB>RHxM$cC!`2bdepP#>Wj@%Ysd?1-8O)P$%1w z>Kt_^GQz$o7r7VqK>-|uA_%`;B%74|MPVbjE0rJnjVk2uOBx;)GM+(k8aDjpA*sQ? z4kA|%Nj=i3;aMfJ>?^*xdzvz!yB9IWM01vmuFgdJ!n~J>?tlREmj43hAH>;@e=>e1 zaDoS#G*R-wB;uJO^1IVf(#m|*#vIjtgZXikIp-F0&j54K5c4DRXD9P$S9UDtYn+Ad z;e0=z^Zg{w_ft6ENA}F)`M@IP;RVdY@yxqZn0LWHo_QG5xBgzz4ZtZl1I_u&f2;Xj z2y|-~`|qY$sXDty&f|96X8&Ey{`(mFZ~SYA^5y8s{ZbctUEZCyPpWfv$=lhxq-Jox zypy?G8m_$|$5ReUBYFF6?;CO~rAXe(eqHLD-jw(6y(z`pUXzm2-Ext*FG1O)d@+HM zhnl5FcJb}*eyKeDYq`v|EAaP2hoovjp&U-nlOqjz^3Ie(sqy_rhPf7bd%V0$-PCR5 z{koh3Qak>Dyq^~>^}cAi&h?ur41{?f;`jm1oB3Uirc&$`KQk}w{*L#*Et+wBx6Ah} zZR4fcH(uH-Nz%%*8Y{GqCW*awlGwT?iG$}l9ZeIZYjm1)UP}?@jVW?M^MtrGKa=jl zDPrUqq#wqyPn#V+tq2j6cww&-hcw_=EK9!LKVyj6B2j^I^go;#&{y3sK^-M9BtpZzzg6BucW- zH$kA6xVuPW5oy5|{LCX!vK6@E2$;9SHrccjb3 zGbLmrsAo+6ui1B_d#jlXjxpwMWbChC4nX=hGydO-m$uP(X&;Ff>mK%n`Roe|+5aLP zxOZ}I3s}JcPVj&i0?;$U{&$o;D$=`>{Wtj5vH#9v{|%izF9^r#YMt@VgEDi^$(n@`oPayNuRs_WwDgQOiErPMj{{_K^nC zfqR%ZuER};z#X^?_u&Ejc>aq$d!c_fbqxL?>UeBoj zlmyYOW{!os*yIFcb2{aRM0D#m@1KPjvSaj$??VC4M-H9ibKo+#^ahp)zqk-506lfL>m=dAc;!6w)OTVXpyK1!6G z$XwV9`=9_0f^&~fijeB;-JQ?7Rp=(D1a$`Qg?{?KkN)q!!TcNM8$zztKpoUWBbcED ztk9ar`uu=e2k?9lY{#%0=sIzzL&}Q-fGVGR&u_uj`XoD5A@9Bd;AC;coLITcc27Pv>5|6wk=+LQEw@oJRJ#~o;*-F@^?=!e#5=J%9` zdra%RhkI&!I`jJ?=Jz@5QOQR!(MuWBTU{# zS%ZKLvhZ_~zfH(3;J(FqK5{#_$Zy{f<~P<0J8|cNKc96+A?pwW^Bc02e6*3TcJgHf z8@e4j4p9F57}?7``=9_0LJ^dJ2`ZsyfbtJfe)8odKmFGz|1HXoRA(if@sxiO^+cuH?VD_S_7!DMU63Cne0Kp$=>9|Jm&s1)&NVH`zNsmKySrf+sJ1-Sg(=x zD0xWY{4a&NiRX9e)?jat2$LZhra~&rf*#h{z2m88!h5-|e*yIhWBdOR#((C7ANT*X zP4{YSGn4&4(nDL6eUQTY9Xx|2t_bf;&PC3Lh06VLj2K^=CSkr$Sd4qPlk+H~+8?Y? z?&s-KQ`o1VuYvVY`t{Syt)#(vU;}RNY4SG8HxTHXAaItvog|OQe!`gtQzRDuEwB}~ zLvzw}v5ch1PIPND&!%}EpNqa19OHTZt>GCp(ndJ-&aVN~zHlGc3*aD_cve^TCi4M$ ziE=|FR6z~YK|QouQl-t#Gj^nvXYMwn9d`%Pk;Ly35XKBGUrS=J6W=Z~~mGdH%nW=l_{J|4--nKffp7gm#N;dcY(aQ?oVdSHH4Hr$MUnQ&LYg8svM4a2yN zSLs*0^WX?kcVX&qfcGD0gSKnDqjQV%XQYj`vLiciJCObl_eNe{<}vGYIN`fb!Z zsQ#BlxGi9mDAqQt$%oG5`)bOS$r(THEa+~@f5&8C<1pln({|W3Az_M?gJBgB?R)ZUk&y<9{ZiiyW04fn|MBjtb=CyO+B&^ zEOz$wdDs=0ar>WQ-M*T+eF1j713S*deh08$wcibMMnxF){*MEx-v4nT)mY>~j=g`d zi}gQajTd(SPJ#1?PR=0Dfl(hP=aFt7djJFPC8K-U7xcj;^ve)H_k$1mp>-wpPd?ho zmwH#mwvK!ivi6{yj$XC?h?gteHw@Ro_*$IYL`L8a^kC1u*mWPMcm4XYcV80qKZ*K> z&a<3B`z)d($KKB0xd;j;?hw=MDVSZoWE^*$62cY|NYzihoB20#_DSw1- zG9q7L8@ScntHwZcDevqfEfrcx#eWtwyV#dJ$9o4K$BC8lsWRHZ1`bed(RzgaZ}p@7 zZyS3`_NR9CryWQKd+%i8nGJJcJ}iXAunbl}Pd?Az3wi#I^zwTG{ioSqhuB{u1EcK! 
ziL;Bn^a*g5vj1h@>tdho1`jyd|E?mQHLxBwfO{TcAPe1_$JxT31lfeX1p>9iy_x6V z8+rcCK6opBX58D6JE58VaV~N%SkT+&u&-Z~Ap39^KnLkLE+mRAjdCXO{M$$QFAxr_ za~S`ZGX6scx&wZH|9p`9i=YJj`>_e+kMG|YXn*K0rTvf4{@9i}XYbsM{lSS_%GR*I zQS1--!}~Xt#G#(qsds*>(4FfNqy|~Ggz<*&5d#Z|i+uQI5;xLI-0EGQ7WzjW;mlJK zq#oI*bZCL*d%XX9oAm^`6`OAd2f7m++bG)s%9ux4kskaaU(j~Q0Gxs|a1PGHMYsf) z;R*~ZzDkhm$guh-jeYZv`{(Ca@8chVJK$GqKsEn9&wDnwAAoK>eHtb}B1C?smC49t zmjX+8W`!!=C`DgYhXQW0Cx)Z4_WBm@!0=Tooqti z0zbb0caOEwB<$aS{bMuj4cLDdwlRQ>3}G8cM~Lw$#QDH@#wX6hw{rh>$OS*&yzNEy z(6+tMx0|+se(KZ*e)It2K__dAt|QogDfYjI_UGD<@Ba~|YzFl+jxm_H3g93VK?x{3 zSLF}C%sT?OhpF#MWEIpv9hkFuhKg)dI<&yp`Njqx{6}A+OHy ztc{2#8Ky!ixHXg?|5@nXB+3u7(dR;dHuC3D{%Fb%^YJrh@(vmDF80;Dn!O_WGO*y^ zPFd9&Yz6LBFm|q!PB~XnPB5@XR^Rv-%lT%OJ#$&-axw8eke}~>))RgMWWgrb0$X7_ z?1Wqx{)RIWWcV5Os>{0i9}(4=333JE0rh1@0Wq{}>y3_Hh2kS!5r0 zIUnrDU!C2Ly@xo5Gey#KfD^nBenlsdZ}}ecd7fY5J_Wk2eg%f1k9CF@`oRZ&2*7o&-Gm6-fxB=Y9zf(f_8+^J zOL#f&$Yc@^=Q6R#36Kbr!JMR%WaLyxg;~(NR40}NJpV+uGKRH-|J4kck6Vpt9iZNE zut94|hP0(+Nc+4Du_A5h8Dd9v;C3KAo)Bkp4vvL1Z{1a1*G_@E!M@H2-qWD{}=G^1}tZU@V4$~8o}`0a?DxO1VSjdFnv z>`)b*CQiz^muvgLJtsp7kOx8Kv#*eHtAUjZL!Q!XE}v2cm}cE?g_xV=P< zZ+uqjH!qU+H!hN6x1W*s;ulEWjs^1W$!DZ?^E^44IZtZBPs=-lbEJCn9C`cpQ_^jj zBCeq)#6;Z2aqQQtlcY6`L!{b9#8EjpWzWK7Bo+KTEJTqK6QCfG5lQvD9v=8#UXMDW8 zwcwAWvT31I5qAyzeJ}c?Kas;Je=J8b|Cr~_|3ymL=F)!{BVLJ;vZncBO8O%)PWeMA zt^PxKe_Mvs(+*{yFpqtqlX~I^znme*E3hHSkau?fiPRieERFa_p2(1QGnYwS?+TvT zE|c1vKb5wIbZM%Rj9`Hi%=5*=V zn9j48bm`lX&a;61Nlo6BX&qz3I}Omo6UMZlrnUatRR6DL4b? zpn2YMvA}tB>&oTKjmzaC`XzAmF6Z2GIp>(m8H<+7W&Eyyvyk#4)pw8AkvBno2N^-$ z0q<$XAn^4v2Dvz&q)pXx#JgO(4-X*nGGnot3pqS^%KL^`Wr4XlSB-ap)c?qxr%-apJj-vj~n)PDSY zF5-eM_?fry9vpHzG^6iC=7NR1x95=WFz@o?-Ul7%4)*r8+dQ9!)-KBDBV34ZLzEAx zo-K5YF#ofU)3Z)8um&tX&6@62_EY?(qDlBhg}92K=K|}$LDqj^)_>4n&H9gd%U{d- zuY&oXHD6ag>;FR5|Hy9KF4pF5@PL!~x`22}zyy`xE@b}4zY5*kMci%t#tV8KsCW1L zxP48eg{;TVtcj6EC83$MObgNq7W~J~6-QYA<95Q>xndq`zgz49*oTClA|Ln<^Ubyw z8Guu82F}5GxCocvGN^ao)iZQ__w^Sm;cL(mmeRu%64fI)<0RCI_ zqfGk31^UD_`U+<}-jnpZMXbvP=!evYC!Tc|{mg}aVgYNebk?OC>9^3z+PDqc4XiJ~ z2I{?{AK!m|EnX5;I~2EcoO!>o;F6X#r*4-3J_?B?mPkMt0)GmX7D@f5&8C;~Tru0?!*fbK0N?kVhl&?_NOLEHw?K=xNB zN)>+QZS0wmb9_7lRTqpTo1GrnD<8-1pLhS#^uN~QXf&Kp=;g+&r2HPC$ z0y?l^_5H7vdmP{d4~(4$uuoCn0DE!w5T=)~|9Sq#`8zh^hX8b9XI&$Y&fgeAx`(iT z*MFYB1&HGmID6?cAJI3k=`*;`f%*pchwp#AXKij{uCgq-o9s)*vA&qbcj(-+3f91S=owMJam4cv zr1uDWdIR+jeh5J4HNOA7#rMBRCwu;Gbk_yG{~h4_-w?kiM;uwO37jYSUI00EK6e}Y zLEjEL;s5vjChg!)qYY?_<~F{OSj+Fl5JtVPxEE z&cS)O2$!ITIlUM9&NA*o{~+Tq_(P1#r#XLT4bXLf^}#ykdgvysD~)lPHGv1rjMJBi z=L!tNb!e{T`#Z+#o9I>_XXq||`yc%dIGDdXSU1=kn8V>Ne)r)4=r+PUs%N{ztKYq=);QIXpuko)xeP)_^*jcjfcW z1G@UAqz^WrXF*^oeQyD2lrj#0WggFO@ZSQ;?zbYhgE^XY5YkTmY%q52$2gFS-(u>@ z8lo@4Uflb@_%ZXbd@BX$zh8d`aTh@e_}Lp*BCDVV>Y(f@-~IF6P=xR3>v1=N8Ct*! 
z4se18yb%6~b0OsL)BKMrn{5EI@E7-se9pHdY#*Hqd+I`G)#!(5mT3t=%tSmztR=K25Y^by>{FLLIITm@@jJ*a1<8<1JB3ATXx zcEW=HR`egf-`?EFC7<>PR-yrHaJzaWvp52G^4)IJJ>BH?u1~&8keLCOYNAvxCJm23VyHj|!&ieyy zbk8V$z0+kc@$G{GI0$a;b@6Px2;Ix`^1gf1r3Bps0j~K6N#g=(AuI7S6HgVg2AT~# zPes;)rJ8d2C|BupX~b=Yjt0s_7#q@F%k%d+lpor&3AYFTeEbV3KhnYT`xfr8f&-l3 z0WSpL6!hGn{3GOXl=36{Z;?0B^+NzUQz-v6%8$Pj*-bbX(v91LbPn+w2gGv@&cj9c z;rA1aFEOUfByP&-e~M?fTnpg#FC~oyq=o#U|Cwuf7lHm~aZQ&?gu4RG=qB247~M)) z+R2ao0ANQGH28|K1%=)FN73}f5(uyLdhcRw<~Sl~x?&Efr@G}hkf ztifku*Q|fLS^Ihh*`rM2yF=nw49mb=%K8FnK{mrG^!2a-+KJbCjqzuc@rQUjz%jsn zeI0X6HtRt!GbZ@oi{d+?au9bBlz^LY-32Ceuj}F3 zv=Y4v)SlFDVE#a=Z^hT(R|9oW4~<}k7O+CAhWEOmJ)ify*?ZfVi|x$G9c`=+pp&`2 zi#^zhd(7`6%}mu;wUa?Sb^;4j|RD75y>Zxg;(1j;a&s z0WSogH9lI}3aP(L-VtkJezq__vql+vKEYhwS;{`Hntfdx`#RPz-CTEZ-Hr4d;5Sm% zu@8j4&799eKe`Wo_ zJeR=FJcTt3@(MH$vX4Vv2Mc=pJ=PT=)-t#w(9z3&7i?!K$7%MTtRdUr|0C`GgKW<4 z#NWR~TO;&zCL&@D9VN3eUzrtO@wL8UI#V(wN|aUNiYuI8q8-?{Z#LuN?pOY^!pS}?JQSv3?7~C#(bK-4vI~MzG zDdO1pCM1u`Yp2W&sxfv){7S6C8q^---x9x$z5$z1j%}#I``a%&gm+;ND%81^|LXcb z%}*(O2swWKYVrsY=uzIK$Xe^thid7QvWoJQ=^J42qs1zbX_JRaNM zSLoU4^50~0J?Phvzc2qGjuGg>b#XnvwXPD`Yj1&DWh$F{CV1DiYc zlQ%xc{-4MGM+*B~!$4_`!+1=->Yx_pmED&Uij!l_WxA< z|NZp;qvbB!9?~1Fv&#>%%eR(2A1#<%;JB*(YNibz}7!@nXiFt5zaL8KcEe9{{2$vEXPW$LaXb{ z40|%Hp=Y0RZ-e+K=oQ~*^R0Jp!rhMPB43l&3iI#frMUl2?Tda_Z*OpXjpx@e$8%j^ z{=L4*IDV)}IOUl%l1ayIa$GsKp$a>&3*$WZJ!GA8)T3d9bkImoF7tj6{S%M<3%;rJ zZT5xtpYdKFjW79={;8YtJ>vMlea>+Jhfs|pi2eHsGTQTTO}{ukASIke8?wmb1kyK) z!%6Zq&fpwcw-$#?PjR?F&(70snDv2hiGBrzc7t&21M4EICl-fZ@zH;9o$N#H3&o*k zOmVnHPwdt%cwIY#eh(>ic9U>Y+a%5b+@frY>+rlb$DAg=_k8Wcq1QSby-y4dasFNZ z|3#YCE$sW)N5epRCB&qmMp^L~^A&B};lWZH2pFL^&pyq^W^zd6sx7T95xmmu)Y~i`c{aF3`qWQY?4B0w|FN0qB zIrsZ5bC`S=S!G1meb-0kdR*UmW5xGA5&n6=6XD?H;_&A5A>qKpA>n(si^KjK9}3@{ z{-Ln%{8QmO&BMap0Urr(TpbqnWJZK0%5~P>_4_7NKQkbdlQm@RbH=C9 ztFQyRum}5a0EbYGBS;{HG^*5k8oP@^(z9v0TNF|^i?qLg z8}2#o0s1dg7ho_-Fcj?@2bljc!2FK^Axn1YbL=K_!ucE4Cf{EansICvUlm%6_0OP{ z-iGQ?MIrZ---hAR8i`WWE-DH&lZ(PwdSZ!d!#MhQqz;;6gCrU;L0pLscetAuPnJLZLn2Y&Xh<3+zIJT3_CbelY+BV(VIAq>=k7~m-pXZ-I+V3smAB_)a z6J9KhrKmouokP~#?jKf?t57@2UTfq!B=|BK2DrAhu5plSBbzp|w?1iKF2DEw+y&p0gou8gva6fb+O>aYfzqEHd$BXy9 z!}}pCu6oq{kS9-|cJlpjl01!?>F(<(_k}aU=g@Ti-H>vPjs4sw>fU}3>fKiZ5@W<8 zi6*3Q!GA8{3VLx3*U^VtXy57nT~Fs;_n&nC8Q0d~8tdIZj`qAOkG{+Q>|XD_YkZ3P zcMYwsq0MpWMGwLq>710m?vW4Bde|}k+yBe%{~2jxAbl|MPf7bZ=RnuQcMIdvsz*Jr zZ^XlQRO>t?jvI=a8}EjOW$%UrD!!)7*I(T9lKWcVKG(ZX`bfw1e&*d!O7^^R|Ka?< zC+r(7JPzYA0TrLn|Nn`iFp1vxg?GagavI{=J!NF>^YZcM280>(R{1f*Pd1aDRY!Mr z>!YI2MShX*MqcYCyVULVqvXM1?EhyT_EW{NJ&p2BO8wO&FXz6d@7MnpVlkFtIaXp7 zI_~-gZ_0afuCn zas%FPKWq{%$2RQ1F6=?yr?j2O>UY=@fJ`zqY$ zZuwYT8g0lTj}thFKJCbg?-%JKd=O3x_g1JY$aA=WOGv9Tu8`Y*GQ#{}-zQ?HgQZJ{TA{EeFx|k-<8z<*XLSa%uapkVZUqR#OVKKUq7q-N|$dvpo=d<^@1Dtk)0EhE_0XuZrn zKX3lPe(Rn{um2)-3kIXM+5CYH^9ShBCm+}U8A>0Hl>f!G2a@9BS_C7-rCnDkITo#a z4&%u2h<=E|{J#~(_zO=$VP1f6*Jk|`>-GOX=l&-<$8`7a`U>OnUvv({`F~UVXBx^d z12Zujb1@(7@BE4T-_1v`m!Eql|3tC-cdf@f;{>hsYYTnmmG9dV)+LaajIN`tEDwZ}oCY-j8c&{HOju_G{Yz+mJ;bCvXy{ z@&DKVSD62E-nHTJ{2y)iLjTWm@|$yu;ZQjQg?_+C?_m|Y(HxA&IH1FVEOkdre`;_$u&HsBK+-vXP{{O4@ z!2dUp>>c8pQhpAmSAE|40$(&%f!=z`IWDtZ=vjHc^Q=Ax`bgxvz58b0K!54NLL9xwzP`b}zRQ-r&A#5tzGl0(qAgPr z+S&CT%Y1*xq6^)~A&=@M#lM^XI?5b%=d69*{C7TtGJ0Y^+jqA)0`!?k-DmsWWCP!2 z3vYcg%oZ2VTlov?6n81pUCUf?^O3pAFR+ncV2-g1!i$lc%rEc^Kfx@1f)|YoV2dvm zzZ@&E3boHV{}}c^J>gm!y4nBq4M_F8cYfz?ID~2(L7_kV zy!U;XzfZXC1^&Nzd^1R_;G5aZN3&Z!ja0-jX%y;jb$F%vs;a`X6qiT4V>-}@EV`OK zU*r(idO3QN|L-pU-*M#wTEu10iZ;YKDJP_p`?~zE4nIk6eM%ekwzdB0+3D)l$=1!H zpF@7Va7$t$Q`U>qEI4K>a3)mm-3jcj}2J~T~K=b>?)wk+zV>fghE 
z=>PPeJGh4j=)YJ!fx#%jf9U`8-RJS2`9FtBV>m{l6n&q!j~!Y0U4HwY^COs>6W5;T z`HlHz;8{>pLYEV=0znC03!AUvdq(4jWMMYikw!+_zZImxt)nOYZd@?e`sFlVi$Jt1Q?? zRw2#_dF{SFcXA84^|rcF{0{8G9_+&bRJO}c-|~Kn`Ts}o|F7o%S8q0|HA+uF29ASFO&^lP6G1KS`cO&2HDd^Wk-$5k7~)b-S0wweEwg8z%p4 z_1us^Ba*Kx|C>EmT=1VuxPo3>!*%rG7TUGDJNR8Y$t-_N7uhYGBlGh3(ZkCBr1HN; z`QM@Z_pW}o{{XOH`rB6T{vy$^Yb|cPJ&;5JlB@6^)&F(Mf_i04!(`>e zEak!?d{)mI*w-LU7>8!&h#I+?WpMF~V)APUS`QP^ZcY6MNJ^!tqKl0t4 z|Db3CX!|=Z-J|?N2CY|>e;4fuC#`KUJ*1UaJIKss{xj`?UGzQJhfenr=Q4Mxzq_Y; zZk}r%wL8tf@%tgvY}fW7W1svHGJ&SGz9TfQ_FXND`BOUsDZi!BhMo#-XR`Mb+Ke&m znV*PZHb(9jqryr0Y4q_SSNzu40%Hu$#PAmehI8ZvT*4JR`i^?(aZYsQ=Zr`A@zdeD za35~r4({Or>huxU4>14UJo*IL*w6g?LFV62HUA#h9MgY^G8cnUf}t3Wk!U|<{{4CL z@6VclZ|(iAyVl>oVg3F6*5Aj`XUxBU&is36rOmf*p=X?{)w$Y?CrEEE4W-fi))N#{<|oT z^=Mz?kH-Jbk^dLS|4Zb5vM~SqruH8S~3klt}RdB zfWr7+;qI-@MQ)1m-chx&0Oj3Z;?!GR>qNg{I(AV za0t~nf&@}XqYYW)aRR4t2C>c1^K*UF`uux;Q5xdB`{>);ir6o5&M}YX|6QP8!WHzQ z@8iBL@_N*9D@y6yA@AV<`u}gvVO;w_axh9T6ooM~WUqe6v~pr3y%b|H4z1$i8bIUe zh5pB9tTRlXguFIO4v+dD>n5{7wrZ!~PmTZGa{qVZzo+?sC0}}#b%M(16<;q3?X$dp zbT0D#(M9h@Zlm%6h4H`j#vXLL){L&Ok*gmOF@|GSNzc+S1?tyIx>AT^BLh2$7^*t2{uY~Ome zZ^Hui?+W(sX0|0+m?yVPo!~#aum}54XuoPV#`SLx2p8;MHgMILb@JOTio0y=x^${>1PR37GPl{^^z1XzMx}OG6(3=qUm`cgImj;8 zo_0+1A+*I~QF~Y!N}fOsKf_g}63LH3wmFo~RkzE3&s=g)*` z^tcvxa^6E#l287=h=nsuZ=v)UM~3Kh10^l>ePkOiTiLYCM&*VO?`4XR^sqG9|+$c z#!q9s!4KSTA0O%}@oTUS8?Xu6Kl2wMu0>Ez&+KJdsYt*-+p^|*n4(pc;lrHTch&B z;X7{+4civ{+`jEU4^`4iplau+P`UeKVf)mNSp#!K*zwfU_UJQi&~It%+CD7&(|&zl zQ$G~Gxp!39Gw&ne+b@13wD~QIeaozYx&Dc;_u!!L#@%81zVs(1i$mUTCvXy{aRzVR z91;$%E)M7D)%}XX54QVe_ZNj9?k);PM*NjMh>ZOi^jD$o*}sxE&BN+v-c`R(hsIH# z4XI~8t&ir@%9hXQ|N4x4^vQ7S{%81i&8a*4$#6lMmv9BWxQ6S9ZR|Ma=(US{qQZ4g z@o(^JG#E3GSirx*SCCxGzp{VFEb`wV9GIg`9?(t?uif`+C7R3gH=OdeMEk$HsZ? z6Lnjy|A+=8(1;|On(b9_*qWIBGaGX;9}DqXhi9m5y#?))t-m>)k6(F_-Rt|_>HE(3 zzSVbm^5|;$&3At6MfIP0sKv3FQR=^8#^4p}qn6%Mr27?zb%R! 
z8`oCY-^caVxu*Kb${W}9yZuM}u8xjA$|`B>z%J~;PXfO`c>ssd?pivMuC2!XYvXsV zSO05U<;cA7QPY;OZ!rgXOO;ajX|=||M1PCko!teh<%_L_j^fvcC%|+Z*4YuFY;5p8~5A2;9*%* zJ$XP_DgFYk;W}z}n^XD>{{lVHZR}mfT0rzWNZr-{dRzbNUj47wqMX@^*Vx(Voz^mU zOdLBG_YitO@4rl5LFXK{`ipG#C2aQ>*zm|>u-{5>N4^?L4#!B8Vk`>n6Ln<6OX@K+ zs?Q4TlUR@QAsoH>-uOc@?gP;B4&MP<7a4!ZKhe(4??5NAh+_}BT|*pu5c?;pkMn^z z&v;D0B-HXh)SR{c4?Qu+xt)I+y$mUJZWEGdR7bym{j(W<&wZE8j+yk?s0`-Q|Je9_ z`g|z_5V zKRVbSud_eUqFs_ftM9YT_nFp5UnQL#*o8f4RsUwNkN)WUIY2*zn5T1ybyyd2Y@#@( zqS|jqP%Ez|$P{Ym4HJcZb7|pQ}({8!Q-9X|szk^A9i!)$Uc z`kvNTpkHP_y>S0U)5Ah~+IwA0W=43wgUkh?FULxB_G6!*3*E>ej!&xF{(Pu6r@aA* zx1Lu%J|B`~)A8p+>i+X#mH)26I&8oulw%vJ&_1tB{a2>`D^vfKDgVlpe`U(QGUZ=c zIQqKz|2Sq2e;O_H3|cpqg*L~udDhiapAS2vH4e2en3GTLLCrkZHOqBjpYQ=R(Nkzd z5{3S+dA<*%cbA1jj>(u`-uhx$sHRtbj@_Vdt5Z0;#P!W_ebZgvv#xJUS*V-i{uek8 z5-X&;S-P*g{|@KD5osilLKp z_bl2_?RlS+&S{*%In)k%-ujErhYR$?Q?5<;68#ENt6iJxOs;jU{hklK;;!L3`fv+( za1Rgg*k3&$g#OEYNA1cd45pXh_xiu&xyH%zpFG$^ri6z&W;jNo6k{`W=1mceTgSN^k2Crwo}Soyt$Dr+;ESetKd3uI~O} z8a*qm&fUfs(`O*=9hO5ky0%*PKYS!qD@SJfZ8mD9J(rx18hXQde#Ohy{}En{IA$h= z#<$!j+54sEL(fmktg-7^e%9I%j*0neIk^(65cB66GWLJ=ef9aUj=l+fpD%m3{vdzv zYn%Dzf^lBIavyIR|77jpa>r~#6?R}3_Fx|l;1JraNzj4L{njwpZS8^%YZxGR)*1#l zI>7f+?E4|pqkKPPM!1!1b6nj)-*3`4QsevW_DzsU;U+S*+4=#E+61U}t|Lewg*4hw zr#`Po!?Wri#6IP?wrrvQUwDIKvdH5EPU1Aq;2a9}?{4-l9^1d_`A7BpW0W%=gt-m76bU zlcLr+`W$x)HMiBP7u^>M`(M4RUrk+`TH-!8yH9$ZGz#kvzVyEK#YXopoWdRH+`|L( z|C(#SV3c4e{^$E|%KszS|2VeQ{i6jLw4x1h@4(^G8HrMiMeAM1+_dHoJ$u>xV?2EV z@@)Max)-qjF-ct9Lum>*4RP({GI9pu9!hcSL;^E~XQQc~`$BBHBvIJ^Mmj^nQu(b-x<;CrX5|Cc z(JOxTfv|z>lx}vf^mj^sqV$nR%=?@CzWwVjg>rHmYVYfZKovc4Q5kSnUqPJz-X-sE zR<X9=KBHrHVGs79@2KzR$Hw%~525!JzKu8dHrma@`KkHV;*KDJ6w+uz#pl)Q zUoH*>|JY>rGhKg#_&iRau>apzio4pBsG3Er&$0E;vkVCgVp|0oD|IZm;K3O04 zbhZas82`Urzaml{Pll7uHO~LWV*=vZ4_guY7A6U|FVeq{LO=UPHt9=jQ{+bIXUEYR zPL$wqRyaNtjR1u6J!%NMDRR`!|Pf$8@b=|E_iaXg~su zyNxYC6H-{}Kg+QatFQ*^umPLU?m2brWdD=dgY18Lcf6+z`=5XJ=uP*JV@SW{{*Rkm zfL64j`tFmVTsqrOg&nAU%J_p3PljFe_)Z$IhrSP~0j>>6$HX-Z3*Xx??_(5ytltlz z8WnG7gP`(r_J#Smu?F$KNTUr|Nw71zbYAeAFQybs~!{bf1zxZ_1xIx|99;qWXW1x}I!VqW&jah1WO?HWW^_p112}2_F40P$9y1^(;v)m-YLfdf)uT!_N7v z`*d6yZOEc^iTZbp>yKe|Z6{99Pa>~=&7m97*MC}EwRXfA@*HYMXwQ(BP_yEp?;xSR zy&~L;rn}CEMkG<^zx8bNhFQiR_$C_J@=3DEH3RQSFBO3&x2{HbghNH46L`GWa0^b$1DQ?ul+dGZ&Aii>-54kt&V z_ON~haxCKbp!#}iFdSAsx!wlXoxE%gBvLoE)$Y5GPdy#RId(iIU=pTa8p_bInGX_i z?d?v)wYR&Fr^mg{n)iAJo=JL+XF$&^@eI%=+=>~}n2Fh#i}_fH#aN2>+fU1dS78mR ze&dz|IZs8dg*ab`i5ENi9aE~zwF%}93A2u`Xm}(938GX zCOxP$Tqpa`O20+kL5AKrz_@_o(r{1s0dm55@m<68;fp_d)%buL#s`q;`^E>5nRkp2 z7-4(>+MhMPV6t?kONZ=w%Kt_g10apOwEF*zb`b9PJ_nN}7>eN-iMq4)7eHYgK=&W) zFL1Eb+H#{qspH0C9LA$oTn#4B6Yjm?h0$RWeF{?In}n0%8$F}<*T2*JUWOT{Xz`4Z z|3$wrTe$KI%FfTQ5xcAfE<7K(7Wsdt@ke{*f5fu7QGJv|6Fv2o`QeURjHOtPl~{!} zSck&;^T*|XWYLA+TmRqtZpN`^`TEc@UEA=P-;F=buI8V@25D`= z?0o8@;oEEfJnXsp=OOL4zBm3ZeCMY1AFhrLZ_N7huy@4h@IBXdKpB?xdmbmy^YIUd zH*c!nKmO5hXiRDN=i{Z};Qo=}&F4ni7fwG~_wcZP>hSR0=HX%A>qEnL>W7BCcw_xv zhCNd!nEy5*>~5YAzBPG#*p>W?(EInL;r|YS|IV8H^4Lk|*m6_50=c?@;k57>oI`y7 zeV-}~7wDJJ^F#UUC;We}*k7T`SblN6xQ2>1*}!D(W&a^>;STyf#Q#?`I^3f_K>wHI z0Srb7hGIBIV#{>x^+C!9^{N=T2{99sJQ zeaH;^`!G|wZ5Zo6xld~64AH*+ru`L#$72E}VG0f_Pk*q09lkk)A6i?oX4sHWdwWPY zVol1rG1i-W>j^&eZ-m64Z}3@tBP2(y4^11t&JXkTP*de?NO`DBtoTnY~mU(vTjX!)z`veUewNEZ;7u?n^a8HHx z2d^sMPkDcc_SPQ%ul&0HWaY>K`XRJ$cK-w1f3f=~yGFTxGL8|*lSdC~f34U4a$K5h znWt^D#QiUD|8vBd<5w-6BS;{H*3IVVAx+OVo7=a@T)n6xzn^bwuQ`6!{O_ta$4^{! zrYM{sPoj3}fN+{TgBp6nDDU<5fN)Ou0-DZxua~`N{SL|a?mP6sX$K@uX{(b-WkMq! zt^aY!f3Bbx*Ki$uxP?0?jQ?ljN8e*%{f~nWeGmmZL0t0!eUOO0g=2`m#}?mz+dFI! 
zGCiFQA)N>4|9{Fuh`xtR2b+N&eGYMMU*$jWAADaw_EdKMME?8j&V!+jsorQ!DRLxg zZ(D1V9E<3Kh`xse#tDx{VPApG?(w+x5w>WzZPk8zjqLkZz6R}r-d`7mo+skEA4Q?> zujKuY7g@JzNSNUNlh9MqFH9kOf2v=KEJMYQibMJodxD-(f5bTeGwHK27oF;oY*PK- zq5emXp08n_vj3Z3WS_pkK1B;XGlzY;mVJu$t?XNLB8x6`BZoW&N_##QVlno4j!Vhq zSc$re`j^qLnti>3&B~rnl1)f0VE=A6hyFRwU|1-Oe_iE2wc={_8h=ku^ss-8!Cyz; zfYdwwD=v9kTG%8mz1{f#X#YR$ozlz2Z9`@^+kdC=|Hk@P3GYB|BEJM4jsLG>M;F#V znC1Fjl;#p?zu+85t!IP#&mQc<0USa#jv#^d!=4X1i#^`~p6@8nx1Z;W!u;=I&-aw) zd(rbf@A)=+zG$uYeEqNbxbsLSjW%RaE3U?|d3wVC8~pzS{UqWw#l0Dm{amZxPK!H( zbGU#)dt|cjNx6SX_zHSa;hV{!^3&QOzX{>GxIWY!*9JfPp8rp~zOet*WIi(aB1PVE z%pKgr1N2{|uEAiGpxw9KA^*j^mqpj^hqifP|4sT)c7F2$_Wu%jjV!Fsb@O4~Ya_GG z{#!2}Nh?>u{usvo5RPkvc9Cgqhv9x3iBgP3>t6X&ULHqpxFLVtHr9_m0ZqcGdF&4| zdDYlvan;%!Q^;wkJ*zK)oPnCB^c#?`jd}0;^9}YDX`{Vith4yJ=>2#53XyUA|6+0} zmSZJWq4KNREl=1BfF9QZSx06zKkQ%KK;MLNbZTp7(RJ7Nan`#)9^3p@Egx2qJ5bwg z9|&>}YBF)W@Wb)^`-BgmX@qtm8YgNel83~_aRb$4??;}rf}3{8E9%}D{_78h1erok z8PfM@Wdywq6`v7qG(Kgj`T+(1biMjP-T8Qb`oq@Gc}M+%&Qt1_x706})i1}@FUU`~ z@1C^YZ=Ys`^EiQPzLz@nMLim3@k66=k@^jf#-Y_E)qffFU$^>?Y!pti)0?zcQf&5< z(m9PvKEk-C*ctjcT)-u?uhzdmfPbJL|A4x=OT8TH=G?RT_i=P9{{oJwW78}67u3I* zW&8_>;{(!@+2+#e#Wh?*h1Z7PPbZJCfS}HQN8Hwg1;^|7(Zk$h`2Mod46!_RX+?>#fNty$P6viY@B4r|qvn zul&s``s4I%H_9)K9Pv*Z8zX6+2j@sS)_s^JfNKd%OhD@|Q0jT)pDf_Pt?j zj&C9Ekrv0Qj1xB=6XI{bl)rbr8anpAs*Uq%$kx2dr}e7x=~d;^tIDTW!_gJ5hUV3; zhGQFF4e7P7hL%OI@~yubS{>Jxc{S86cs0~7do?s5fkq_JgcK$@&k^@NB|;yLX(+=C z)Jd}*4M?DIce(Gsod3O?|GhlSbj)nb#e6KpVl2gSv~Mrh{x8@5FXw+R=YKEf|0?JI zDi8T3<-Y%N{`YeJ_j3OCa{l*n{`YeJ_j3Msad*qZO6jb^8mvR>Q|@uZt6>8@JK3|r zCVDyY13VjaJErTIR||WWR6F0JeduaEk8O_2eal)NOMZ7>RpNG_=>^XPh3DkhI(q#) z$I%m;#e3dK&%5ca_j^-)El*W_MxEfld$11&a0u1-z4d3<34b*HhhL%jAp4(8&td;B z@NF(pKap|$ne^q&;fQn+NFj~Z8=FH0ZS?FS{(*UMY@2(J>{mwY*8U~qUKS_BRqx*% zPLiilTfaG+A3c_$@HEtnG;2h#{^75 z9p6Vie|+E)2WKuZJO^)?!>^q<0xM?WE49vuA%td>#@gGm|J>v2F>hu10S^Z8P zRSq=c*hbHKt>?eU^GEAc&wsMG=d9TyorPG8rFb-cXgNK$**md{z6O6dfB*gaUn_&c zI>+?=vOM&4`F`m=A5t#-8~*)O_C$Hu6yv`5YA7drKk;g~Fw6raa;{qMeaZ?J?`JLi@pc@Z~%|??>R)zc56GJntlYaPawBd{YiH1 zRsTK9|BZ&J+6Q>N|Lzjkz&=g*PYP+YA&Wdt;3V4LQU9a!uKFKc^ls#qsQ=aPkJ~r> z*t(;%Z`@mkZ5;PHY`bXuU$gS@efGy`Y1Y;o|95y`I73g|*EYd9dc_ZY6Q{IIj%%Zk zjpv=`C*Gm^xZw9oXsy?tAbSzlAuP-bQT|u*?N$x(UC`sau{igx8;|dQk$YvE7xw>K z?b&DK|9bsD`hc!U>pJ>y3wLl25AdIkKf37qJM8;I8ZC%xjJNLg{h^vY-hYieg~2F6 zE!(_CAJ9;GVuidR{|={*L`r^X5|>;rub@=iSd7DXOu!^e!8APfKk-qQ3D3Z6W^3-kRHe5FC;V5*$0!YjYD6G{5L-+f3X$NrQPzle?z-xx!-%g(J!ndqyKCb zxd!X70h>^cZK%Qy#I?+Kk$X@#NB&p7H6T&%o;utcn&>H1yj&diIqm=sp&Cb!K<*pG zp?$UdKR_N;4m`I1gmc1q^5{kVpE%~Y^a{^^v*%B?qHTfo|Bc1!`D-?ROY{K_d^rAZ z5c^H}mvW9YvdH5E{$KUMNwSaZ`6h=uqxJdHgPsg`oaY`Mp#Rt9p~-CI0elPe?0Wyj zV0sDi3-}(;z2f0m`=R2h=h&x^9EsYsPli%*ENWhG-4k6mDt@S~h^D7pHyX#d57gDT zKlju?COX7l7JuCRA+9yB<;Ful?rWaaIB6E<-?x}gOP_%DQ{F$~8Y|he@7Z^rac;l& zzt{WU?)iB4kJo?m-ZR35_wTs$b5Djz(wTy3C`0QD?%zGmpl9XPPT`sK*~km$an&!GKTa-0?SLo4Vsa^J+-HMu!aXb(UWulw-o0?+ZS6?%ck}PHjrp@04{BSJ zO-N<5+x=$^)?ouSp&Z*#g?8=A|JnKuN8e`u*RcQD^J%ouGfDYd8@~-(inUFj^1Wk+ z^!k1+Z<2eEVN3YN?dVy@%u6($me7GC)kn(|^x@ugp z->x9{`2nHgivvP0{Ti;L54Ug!_wWGkA3xK7tupIl=J$N%N&A07IW|~a35H@g3hOg} zZ9o`FFU44lL*FY;hQ2!A&wAgY@B~c46tqtCjScG;rqQ#bd_VLu`V8cI*oOC&q3FUu zaWgR+bFt4g&L{ePyapHbNV=UMgU2=z1K`u|IvXE|146>8U7|7L-G<>(3RfH*gC9Xb&UuJ)7!S}7A+^Z0iHmzQ z#yO1t{Q48&(D4D`%~ONIL441godexN^j%rQv7*$9+wyeyCuQxf6;Fn5cKmJFNq=qX{|MV~Bwh>Pi9ZkD9#mrN(g#CUeBWPv z)Z8%lg*P^TFzlT?IMna_XsFx%(U9o(A!YSPLvrnhLu2N{A=UAhp{d`e!r{&4`uK14 z;nCp-_4;L-|2+Kgc4;_rb!4ceSNw#H{>7)m3CEnoX`I11TtIWbPpX6e(%h8)5{|v| zUqa^gUxb!^p9mFS9~Cb7y)eIbnD1wleBu7Di0egFyZ-&pjtbZ5eYk}?xQ7Sm|F_yq 
z7>p7O#c=d)F&~-i`MxoCWS_BvW65zCj|rHBDVT;b#QptdkiGo>Gh>*Kc6JOm>i^LW zh`#Z7eSJ^sI~G45x%N_f0L%aH@V^MhIsTRQ3Rp^S_3Sg+Gt235&S583(pMqh;oH3U z&yO=&da7}98|F}|SQ_yT?ZZF}|qKWp85 zZPPfW(Z>?<(v0)w<2?Ek^u&tMp#g>Yf4^0Cu6F;+-2Zy_kCTo`?;jmblV{MX95_c_ zK&E+g=$xfoz$M`;$W3<+Wk}al??5?H_u}YKAIDfAo~v=QwD)V9z3yCFwf`Hn3;pjJ zuA>jPa0mC$p49%(rs!PlJY-@2X)-6ACy$=e{>QO;-%5@4XScfHu=WSq_8LF*($o4c zo(`SIpAK1cp&L2marAZ1568wl9n$83wv2jO`TDf-^=bZrr$gN`?+*<~pz)&jho)2B z-|^D$z%}+?C$C^IN-z||F%qR1i)iCl&l?rS(Q7u23blJb5XRFdU=kkB&%deuy|4bg zt<3T5PxD*Puhnm4?<@TZdvR>#?~3E`XNW7TKk&n%Fq1wTb1@$a(f3V#kF&J@(Xd4Q zt{iLJD19__OFyIS@%;f|v16BFIdWY^VI{c=Yta6j=kGmsdaqeB`t-Wbd;XU_|F=E= zx3&MzYyV%?2T8UF$GW+7l;=-$S{12&-?+faoa*oAjuyZ_hr zQ~2WOutzw(*BA@(01lxVtsTzM?0)Ev)?Y5{&&ZdN-|gDm@1ymXr^Nrv(ZgIgj4F{Ml#9%-}1V#fp~4d`d|Hd!ZDBf4^Glg;|%_z`G0jU zsDI}?tmFS^{ecyK7S8$a1zf@v^x_(>qYv$i*#GE!N&Swljp}#gRy=H99DR%b|84$% zGJR3|Vl>dV$#lg=I7!voY_W&hu{*4j7Z|3R*e?_eN(F#c%%A0K;Re1-C( z#P359=l@0j-*9^6kJ$a%A0z3d7>jWjj|nKu|J%v`e~^E7uXefeIf*8DHg#P7|Gsh7 zj+=sMD8mfQ#B8*0^!=gpZT|nawCOHu+mSiMvEE0OA zAr_-`s_$o_b$saA7u6qFPG5=qLH<8$ zm!s*Te1%3N5kKgyh~pc!`8}>dxPz>!)4uN-5_Zw|U>^?P5UOzm38aul8?xyAs(eSr z`u7AGeSs&*(>Q~3xPVKzg1VRFe>7}#ooJ*d@hJc28m)=wxNEqM$NtHB`B1ohp69>F z^Is2S@!I*2pA>VmRXXoJ>7ye7^W;Xb?{9)Sr0OI(@=3FcY)UDlT*T$uO6m zJl!%k%fC&>~FG3I7Jrh!}r=hi~WBomZL&n z%u2G|HFcnKD*pny=-tTO*Ux|3{DURzZ|NM9UfgT2Mf#aB`rqkoBlMfrl!R5%sr;m8 z@kQ%r(rd-lm`A^kp6D@$et${WK;MMaRr3(;nmd2f9D1@`T>2e*6_QnG-EEI{au+gJ zU6=dJy3akr`|xO9VAp=vi8|L&@7fCcFHCj+u06TH{k!fsKH`A?96~jYAb}LpXhZvL z-=F;5i7dJh_jZYEJm&Fu|Aj&BA1yuVf3%`a{ZySS4q55saRMh%d*1r{?1Bo zZWo6$^m9l(BX1yi*|&i$OXOc=;A<<`U-Q`BZ?dh8hiM(5ugvjr{>~M$u>TgHUYGc8 zVXYNSn9v+}V|7ZWd(_X<~I^nEl-Ul$6-7spjMe0%f3nUIQKcufu2I2hSYh_4@t)~qStrV6Zin0 zVE<#w{rBny&%kWV#eDSr;>oa(TpabUd8ez*^Q9N&{|^}umeX6sMIX~jdiI9C zVXUIBLEdx8DQ~;^0lLUS{lmU2?7P~nUT3Ek_SYd(BL;?<{<97nunFbZhAQkpJ3G5W zecM^A{vDwH9i{#ybI4ETe^}OG!S{l}C_&u!Ak$+V9(s1CJ^&1- zk3{|jc~gB6$6$AnbX4N!WyhYoqDQ%!TZ|h>aL_b-PEjn;72^FtZ2S0f0OAO z^7n1|`@Z~5wh6Z@V+!Mw9Gg|X6!sq`bBOy7$NjT6IL{`OV;c(j_o{v``lI~2gT4!? 
zNdJ-kBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<>1RuFr2k0&k^Uq7NBWQTAL~EXf2{vl z|FQmK{m1%`^&jg$*3TK_SpTtpAUMbRkM$qxKh}S&|5*RA{$u^e`j7P=>p#}dDCAiG zvHoNI$NHItFfU=Ya;*PY|FQmK{m1%`^&jg$)_<)3SpTv9WBteakM$qxKi1DZ1X9ki z{$u^e`j7P=>p#|itp8a5vHoNI$NEq7a}qhxf1>|HKQoaN{U`cQ^q=TI(a%xjL_Y+b z6a6RpPxPPYKhb}p|3v?Z{uBMMWw?u+=s(eaqW?tyiT)G)j89JVpXfi)f1>|H|B3z+ z{U`cQ^m7|I(SM@_<-YpXfi) zf1;l;%BlWS{ipg*^`GiL)qkr0RR5{|Q~jsOa+g zs{d5~seVvAr}|IzpXz5&a;l$g%BlWS{ftvi^`GiL)qkr0RR5{|Q~jsOa+gs{d5~ss2;_r}|IzpXxu=f2#jXKlhq5{b%~m^fRzI(|@M_ zO#hkwGyP}!S;Cy@XJm7x|4jdx{xkh&`p@*A=|9tdrvFU;nf^2VXZp|dpXoo-f2RLT z|C#q^`GlM*MF}6T>rU#b~5Ms&-I_{Ki7Y*|6KpM z{&W53`p@;B>p$1espVY%x&Cwgyiv~epX)!@&mQGmKgSjZDd+mn^`GlM*MF}6T>rWL zbN%P~&-I_{KiAJJ#c_e0>vv!v=laj}pX)!@@8CfAxSZ=}@ z{!9Ir`Y-ig>c7-~sh^jP3k|u{f2rT4hFt1rXmhFmQvap?OZ}JnFZHvvxzvBD|5E>@ ze%Bpxsh?jAbDK;3m-;XDU+TZqf2sdc|E2y*{g?VL^@ z{!9Ir`Y-ig>SrXw_2x=H+nX!>SNgB?yR;U5GFSSq^k3<}($7=oO8=F94mcceuJm8& zztVrD|4RRrets@j`mgkJbh*-hrTH(ztVrD|4RRr{ww`g`mgj~>A%u{rQdPpT%Z22t^Zp8wf<}U z*ZQyZU+cftf35#o|F!;W{fug^^yuk~N+zt(@P|5`tnm}~vl z`mgn0>%Z22t^Zp8wf<}U*ZQyZU+d=>bFH6=&5eGy4RWLZM*ofe8~r!>Z}i{jztMlA z|3?3f{u})_`fv2#=)ckLHbQRn-{`;5f203K|Be0|{Wtpg@Z9LX(SM`=M*ofe8~r!> zZ}i{jztQiyNpAGt=)ci_qyI+#jehr&a-;u7|Be0|{Wtn=^xx>e(SM`=M*ofe8~r!> z-QUiQ{u})_`dRhd>c7>0tN&L2t^Qm6xB74O-|D~Bf2;pi|E>O8{kQsW_225h)qku1 zR{yR3Tm85CZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t$r6>a;yJV|E+%JG`IS1 z_225h)qku1R{yR3Tm85CdHdYzztw-M|5pF4eg;3c`fv5$>c7>0tAD0{rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rvFa=o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$ zJNA%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj z{dfBB^xx^f(|@P`PXC?$JNoqo!0hN>t~=d*FV=k*FV=k*FV?qwpr%- z=lbXR=lbXR=lb1%h}$Wd>!0hN>u0Gm*FV?qno;KZ=lbXR=lbXR=lXf;%=ORpbJua3 zEOY&H{d4_{bLRS4{>=5eSspe!?v}@eq|Ei->%Z53um4{Ez5aXs_xkVk-|N5Ef3M#O z{y4#(d;RzN@Acp7zt_)F=U%@XFuB)%um4`Z8v?o4&x7Y)|GoZu{rCFs_228i*Y9pj z?)Bg6zt`_-Q||TO>%Z53um4{Ez5aXs_xkzr-0Q#Bf3N>u|GoZu{rCFapUJ&`H)wLN z|6c#S{(JrR`tS9-Z;%K55BeYUKj?QUDi8V}^grl-(C-F99`rxxf6)J+-|db(=zq}v zp#MR?zdb=7^grl#Z7L7?AM`)yf6)J+-;KLG=yw+<5BeYUKj?qZ|DgXt|AYPq{SW#d z^grl-(Ep(SLBD%CdC>o$|3UwQe)o0qp#MStgZ>Bo5BeYUySo#2ck-bBLH~pP2mNl3 zVMSl z(pDbzKk9eqCy)9c^*`!=)c>geQU9a5C0$jKm33A|M36e|NT96J^K0o@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe 
z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36i|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#S zU;e-RfBFCN|KhYfBFCN|KR;5qsDDxaqW(qw z{D1lXF6v*@zo>svKmT9;zx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-R zfBFCN|KI^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0q9KsO#e*(OusV#bq1g_{WJYD{WJYD{WJYD{WJYD{WJYD z{WJYD{WJYD{WJYD{WJZ}0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhSclz)2 z-|4^8?+iel0jM(obq1h!`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{muZ?8Gz39&-Kss&-FV4P-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P-s`{Df3N>u|GoZu{rCFs_228i 
z*Y6BKodM{*{(JrR`tS8S15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfZprB*MG17UjM!Rd;RzN|Gz5kWH+*`fTAeh4x$FddjmiS1|TxiO*T}}6eAGg zgV5R1{REbQf?|+2BP=WIZP^hoZnGZxANn8qANn8qANn8qANn8qANn8qANn8qANn8q zANn8qANn8qANn8qANn8qANn8qF#y8=3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b z127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A z01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk| z!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&A01N{#48Sk|{{6#g>(Ky40~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPVBhMe0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz}|j8X#k@Ej0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP 
z8o+1(qXCQtFdD#U0HXnn1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1pdad|0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180R2cm4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180DY~W1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0rV67G=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFF8!DOOaG;x1`rJ(8bCCFF8!DOOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#UU-~co zm;OutrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Z zf9b#UU;1zTxBgrIt^d}4>%aBi`fvTW{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q$ z{kQ&G|E>Slf9t>X-}-3)(Ey?WL<5Kh5Dg$2Ks1180MP)t_22qw0MP)V0Yn4n)_?22 z_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt)B)E4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1L$Y^ zX#mjxq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCkSPXmYs z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dnn`)Iy=**Kfn` z4AyxJe=wMRF4SzRyX*axm%$d*gn;-wuzn8Zgzxux$?s%;KeANFQ^M7~# z`29P7`u?47-TCS-KK1P(|9!)qw@3Q#owrB&>aX8@%D--W_bK1q@hR^tah-Rre+?e% zovX?7&dbty=LG(F=XmgW=aAcZ=QPN9?|r~|@5%4H_m_X(JB@hWJ5qDr+k|`G+a+jt z{lWQQ&BpLsgITci!PC$A;6%IOYlA~O=YvxM=gIp0c`_M#p3FL(CsQ>B-^-Ip55q4E z-d3I`&%x)(G3@i?u-AEVdgwfT>qq@$!|r*q)9F0fx^kZE|2WSktk1KV$n$J!<~+Mr zIM4q0&NCC>JllzXp6v@i&o+1%eDN2{>*vK literal 0 HcmV?d00001 diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py index e488f1d0c1..f3b39711bd 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py @@ -12,10 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import io import os -import sentencepiece import tensorflow as tf from keras_nlp.tests.test_case import TestCase @@ -25,17 +23,9 @@ class SentencePieceTokenizerTest(TestCase): def setUp(self): super().setUp() - bytes_io = io.BytesIO() - vocab_data = tf.data.Dataset.from_tensor_slices( - ["the quick brown fox."] + self.proto = os.path.join( + self.get_test_data_dir(), "tokenizer_test_vocab.spm" ) - sentencepiece.SentencePieceTrainer.train( - sentence_iterator=vocab_data.as_numpy_iterator(), - model_writer=bytes_io, - vocab_size=7, - model_type="WORD", - ) - self.proto = bytes_io.getvalue() def test_tokenize(self): input_data = ["the quick brown fox."] @@ -112,15 +102,13 @@ def test_error_id_out_of_vocabulary(self): with self.assertRaises(ValueError): tokenizer.id_to_token(-1) - def test_from_file(self): - filepath = os.path.join(self.get_temp_dir(), "model.txt") - input_data = ["the quick brown fox."] - with tf.io.gfile.GFile(filepath, "wb") as file: - file.write(self.proto) + def test_from_bytes(self): + with tf.io.gfile.GFile(self.proto, "rb") as file: + proto = file.read() tokenizer = SentencePieceTokenizer( - proto=filepath, + proto=proto, ) - output_data = tokenizer(input_data) + output_data = tokenizer(["the quick brown fox."]) self.assertAllEqual(output_data, [[6, 5, 3, 4]]) def test_tokenize_then_batch(self): diff --git a/tools/sentencepiece_testing/__init__.py b/tools/sentencepiece_testing/__init__.py new file mode 100644 index 0000000000..ba0c2545e4 --- /dev/null +++ b/tools/sentencepiece_testing/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tools/sentencepiece_testing/create_albert_test_proto.py b/tools/sentencepiece_testing/create_albert_test_proto.py new file mode 100644 index 0000000000..80e82b3cd1 --- /dev/null +++ b/tools/sentencepiece_testing/create_albert_test_proto.py @@ -0,0 +1,37 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
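+# Usage sketch (an assumption, not part of the tested code path: it relies on
+# the repository root being on `sys.path`, e.g. by running from the repo root):
+#
+#   python -m tools.sentencepiece_testing.create_albert_test_proto
+#
+# This rewrites `keras_nlp/tests/test_data/albert_test_vocab.spm` through the
+# `train_sentencepiece` helper imported below.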
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox", "the earth is round"],
+        "albert_test_vocab.spm",
+        vocab_size=12,
+        model_type="WORD",
+        pad_id=0,
+        unk_id=1,
+        bos_id=2,
+        eos_id=3,
+        pad_piece="<pad>",
+        unk_piece="<unk>",
+        bos_piece="[CLS]",
+        eos_piece="[SEP]",
+        user_defined_symbols="[MASK]",
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/sentencepiece_testing/create_deberta_v3_test_proto.py b/tools/sentencepiece_testing/create_deberta_v3_test_proto.py
new file mode 100644
index 0000000000..c3f98867c5
--- /dev/null
+++ b/tools/sentencepiece_testing/create_deberta_v3_test_proto.py
@@ -0,0 +1,37 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox", "the earth is round"],
+        "deberta_v3_test_vocab.spm",
+        vocab_size=12,
+        model_type="WORD",
+        pad_id=0,
+        bos_id=1,
+        eos_id=2,
+        unk_id=3,
+        pad_piece="[PAD]",
+        bos_piece="[CLS]",
+        eos_piece="[SEP]",
+        unk_piece="[UNK]",
+        user_defined_symbols="[MASK]",
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/sentencepiece_testing/create_f_net_test_proto.py b/tools/sentencepiece_testing/create_f_net_test_proto.py
new file mode 100644
index 0000000000..949a5692f9
--- /dev/null
+++ b/tools/sentencepiece_testing/create_f_net_test_proto.py
@@ -0,0 +1,37 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox", "the earth is round"],
+        "f_net_test_vocab.spm",
+        vocab_size=12,
+        model_type="WORD",
+        pad_id=0,
+        unk_id=1,
+        bos_id=2,
+        eos_id=3,
+        pad_piece="<pad>",
+        unk_piece="<unk>",
+        bos_piece="[CLS]",
+        eos_piece="[SEP]",
+        user_defined_symbols="[MASK]",
+    )
+
+
+if __name__ == "__main__":
+    main()
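A quick way to sanity-check the protos these scripts write is the `sentencepiece` runtime API. The following is a sketch (it assumes the ALBERT proto above has been regenerated; the expected values simply mirror its training arguments):

    import sentencepiece

    sp = sentencepiece.SentencePieceProcessor(
        model_file="keras_nlp/tests/test_data/albert_test_vocab.spm"
    )
    print(sp.vocab_size())    # 12, matching `vocab_size` above
    print(sp.id_to_piece(2))  # "[CLS]", matching `bos_id`/`bos_piece` above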
diff --git a/tools/sentencepiece_testing/create_no_special_token_proto.py b/tools/sentencepiece_testing/create_no_special_token_proto.py
new file mode 100644
index 0000000000..c13ef6e05a
--- /dev/null
+++ b/tools/sentencepiece_testing/create_no_special_token_proto.py
@@ -0,0 +1,30 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["abc"],
+        "no_special_token_vocab.spm",
+        vocab_size=5,
+        pad_id=-1,
+        eos_id=-1,
+        bos_id=-1,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/sentencepiece_testing/create_sentence_piece_tokenizer_proto.py b/tools/sentencepiece_testing/create_sentence_piece_tokenizer_proto.py
new file mode 100644
index 0000000000..a40eade848
--- /dev/null
+++ b/tools/sentencepiece_testing/create_sentence_piece_tokenizer_proto.py
@@ -0,0 +1,28 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox."],
+        "tokenizer_test_vocab.spm",
+        vocab_size=7,
+        model_type="WORD",
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/sentencepiece_testing/create_t5_test_proto.py b/tools/sentencepiece_testing/create_t5_test_proto.py
new file mode 100644
index 0000000000..b7e28160e5
--- /dev/null
+++ b/tools/sentencepiece_testing/create_t5_test_proto.py
@@ -0,0 +1,36 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox", "the earth is round"],
+        "t5_test_vocab.spm",
+        vocab_size=11,
+        model_type="WORD",
+        bos_id=-1,
+        pad_id=0,
+        eos_id=1,
+        unk_id=2,
+        pad_piece="<pad>",
+        eos_piece="</s>",
+        unk_piece="<unk>",
+        user_defined_symbols="[MASK]",
+    )
+
+
+if __name__ == "__main__":
+    main()
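Unlike the other protos, the T5 proto above drops the BOS piece entirely (`bos_id=-1`) and orders pad/eos/unk as 0/1/2. A minimal check of that layout, again a sketch assuming the proto has been regenerated:

    import sentencepiece

    sp = sentencepiece.SentencePieceProcessor(
        model_file="keras_nlp/tests/test_data/t5_test_vocab.spm"
    )
    assert sp.pad_id() == 0 and sp.eos_id() == 1 and sp.unk_id() == 2
    assert sp.id_to_piece(1) == "</s>"  # the `eos_piece` set above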
diff --git a/tools/sentencepiece_testing/create_xlm_roberta_test_proto.py b/tools/sentencepiece_testing/create_xlm_roberta_test_proto.py
new file mode 100644
index 0000000000..988d161f99
--- /dev/null
+++ b/tools/sentencepiece_testing/create_xlm_roberta_test_proto.py
@@ -0,0 +1,37 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from tools.sentencepiece_testing.utils import train_sentencepiece
+
+
+def main():
+    train_sentencepiece(
+        ["the quick brown fox", "the earth is round"],
+        "xlm_roberta_test_vocab.spm",
+        vocab_size=12,
+        model_type="WORD",
+        pad_id=0,
+        unk_id=1,
+        bos_id=2,
+        eos_id=3,
+        pad_piece="<pad>",
+        unk_piece="<unk>",
+        bos_piece="[CLS]",
+        eos_piece="[SEP]",
+        user_defined_symbols="[MASK]",
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tools/sentencepiece_testing/utils.py b/tools/sentencepiece_testing/utils.py
new file mode 100644
index 0000000000..9deebd9737
--- /dev/null
+++ b/tools/sentencepiece_testing/utils.py
@@ -0,0 +1,33 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import io
+import pathlib
+
+import sentencepiece
+
+
+def train_sentencepiece(data, filename, *args, **kwargs):
+    bytes_io = io.BytesIO()
+    sentencepiece.SentencePieceTrainer.train(
+        sentence_iterator=iter(data), model_writer=bytes_io, *args, **kwargs
+    )
+    with open(
+        pathlib.Path(__file__).parent.parent.parent
+        / "keras_nlp"
+        / "tests"
+        / "test_data"
+        / filename,
+        mode="wb",
+    ) as f:
+        f.write(bytes_io.getbuffer())

From 6b66ad8e706adcd86ed4c1a45a9a843d342b1d4c Mon Sep 17 00:00:00 2001
From: Abheesht
Date: Sat, 28 Oct 2023 06:26:24 +0530
Subject: [PATCH 19/87] Fix XLM-RoBERTa detokenize() (#1289)

---
 .../xlm_roberta/xlm_roberta_tokenizer.py      | 26 ++++++++++++++++---
 1 file changed, 23 insertions(+), 3 deletions(-)

diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
index c0b91abb3c..5e7873a455 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
@@ -156,9 +156,29 @@ def tokenize(self, inputs):
         # Shift the token IDs right by one.
         return tf.add(tokens, 1)
 
-    def detokenize(self, ids):
-        ids = tf.ragged.boolean_mask(ids, tf.not_equal(ids, self.mask_token_id))
-        return super().detokenize(ids)
+    def detokenize(self, inputs):
+        if inputs.dtype == tf.string:
+            return super().detokenize(inputs)
+
+        tokens = tf.ragged.boolean_mask(
+            inputs, tf.not_equal(inputs, self.mask_token_id)
+        )
+
+        # Shift the token IDs left by one.
+        tokens = tf.subtract(tokens, 1)
+
+        # Correct `unk_token_id`, `end_token_id`, `start_token_id`, respectively.
+        # Note: The `pad_token_id` is taken as 0 (`unk_token_id`) since the
+        # proto does not contain `pad_token_id`. This mapping of the pad token
+        # is done automatically by the above subtraction.
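+        # As a concrete example (assuming the standard special token layout
+        # of this tokenizer: start=0, pad=1, end=2, unk=3), the left shift
+        # maps start/end/unk to -1, 1 and 2, and the three corrections below
+        # remap unk (2 -> 0), end (1 -> 2) and start (-1 -> 1) back onto the
+        # proto's own ids.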
+        tokens = tf.where(tf.equal(tokens, self.unk_token_id - 1), 0, tokens)
+        tokens = tf.where(tf.equal(tokens, self.end_token_id - 1), 2, tokens)
+        tokens = tf.where(tf.equal(tokens, self.start_token_id - 1), 1, tokens)
+
+        # Note: Even though we map `"<s>"` and `"</s>"` to the correct IDs,
+        # the `detokenize` method will return empty strings for these tokens.
+        # This is a vagary of the `sentencepiece` library.
+        return super().detokenize(tokens)
 
     @classproperty
     def presets(cls):

From bd7745068489eac205fb6919695857f3b18d1151 Mon Sep 17 00:00:00 2001
From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com>
Date: Fri, 27 Oct 2023 18:21:47 -0700
Subject: [PATCH 20/87] Correct tie_embedding_weights and add logit checking
 (#1288)

* Add tie_embedding_weights and logit checking

* Fix formatting

* Remove comments

* Change backbone default

* Change weights hashes

* Fix first hash
---
 keras_nlp/models/t5/t5_backbone.py            |  2 +-
 keras_nlp/models/t5/t5_presets.py             | 18 ++++++----
 .../convert_t5_checkpoints.py                 | 36 +++++++++++++++----
 3 files changed, 42 insertions(+), 14 deletions(-)

diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py
index 9d64edd3bc..314a5d68df 100644
--- a/keras_nlp/models/t5/t5_backbone.py
+++ b/keras_nlp/models/t5/t5_backbone.py
@@ -82,7 +82,7 @@ def __init__(
         activation="relu",
         use_gated_activation=True,
         layer_norm_epsilon=1e-06,
-        tie_embedding_weights=False,
+        tie_embedding_weights=True,
         **kwargs,
     ):
         # Encoder inputs
diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py
index 1c737a863b..dd2bea7a4e 100644
--- a/keras_nlp/models/t5/t5_presets.py
+++ b/keras_nlp/models/t5/t5_presets.py
@@ -36,10 +36,11 @@
             "activation": "relu",
             "use_gated_activation": False,
             "layer_norm_epsilon": 1e-06,
+            "tie_embedding_weights": True,
         },
         "preprocessor_config": {},
         "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/model.weights.h5",
-        "weights_hash": "5a241ea61142eaf96ac1805898a2f2d1",
+        "weights_hash": "2e10b5f72405d464ee55026b07e60741",
         "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/vocab.spm",
         "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3",
     },
@@ -64,10 +65,11 @@
             "activation": "relu",
             "use_gated_activation": False,
             "layer_norm_epsilon": 1e-06,
+            "tie_embedding_weights": True,
         },
         "preprocessor_config": {},
         "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/model.weights.h5",
-        "weights_hash": "9bef4c6650d91d1ea438ee4a2bea47ad",
+        "weights_hash": "bed6ef276cfe83d1323467051211978d",
         "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/vocab.spm",
         "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3",
     },
@@ -92,10 +94,11 @@
             "activation": "relu",
             "use_gated_activation": False,
             "layer_norm_epsilon": 1e-06,
+            "tie_embedding_weights": True,
         },
         "preprocessor_config": {},
         "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/model.weights.h5",
-        "weights_hash": "eab8eee1bad033e65324a71cd6e5a8e9",
+        "weights_hash": "7854a05c2e6812899bf6f0f104792cda",
         "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/vocab.spm",
         "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3",
     },
@@ -121,10 +124,11 @@
             "activation": "keras_nlp>gelu_approximate",
             "use_gated_activation": True,
             "layer_norm_epsilon": 1e-06,
+            "tie_embedding_weights": False,
         },
         "preprocessor_config": {},
         "weights_url": 
"https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/model.weights.h5", - "weights_hash": "4e39b0bab56606a9ab2b8e52a6bc7a9f", + "weights_hash": "aa0fbaddb1759ef313bbc4f9e4f1e197", "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/vocab.spm", "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, @@ -149,10 +153,11 @@ "activation": "keras_nlp>gelu_approximate", "use_gated_activation": True, "layer_norm_epsilon": 1e-06, + "tie_embedding_weights": False, }, "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/model.weights.h5", - "weights_hash": "b529270c5361db36d359a46403532b5c", + "weights_hash": "84a10bec83fd093931bb2a6264115d31", "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/vocab.spm", "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, @@ -177,10 +182,11 @@ "activation": "keras_nlp>gelu_approximate", "use_gated_activation": True, "layer_norm_epsilon": 1e-06, + "tie_embedding_weights": False, }, "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/model.weights.h5", - "weights_hash": "50b8d3c88fc10db07e495d79ff29a1b6", + "weights_hash": "513f530ce790efa7e261c0ef965f3697", "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/vocab.spm", "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, diff --git a/tools/checkpoint_conversion/convert_t5_checkpoints.py b/tools/checkpoint_conversion/convert_t5_checkpoints.py index b04a9a319c..6f16a64e6b 100644 --- a/tools/checkpoint_conversion/convert_t5_checkpoints.py +++ b/tools/checkpoint_conversion/convert_t5_checkpoints.py @@ -81,6 +81,12 @@ def convert_checkpoints(hf_model): keras_nlp_model.get_layer("token_embedding").embeddings.assign( hf_wts[f"{section}.embed_tokens.weight"] ) + if not keras_nlp_model.tie_embedding_weights: + keras_nlp_model.get_layer( + "token_embedding" + ).reverse_embeddings.assign( + hf_wts["lm_head.weight"].transpose(1, 0).numpy() + ) # Query, key, value, and output projectors in self-attention keras_nlp_model.get_layer( @@ -308,17 +314,18 @@ def check_output( print(k, v) # Forward pass - keras_outputs = keras_model(keras_inputs) - hf_outputs = hf_model(**hf_inputs) + keras_out = keras_model(keras_inputs) + hf_out = hf_model(**hf_inputs, output_hidden_states=True) # Only compare non-padded token ids. 
- keras_outputs = keras_outputs["decoder_sequence_output"] + keras_hidden_states = keras_out["decoder_sequence_output"] + hf_hidden_states = hf_out.decoder_hidden_states[-1] + keras_outputs = ops.take_along_axis( - keras_outputs, ops.where(decoder_padding_mask) + keras_hidden_states, ops.where(decoder_padding_mask) ) - hf_outputs = hf_outputs.last_hidden_state hf_outputs = ops.take_along_axis( - hf_outputs, ops.where(decoder_padding_mask) + hf_hidden_states, ops.where(decoder_padding_mask) ) print("-> KerasNLP output:", keras_outputs[0:5]) @@ -327,6 +334,21 @@ def check_output( keras_outputs.detach().numpy(), hf_outputs.detach().numpy(), atol=1e-5 ) + if keras_model.tie_embedding_weights: + keras_hidden_states = keras_hidden_states * ( + keras_model.hidden_dim**-0.5 + ) + + keras_logits = keras_model.token_embedding( + keras_hidden_states, reverse=True + ) + hf_logits = hf_out.logits + print("-> KerasNLP logits:", keras_logits[0:5]) + print("-> HF logits:", hf_logits[0:5]) + np.testing.assert_allclose( + keras_logits.detach().numpy(), hf_logits.detach().numpy(), atol=1e-3 + ) + def count_params(weights): shapes = [v.shape for v in weights] @@ -339,7 +361,7 @@ def main(_): os.mkdir(f"./{FLAGS.preset}") print("\n-> Convert weights.") - hf_model = transformers.AutoModel.from_pretrained(hf_id) + hf_model = transformers.T5ForConditionalGeneration.from_pretrained(hf_id) keras_model = convert_checkpoints(hf_model) # Save the model. From 775ad68984cf13aa301880e8961d2f7b706565ed Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 30 Oct 2023 12:13:19 -0700 Subject: [PATCH 21/87] Add detokenize testing for model tokenizers (#1290) --- keras_nlp/models/albert/albert_tokenizer_test.py | 4 ++-- keras_nlp/models/bart/bart_tokenizer_test.py | 5 +++++ keras_nlp/models/bert/bert_tokenizer_test.py | 6 +++--- .../models/deberta_v3/deberta_v3_tokenizer_test.py | 4 ++-- .../models/distil_bert/distil_bert_tokenizer_test.py | 6 +++--- keras_nlp/models/f_net/f_net_tokenizer_test.py | 4 ++-- keras_nlp/models/roberta/roberta_tokenizer_test.py | 5 +++++ keras_nlp/models/t5/t5_tokenizer_test.py | 4 ++-- keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py | 3 --- .../models/xlm_roberta/xlm_roberta_tokenizer_test.py | 4 ++-- keras_nlp/tests/test_case.py | 10 +++++++++- 11 files changed, 35 insertions(+), 20 deletions(-) diff --git a/keras_nlp/models/albert/albert_tokenizer_test.py b/keras_nlp/models/albert/albert_tokenizer_test.py index ca80ace281..74ad0604dc 100644 --- a/keras_nlp/models/albert/albert_tokenizer_test.py +++ b/keras_nlp/models/albert/albert_tokenizer_test.py @@ -28,14 +28,14 @@ def setUp(self): self.get_test_data_dir(), "albert_test_vocab.spm" ) } - self.input_data = ["the quick brown fox.", "the earth is round."] + self.input_data = ["the quick brown fox", "the earth is round"] def test_tokenizer_basics(self): self.run_preprocessing_layer_test( cls=AlbertTokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, - expected_output=[[5, 10, 6, 1], [5, 7, 9, 1]], + expected_output=[[5, 10, 6, 8], [5, 7, 9, 11]], ) def test_errors_missing_special_tokens(self): diff --git a/keras_nlp/models/bart/bart_tokenizer_test.py b/keras_nlp/models/bart/bart_tokenizer_test.py index acfdbc3d87..5a0015357b 100644 --- a/keras_nlp/models/bart/bart_tokenizer_test.py +++ b/keras_nlp/models/bart/bart_tokenizer_test.py @@ -37,7 +37,12 @@ def test_tokenizer_basics(self): cls=BartTokenizer, init_kwargs=self.init_kwargs, input_data=self.input_data, + # 
TODO: </s> should not get tokenized as <s>
             expected_output=[[0, 4, 5, 6, 4, 7, 0, 1], [4, 5, 4, 7]],
+            expected_detokenize_output=[
+                "<s> airplane at airport<s><pad>",
+                " airplane airport",
+            ],
         )
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/bert/bert_tokenizer_test.py b/keras_nlp/models/bert/bert_tokenizer_test.py
index 29ed902a62..e53419dab4 100644
--- a/keras_nlp/models/bert/bert_tokenizer_test.py
+++ b/keras_nlp/models/bert/bert_tokenizer_test.py
@@ -24,20 +24,20 @@ def setUp(self):
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
         self.vocab += ["the", "quick", "brown", "fox"]
         self.init_kwargs = {"vocabulary": self.vocab}
-        self.input_data = ["THE QUICK BROWN FOX.", "THE FOX."]
+        self.input_data = ["THE QUICK BROWN FOX", "THE FOX"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=BertTokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[5, 6, 7, 8, 1], [5, 8, 1]],
+            expected_output=[[5, 6, 7, 8], [5, 8]],
         )
 
     def test_lowercase(self):
         tokenizer = BertTokenizer(vocabulary=self.vocab, lowercase=True)
         output = tokenizer(self.input_data)
-        self.assertAllEqual(output, [[9, 10, 11, 12, 1], [9, 12, 1]])
+        self.assertAllEqual(output, [[9, 10, 11, 12], [9, 12]])
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
index fcaf637974..3c17cfa397 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer_test.py
@@ -28,14 +28,14 @@ def setUp(self):
         )
         self.tokenizer = DebertaV3Tokenizer(proto=proto)
         self.init_kwargs = {"proto": proto}
-        self.input_data = ["the quick brown fox.", "the earth is round."]
+        self.input_data = ["the quick brown fox", "the earth is round"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=DebertaV3Tokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[5, 10, 6, 3], [5, 7, 9, 3]],
+            expected_output=[[5, 10, 6, 8], [5, 7, 9, 11]],
         )
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
index b025b4e7fb..e4bfba41d3 100644
--- a/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
+++ b/keras_nlp/models/distil_bert/distil_bert_tokenizer_test.py
@@ -26,20 +26,20 @@ def setUp(self):
         self.vocab += ["THE", "QUICK", "BROWN", "FOX"]
         self.vocab += ["the", "quick", "brown", "fox"]
         self.init_kwargs = {"vocabulary": self.vocab}
-        self.input_data = ["THE QUICK BROWN FOX.", "THE FOX."]
+        self.input_data = ["THE QUICK BROWN FOX", "THE FOX"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=DistilBertTokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[5, 6, 7, 8, 1], [5, 8, 1]],
+            expected_output=[[5, 6, 7, 8], [5, 8]],
         )
 
     def test_lowercase(self):
         tokenizer = DistilBertTokenizer(vocabulary=self.vocab, lowercase=True)
         output = tokenizer(self.input_data)
-        self.assertAllEqual(output, [[9, 10, 11, 12, 1], [9, 12, 1]])
+        self.assertAllEqual(output, [[9, 10, 11, 12], [9, 12]])
 
     def test_errors_missing_special_tokens(self):
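Note that these hunks all drop the trailing period from the test inputs: the small word-level test vocabularies have no piece for "fox.", so it tokenizes to the unknown token (visible as the unknown-token ids ending the old `expected_output` values), and the round-trip `detokenize(tokenize(x)) == x` enforced by the new `expected_detokenize_output` machinery (see the `test_case.py` hunk below) could not hold. A minimal sketch of the round-trip with the checked-in ALBERT proto (the ids mirror the `expected_output` values in the ALBERT hunk above):

    from keras_nlp.tokenizers import SentencePieceTokenizer

    tokenizer = SentencePieceTokenizer(
        proto="keras_nlp/tests/test_data/albert_test_vocab.spm"
    )
    ids = tokenizer(["the quick brown fox"])  # -> [[5, 10, 6, 8]]
    print(tokenizer.detokenize(ids))  # recovers "the quick brown fox"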
diff --git a/keras_nlp/models/f_net/f_net_tokenizer_test.py b/keras_nlp/models/f_net/f_net_tokenizer_test.py
index 8d3511dee7..3dde34e849 100644
--- a/keras_nlp/models/f_net/f_net_tokenizer_test.py
+++ b/keras_nlp/models/f_net/f_net_tokenizer_test.py
@@ -28,14 +28,14 @@ def setUp(self):
                 self.get_test_data_dir(), "f_net_test_vocab.spm"
             )
         }
-        self.input_data = ["the quick brown fox.", "the earth is round."]
+        self.input_data = ["the quick brown fox", "the earth is round"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=FNetTokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[5, 10, 6, 1], [5, 7, 9, 1]],
+            expected_output=[[5, 10, 6, 8], [5, 7, 9, 11]],
         )
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/roberta/roberta_tokenizer_test.py b/keras_nlp/models/roberta/roberta_tokenizer_test.py
index e5fcb1867d..3b2305608d 100644
--- a/keras_nlp/models/roberta/roberta_tokenizer_test.py
+++ b/keras_nlp/models/roberta/roberta_tokenizer_test.py
@@ -37,7 +37,12 @@ def test_tokenizer_basics(self):
             cls=RobertaTokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
+            # TODO: </s> should not get tokenized as <s>
             expected_output=[[0, 4, 5, 6, 4, 7, 0, 1], [4, 5, 4, 7]],
+            expected_detokenize_output=[
+                "<s> airplane at airport<s><pad>",
+                " airplane airport",
+            ],
         )
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py
index be07b486e4..77ad734660 100644
--- a/keras_nlp/models/t5/t5_tokenizer_test.py
+++ b/keras_nlp/models/t5/t5_tokenizer_test.py
@@ -26,14 +26,14 @@ def setUp(self):
             # Generated using create_t5_test_proto.py
             "proto": os.path.join(self.get_test_data_dir(), "t5_test_vocab.spm")
         }
-        self.input_data = ["the quick brown fox.", "the earth is round."]
+        self.input_data = ["the quick brown fox", "the earth is round"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=T5Tokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[4, 9, 5, 2], [4, 6, 8, 2]],
+            expected_output=[[4, 9, 5, 7], [4, 6, 8, 10]],
         )
 
     def test_errors_missing_special_tokens(self):
diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
index 5e7873a455..eacdcc7337 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py
@@ -157,9 +157,6 @@ def tokenize(self, inputs):
         return tf.add(tokens, 1)
 
     def detokenize(self, inputs):
-        if inputs.dtype == tf.string:
-            return super().detokenize(inputs)
-
         tokens = tf.ragged.boolean_mask(
             inputs, tf.not_equal(inputs, self.mask_token_id)
         )
diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py
index 9ec205c725..2057eff9eb 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer_test.py
@@ -30,14 +30,14 @@ def setUp(self):
                 self.get_test_data_dir(), "xlm_roberta_test_vocab.spm"
             )
         }
-        self.input_data = ["the quick brown fox.", "the earth is round."]
+        self.input_data = ["the quick brown fox", "the earth is round"]
 
     def test_tokenizer_basics(self):
         self.run_preprocessing_layer_test(
             cls=XLMRobertaTokenizer,
             init_kwargs=self.init_kwargs,
             input_data=self.input_data,
-            expected_output=[[6, 11, 7, 2], [6, 8, 10, 2]],
+            expected_output=[[6, 11, 7, 9], [6, 8, 10, 12]],
         )
 
     @pytest.mark.large
diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py
index ec29b2add6..6fe72ed497 100644
--- a/keras_nlp/tests/test_case.py
+++ b/keras_nlp/tests/test_case.py
@@ -24,6 +24,7 @@
 from keras_nlp.backend 
import config from keras_nlp.backend import keras from keras_nlp.backend import ops +from keras_nlp.tokenizers.tokenizer import Tokenizer from keras_nlp.utils.tensor_utils import is_float_dtype from keras_nlp.utils.tensor_utils import standardize_dtype @@ -203,7 +204,7 @@ def run_preprocessing_layer_test( init_kwargs, input_data, expected_output=None, - batch_size=2, + expected_detokenize_output=None, ): """Run basic tests for a preprocessing layer.""" layer = cls(**init_kwargs) @@ -219,6 +220,13 @@ def run_preprocessing_layer_test( else: output = layer(input_data) + # For tokenizers only, also check detokenize. + if isinstance(layer, Tokenizer): + if not expected_detokenize_output: + expected_detokenize_output = input_data + detokenize_output = layer.detokenize(output) + self.assertAllEqual(detokenize_output, expected_detokenize_output) + # Run with an unbatched dataset. output_ds = ds.map(layer).ragged_batch(1_000) self.assertAllClose(output, output_ds.get_single_element()) From 9edfdb22dec61fc9620e978dbd64b9f8dcd34c1a Mon Sep 17 00:00:00 2001 From: Abheesht Date: Fri, 3 Nov 2023 08:48:19 +0530 Subject: [PATCH 22/87] Fix Whisper (#1287) * Fix Whisper * Add appropriate decorators * Simplify * Fix * Fix attempt for tf.keras UT * Pass bias_axes explicitly as None * Debug commit * Debug commit-1 --- .../modeling/cached_multi_head_attention.py | 1 + .../models/whisper/whisper_backbone_test.py | 2 - .../whisper_cached_multi_head_attention.py | 155 ++++++++++++++++++ keras_nlp/models/whisper/whisper_decoder.py | 127 ++++++++++++-- keras_nlp/models/whisper/whisper_encoder.py | 86 +++++++++- 5 files changed, 347 insertions(+), 24 deletions(-) create mode 100644 keras_nlp/models/whisper/whisper_cached_multi_head_attention.py diff --git a/keras_nlp/layers/modeling/cached_multi_head_attention.py b/keras_nlp/layers/modeling/cached_multi_head_attention.py index 16124328d9..3f30cb16ad 100644 --- a/keras_nlp/layers/modeling/cached_multi_head_attention.py +++ b/keras_nlp/layers/modeling/cached_multi_head_attention.py @@ -86,6 +86,7 @@ def call( ): if ( hasattr(self, "_build_from_signature") + and hasattr(self, "_built_from_signature") and not self._built_from_signature ): self._build_from_signature(query=query, value=value, key=key) diff --git a/keras_nlp/models/whisper/whisper_backbone_test.py b/keras_nlp/models/whisper/whisper_backbone_test.py index 41fc6df33c..63d57615af 100644 --- a/keras_nlp/models/whisper/whisper_backbone_test.py +++ b/keras_nlp/models/whisper/whisper_backbone_test.py @@ -75,7 +75,6 @@ def test_saved_model(self): input_data=self.input_data, ) - @pytest.mark.skip # TODO: fix weight mismatch error. @pytest.mark.large def test_smallest_preset(self): self.run_preset_test( @@ -105,7 +104,6 @@ def test_smallest_preset(self): }, ) - @pytest.mark.skip # TODO: fix weight mismatch error. @pytest.mark.extra_large def test_all_presets(self): for preset in WhisperBackbone.presets: diff --git a/keras_nlp/models/whisper/whisper_cached_multi_head_attention.py b/keras_nlp/models/whisper/whisper_cached_multi_head_attention.py new file mode 100644 index 0000000000..01ad18ba4b --- /dev/null +++ b/keras_nlp/models/whisper/whisper_cached_multi_head_attention.py @@ -0,0 +1,155 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Whisper Cached Multi-Head Attention layer."""
+
+import collections
+import string
+
+import keras_nlp
+from keras_nlp.backend import keras
+
+
+def _index_to_einsum_variable(i):
+    """Converts an index to an einsum variable name.
+
+    We simply map indices to lowercase characters, e.g. 0 -> 'a', 1 -> 'b'.
+    """
+    return string.ascii_lowercase[i]
+
+
+def _build_proj_equation(free_dims, bound_dims, output_dims):
+    """Builds an einsum equation for projections inside multi-head attention."""
+    input_str = ""
+    kernel_str = ""
+    output_str = ""
+    bias_axes = ""
+    letter_offset = 0
+    for i in range(free_dims):
+        char = _index_to_einsum_variable(i + letter_offset)
+        input_str += char
+        output_str += char
+
+    letter_offset += free_dims
+    for i in range(bound_dims):
+        char = _index_to_einsum_variable(i + letter_offset)
+        input_str += char
+        kernel_str += char
+
+    letter_offset += bound_dims
+    for i in range(output_dims):
+        char = _index_to_einsum_variable(i + letter_offset)
+        kernel_str += char
+        output_str += char
+        bias_axes += char
+    equation = f"{input_str},{kernel_str}->{output_str}"
+
+    return equation, bias_axes, len(output_str)
+
+
+def _get_output_shape(output_rank, known_last_dims):
+    return [None] * (output_rank - len(known_last_dims)) + list(known_last_dims)
+
+
+@keras.saving.register_keras_serializable(package="keras_nlp")
+class WhisperCachedMultiHeadAttention(
+    keras_nlp.layers.CachedMultiHeadAttention
+):
+    """Whisper Cached Multi-Head Attention layer.
+
+    Inherits from `keras_nlp.layers.CachedMultiHeadAttention`, and overrides
+    the `build` method so that the query and value projection layers have a
+    bias term, whereas the key projection layer does not.
+ """ + + def build( + self, + query_shape, + value_shape, + key_shape=None, + ): + key_shape = value_shape if key_shape is None else key_shape + query_rank = len(query_shape) + value_rank = len(value_shape) + key_rank = len(key_shape) + einsum_equation, bias_axes, output_rank = _build_proj_equation( + query_rank - 1, bound_dims=1, output_dims=2 + ) + self._query_dense = keras.layers.EinsumDense( + einsum_equation, + output_shape=_get_output_shape( + output_rank - 1, [self._num_heads, self._key_dim] + ), + bias_axes=bias_axes if self._use_bias else None, + name="query", + **self._get_common_kwargs_for_sublayer(), + ) + self._query_dense.build(query_shape) + einsum_equation, bias_axes, output_rank = _build_proj_equation( + key_rank - 1, bound_dims=1, output_dims=2 + ) + self._key_dense = keras.layers.EinsumDense( + einsum_equation, + output_shape=_get_output_shape( + output_rank - 1, [self._num_heads, self._key_dim] + ), + bias_axes=None, + name="key", + **self._get_common_kwargs_for_sublayer(), + ) + self._key_dense.build(key_shape) + einsum_equation, bias_axes, output_rank = _build_proj_equation( + value_rank - 1, bound_dims=1, output_dims=2 + ) + self._value_dense = keras.layers.EinsumDense( + einsum_equation, + output_shape=_get_output_shape( + output_rank - 1, [self._num_heads, self._value_dim] + ), + bias_axes=bias_axes if self._use_bias else None, + name="value", + **self._get_common_kwargs_for_sublayer(), + ) + self._value_dense.build(value_shape) + + # Builds the attention computations for multi-head dot product + # attention. These computations could be wrapped into the keras + # attention layer once it supports multi-head einsum computations. + self._build_attention(output_rank) + + if self._output_shape: + if not isinstance(self._output_shape, collections.abc.Sized): + output_shape = [self._output_shape] + else: + output_shape = self._output_shape + else: + output_shape = [query_shape[-1]] + einsum_equation, bias_axes, output_rank = _build_proj_equation( + query_rank - 1, bound_dims=2, output_dims=len(output_shape) + ) + self._output_dense = keras.layers.EinsumDense( + einsum_equation, + output_shape=_get_output_shape(output_rank - 1, output_shape), + bias_axes=bias_axes if self._use_bias else None, + name="attention_output", + **self._get_common_kwargs_for_sublayer(), + ) + output_dense_input_shape = list( + self._query_dense.compute_output_shape(query_shape) + ) + output_dense_input_shape[-1] = self._value_dim + self._output_dense.build(tuple(output_dense_input_shape)) + self.built = True + + def _build_from_signature(self, query, value, key=None): + pass diff --git a/keras_nlp/models/whisper/whisper_decoder.py b/keras_nlp/models/whisper/whisper_decoder.py index 7f5d834741..c41a870a42 100644 --- a/keras_nlp/models/whisper/whisper_decoder.py +++ b/keras_nlp/models/whisper/whisper_decoder.py @@ -11,33 +11,130 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +"""Whisper decoder block.""" + from keras_nlp.backend import keras from keras_nlp.layers.modeling.transformer_decoder import TransformerDecoder +from keras_nlp.models.whisper.whisper_cached_multi_head_attention import ( + WhisperCachedMultiHeadAttention, +) +from keras_nlp.utils.keras_utils import clone_initializer @keras.saving.register_keras_serializable(package="keras_nlp") class WhisperDecoder(TransformerDecoder): - """A Whisper decoder. + """Whisper decoder. 
     Inherits from `keras_nlp.layers.TransformerDecoder`, and overrides the
-    `build` method so as to remove the bias term from the key projection layer.
+    `build` method to use the
+    `keras_nlp.models.whisper.whisper_cached_multi_head_attention.WhisperCachedMultiHeadAttention`
+    layer instead of `keras_nlp.layers.CachedMultiHeadAttention` for both the
+    self attention and the cross attention layers.
     """

     def build(
         self,
         decoder_sequence_shape,
-        encoder_sequence_shape=None,
+        encoder_sequence_shape,
     ):
-        super().build(
-            decoder_sequence_shape,
-            encoder_sequence_shape=encoder_sequence_shape,
-        )
-
-        # Since there is no exposed option for this in MHA, we will reach into
-        # the internals of the layer for now.
-        self._self_attention_layer._key_dense.bias_axes = None
-        self._self_attention_layer._key_dense.bias = None
-        if self._cross_attention_layer:
-            self._cross_attention_layer._key_dense.bias_axes = None
-            self._cross_attention_layer._key_dense.bias = None
+        self._decoder_sequence_shape = decoder_sequence_shape
+        self._encoder_sequence_shape = encoder_sequence_shape
+        # Infer the dimension of our hidden feature size from the build shape.
+        hidden_dim = decoder_sequence_shape[-1]
+        # Attention head size is `hidden_dim` over the number of heads.
+        head_dim = int(hidden_dim // self.num_heads)
+        if head_dim == 0:
+            raise ValueError(
+                "Attention `head_dim` computed cannot be zero. "
+                f"The `hidden_dim` value of {hidden_dim} has to be equal to "
+                f"or greater than `num_heads` value of {self.num_heads}."
+            )
+
+        # Self attention layers.
+        self._self_attention_layer = WhisperCachedMultiHeadAttention(
+            num_heads=self.num_heads,
+            key_dim=head_dim,
+            dropout=self.dropout,
+            kernel_initializer=clone_initializer(self.kernel_initializer),
+            bias_initializer=clone_initializer(self.bias_initializer),
+            dtype=self.dtype_policy,
+            name="self_attention",
+        )
+
+        self._self_attention_layer.build(
+            query_shape=decoder_sequence_shape,
+            value_shape=decoder_sequence_shape,
+        )
+        self._self_attention_layer_norm = keras.layers.LayerNormalization(
+            epsilon=self.layer_norm_epsilon,
+            dtype=self.dtype_policy,
+            name="self_attention_layer_norm",
+        )
+        self._self_attention_layer_norm.build(decoder_sequence_shape)
+        self._self_attention_dropout = keras.layers.Dropout(
+            rate=self.dropout,
+            dtype=self.dtype_policy,
+            name="self_attention_dropout",
+        )
+
+        self._cross_attention_layer = WhisperCachedMultiHeadAttention(
+            num_heads=self.num_heads,
+            key_dim=head_dim,
+            value_dim=head_dim,
+            dropout=self.dropout,
+            kernel_initializer=clone_initializer(self.kernel_initializer),
+            bias_initializer=clone_initializer(self.bias_initializer),
+            dtype=self.dtype_policy,
+            name="cross_attention",
+        )
+        self._cross_attention_layer.build(
+            query_shape=decoder_sequence_shape,
+            value_shape=encoder_sequence_shape,
+        )
+        self._cross_attention_layer_norm = keras.layers.LayerNormalization(
+            epsilon=self.layer_norm_epsilon,
+            dtype=self.dtype_policy,
+            name="cross_attention_layer_norm",
+        )
+        self._cross_attention_layer_norm.build(decoder_sequence_shape)
+        self._cross_attention_dropout = keras.layers.Dropout(
+            rate=self.dropout,
+            dtype=self.dtype_policy,
+            name="cross_attention_dropout",
+        )
+
+        # Feedforward layers.
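+        # The two dense layers below expand features to `intermediate_dim`
+        # and project them back down to `hidden_dim`, e.g. an illustrative
+        # 384 -> 1536 -> 384 round trip when `intermediate_dim` is four times
+        # the hidden size.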
+        self._feedforward_intermediate_dense = keras.layers.Dense(
+            self.intermediate_dim,
+            activation=self.activation,
+            kernel_initializer=clone_initializer(self.kernel_initializer),
+            bias_initializer=clone_initializer(self.bias_initializer),
+            dtype=self.dtype_policy,
+            name="feedforward_intermediate_dense",
+        )
+        self._feedforward_intermediate_dense.build(decoder_sequence_shape)
+        self._feedforward_output_dense = keras.layers.Dense(
+            hidden_dim,
+            kernel_initializer=clone_initializer(self.kernel_initializer),
+            bias_initializer=clone_initializer(self.bias_initializer),
+            dtype=self.dtype_policy,
+            name="feedforward_output_dense",
+        )
+        intermediate_shape = list(decoder_sequence_shape)
+        intermediate_shape[-1] = self.intermediate_dim
+        self._feedforward_output_dense.build(tuple(intermediate_shape))
+        self._feedforward_layer_norm = keras.layers.LayerNormalization(
+            epsilon=self.layer_norm_epsilon,
+            dtype=self.dtype_policy,
+            name="feedforward_layer_norm",
+        )
+        self._feedforward_layer_norm.build(decoder_sequence_shape)
+        self._feedforward_dropout = keras.layers.Dropout(
+            rate=self.dropout,
+            dtype=self.dtype_policy,
+            name="feedforward_dropout",
+        )
+        # All sublayers have been created; mark the layer as built.
+        self.built = True
diff --git a/keras_nlp/models/whisper/whisper_encoder.py b/keras_nlp/models/whisper/whisper_encoder.py
index 31267cbf78..9d5b41d0d2 100644
--- a/keras_nlp/models/whisper/whisper_encoder.py
+++ b/keras_nlp/models/whisper/whisper_encoder.py
@@ -11,23 +11,95 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""Whisper encoder block."""
+
 from keras_nlp.backend import keras
 from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
+from keras_nlp.models.whisper.whisper_cached_multi_head_attention import (
+    WhisperCachedMultiHeadAttention,
+)
+from keras_nlp.utils.keras_utils import clone_initializer


 @keras.saving.register_keras_serializable(package="keras_nlp")
 class WhisperEncoder(TransformerEncoder):
-    """A Whisper encoder.
+    """Whisper encoder.

     Inherits from `keras_nlp.layers.TransformerEncoder`, and overrides the
-    `build` method so as to remove the bias term from the key projection layer.
+    `build` method to use the
+    `keras_nlp.models.whisper.whisper_cached_multi_head_attention.WhisperCachedMultiHeadAttention`
+    layer instead of `keras.layers.MultiHeadAttention`.
     """

     def build(self, inputs_shape):
-        super().build(inputs_shape)
+        # Infer the dimension of our hidden feature size from the build shape.
+        hidden_dim = inputs_shape[-1]
+        # Attention head size is `hidden_dim` over the number of heads.
+        key_dim = int(hidden_dim // self.num_heads)
+        if key_dim == 0:
+            raise ValueError(
+                "Attention `key_dim` computed cannot be zero. "
+                f"The `hidden_dim` value of {hidden_dim} has to be equal to "
+                f"or greater than `num_heads` value of {self.num_heads}."
+            )
+
+        # Self attention layers.
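+        # E.g. with an illustrative `hidden_dim` of 384 and `num_heads` of 6,
+        # each head attends over key_dim = 384 // 6 = 64 features.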
+ self._self_attention_layer = WhisperCachedMultiHeadAttention( + num_heads=self.num_heads, + key_dim=key_dim, + dropout=self.dropout, + kernel_initializer=clone_initializer(self.kernel_initializer), + bias_initializer=clone_initializer(self.bias_initializer), + dtype=self.dtype_policy, + name="self_attention_layer", + ) + self._self_attention_layer.build( + query_shape=inputs_shape, + value_shape=inputs_shape, + ) + + self._self_attention_layer_norm = keras.layers.LayerNormalization( + epsilon=self.layer_norm_epsilon, + dtype=self.dtype_policy, + name="self_attention_layer_norm", + ) + self._self_attention_layer_norm.build(inputs_shape) + self._self_attention_dropout = keras.layers.Dropout( + rate=self.dropout, + dtype=self.dtype_policy, + name="self_attention_dropout", + ) - # Since there is no exposed option for this in MHA, we will reach into - # the internals of the layer for now. - self._self_attention_layer._key_dense.bias_axes = None - self._self_attention_layer._key_dense.bias = None + # Feedforward layers. + self._feedforward_layer_norm = keras.layers.LayerNormalization( + epsilon=self.layer_norm_epsilon, + dtype=self.dtype_policy, + name="feedforward_layer_norm", + ) + self._feedforward_layer_norm.build(inputs_shape) + self._feedforward_intermediate_dense = keras.layers.Dense( + self.intermediate_dim, + activation=self.activation, + kernel_initializer=clone_initializer(self.kernel_initializer), + bias_initializer=clone_initializer(self.bias_initializer), + dtype=self.dtype_policy, + name="feedforward_intermediate_dense", + ) + self._feedforward_intermediate_dense.build(inputs_shape) + self._feedforward_output_dense = keras.layers.Dense( + hidden_dim, + kernel_initializer=clone_initializer(self.kernel_initializer), + bias_initializer=clone_initializer(self.bias_initializer), + dtype=self.dtype_policy, + name="feedforward_output_dense", + ) + intermediate_shape = list(inputs_shape) + intermediate_shape[-1] = self.intermediate_dim + self._feedforward_output_dense.build(tuple(intermediate_shape)) + self._feedforward_dropout = keras.layers.Dropout( + rate=self.dropout, + dtype=self.dtype_policy, + name="feedforward_dropout", + ) + self.built = True From e8f63412a25b0597cc368f9c147a257718d42ca1 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 6 Nov 2023 15:40:55 -0800 Subject: [PATCH 23/87] Test against Keras 3 (#1273) --- .github/workflows/actions.yml | 7 ++++--- keras_nlp/models/task.py | 3 +++ keras_nlp/tests/test_case.py | 4 +++- requirements-common.txt | 3 --- requirements-jax-cuda.txt | 4 ++-- requirements-tensorflow-cuda.txt | 5 +++-- requirements-torch-cuda.txt | 4 ++-- requirements.txt | 4 ++-- 8 files changed, 19 insertions(+), 15 deletions(-) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 7d4a7d4ed0..4d6a5e7dfb 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -7,7 +7,7 @@ on: types: [created] jobs: build: - name: Test the code with tf.keras + name: Test the code with Keras 2 runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 @@ -29,7 +29,8 @@ jobs: ${{ runner.os }}-pip- - name: Install dependencies run: | - pip install -r requirements.txt --progress-bar off + pip install -r requirements-common.txt --progress-bar off + pip install tensorflow-text==2.14 tensorflow==2.14 keras-core pip install --no-deps -e "." 
--progress-bar off - name: Test with pytest run: | @@ -38,7 +39,7 @@ jobs: run: | python pip_build.py --install && cd integration_tests && pytest . multibackend: - name: Test the code with Keras Core + name: Test the code with Keras 3 strategy: fail-fast: false matrix: diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index d4c6180405..2c1d0f40f1 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -79,6 +79,9 @@ def _check_for_loss_mismatch(self, loss): ) def compile(self, optimizer="rmsprop", loss=None, **kwargs): + # Temporarily disable jit compilation on torch. + if config.backend() == "torch": + kwargs["jit_compile"] = False self._check_for_loss_mismatch(loss) super().compile(optimizer=optimizer, loss=loss, **kwargs) diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py index 6fe72ed497..fefa7a3a0f 100644 --- a/keras_nlp/tests/test_case.py +++ b/keras_nlp/tests/test_case.py @@ -143,7 +143,9 @@ def call(self, x): return self.layer(x) model = TestModel(layer) - model.compile(optimizer="sgd", loss="mse", jit_compile=True) + # Temporarily disable jit compilation on torch backend. + jit_compile = config.backend() != "torch" + model.compile(optimizer="sgd", loss="mse", jit_compile=jit_compile) model.fit(input_data, output_data, verbose=0) if config.multi_backend(): diff --git a/requirements-common.txt b/requirements-common.txt index 44661e315a..5c9710de4b 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -1,5 +1,4 @@ # Library deps. -keras-core>=0.1.6 dm-tree regex rich @@ -17,5 +16,3 @@ namex rouge-score sentencepiece tensorflow-datasets -# Breakage fix. -ml-dtypes==0.2.0 diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index bb115b14f8..f424af9cb7 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tensorflow>=2.14.0 -tensorflow-text>=2.14.0 +tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index 4b2cf167ea..98c2746474 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -1,6 +1,7 @@ # Tensorflow with cuda support. -tensorflow[and-cuda]>=2.14.0 -tensorflow-text>=2.14.0 +--extra-index-url https://pypi.nvidia.com +tf-nightly[and-cuda]==2.16.0.dev20231103 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index 14e94dd862..eb147a3d38 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tensorflow>=2.14.0 -tensorflow-text>=2.14.0 +tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. # Torch with cuda support. --extra-index-url https://download.pytorch.org/whl/cu118 diff --git a/requirements.txt b/requirements.txt index aa289402fd..a17dc717a9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Tensorflow. -tensorflow>=2.14.0 -tensorflow-text>=2.14.0 +tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. 
+tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. # Torch. --extra-index-url https://download.pytorch.org/whl/cpu From e7bbf2b6c565f23a5be28557b60427d02223d091 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 6 Nov 2023 15:41:04 -0800 Subject: [PATCH 24/87] Improve Keras 3 detection (#1295) This covers the case where tensorflow has been configured with TF_USE_LEGACY_KERAS=True. In this case, we will also load the legacy version of Keras (we just follow the `tf.keras` version, whatever it is). --- keras_nlp/backend/config.py | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/keras_nlp/backend/config.py b/keras_nlp/backend/config.py index 7436012d1c..11b0f24269 100644 --- a/keras_nlp/backend/config.py +++ b/keras_nlp/backend/config.py @@ -15,11 +15,8 @@ import json import os -import keras -from packaging import version - _MULTI_BACKEND = False -_IS_KERAS_3 = False +_USE_KERAS_3 = False # Set Keras base dir path given KERAS_HOME env variable, if applicable. # Otherwise either ~/.keras or /tmp. @@ -65,15 +62,27 @@ if "KERAS_BACKEND" in os.environ and os.environ["KERAS_BACKEND"]: _MULTI_BACKEND = True -# If keras is version 3, use multi-backend keras (our only option). -_IS_KERAS_3 = version.parse(keras.__version__) >= version.parse("3.0.0.dev0") -if _IS_KERAS_3: + +def detect_if_tensorflow_uses_keras_3(): + # We follow the version of keras that tensorflow is configured to use. + from tensorflow import keras + + # Note that only recent versions of keras have a `version()` function. + if hasattr(keras, "version") and keras.version().startswith("3."): + return True + + # No `keras.version()` means we are on an old version of keras. 
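+    # For reference, `keras.version()` returns a string like "3.0.0" under
+    # Keras 3, while Keras 2 has no `version()` attribute at all.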
+ return False + + +_USE_KERAS_3 = detect_if_tensorflow_uses_keras_3() +if _USE_KERAS_3: _MULTI_BACKEND = True def keras_3(): - """Check if Keras 3 is installed.""" - return _IS_KERAS_3 + """Check if Keras 3 is being used.""" + return _USE_KERAS_3 def multi_backend(): @@ -89,4 +98,7 @@ def backend(): import keras_core return keras_core.config.backend() + + from tensorflow import keras + return keras.config.backend() From 8bc1bc29f7f5a3622f11e535cef5089911602d4b Mon Sep 17 00:00:00 2001 From: Pedro Kaj Kjellerup Nacht Date: Thu, 9 Nov 2023 16:53:34 -0300 Subject: [PATCH 25/87] Run workflows with read-only tokens (#1305) Signed-off-by: Pedro Kaj Kjellerup Nacht --- .github/workflows/actions.yml | 4 ++++ .github/workflows/publish-to-pypi.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index 4d6a5e7dfb..93a7a364ef 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -5,6 +5,10 @@ on: pull_request: release: types: [created] + +permissions: + contents: read + jobs: build: name: Test the code with Keras 2 diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index a1774c9057..0e0d101320 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -1,6 +1,10 @@ name: Publish to PyPI on: push + +permissions: + contents: read + jobs: build-and-publish: name: Build and publish to PyPI From c0e6f75144df014a5a183adc331b68a6b39e1e91 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 9 Nov 2023 17:22:41 -0800 Subject: [PATCH 26/87] Update CONTRIBUTING.md (#1310) --- CONTRIBUTING.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 688c6311e2..394a1fb148 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -142,8 +142,8 @@ We strongly recommend a Linux development environment for an easy contribution experience. 
To build a dev environment from scratch on MacOS, see the following guides:

-https://developer.apple.com/metal/tensorflow-plugin/
-https://github.com/tensorflow/text
+- https://developer.apple.com/metal/tensorflow-plugin/
+- https://github.com/tensorflow/text

 ### Windows

From 239f6fd1837a6928e1f3eb649aecbfa620123723 Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Thu, 9 Nov 2023 20:05:05 -0600
Subject: [PATCH 27/87] Add GitHub Action for Nightly (#1309)

* Add GitHub Action for Nightly

* Add GitHub Action for Nightly

* Add GitHub Action for Nightly

* Move tf-nightly pin to 11/4

* Move tf-nightly pin to 11/7
---
 .github/workflows/actions.yml    |  1 +
 .github/workflows/nightly.yml    | 49 ++++++++++++++++++++++++++++++++
 keras_nlp/__init__.py            |  2 +-
 pip_build.py                     | 41 +++++++++++++++++++++-----
 requirements-jax-cuda.txt        |  4 +--
 requirements-tensorflow-cuda.txt |  4 +--
 requirements-torch-cuda.txt      |  4 +--
 requirements.txt                 |  4 +--
 8 files changed, 93 insertions(+), 16 deletions(-)
 create mode 100644 .github/workflows/nightly.yml

diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml
index 93a7a364ef..ccee52c2d9 100644
--- a/.github/workflows/actions.yml
+++ b/.github/workflows/actions.yml
@@ -3,6 +3,7 @@ name: Tests
 on:
   push:
   pull_request:
+  workflow_call:
   release:
     types: [created]

diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
new file mode 100644
index 0000000000..677a641658
--- /dev/null
+++ b/.github/workflows/nightly.yml
@@ -0,0 +1,49 @@
+name: Nightly
+
+on:
+  workflow_dispatch:  # To generate wheels on demand outside of schedule.
+  schedule:
+    - cron: '0 3 * * *'  # run at 3 AM UTC / 8 PM PDT
+
+permissions:
+  contents: read
+
+jobs:
+  run-test-for-nightly:
+    uses: ./.github/workflows/actions.yml
+  nightly:
+    name: Build Wheel file and upload
+    needs: [run-test-for-nightly]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Get pip cache dir
+        id: pip-cache
+        run: |
+          python -m pip install --upgrade pip setuptools
+          echo "::set-output name=dir::$(pip cache dir)"
+      - name: pip cache
+        uses: actions/cache@v2
+        with:
+          path: ${{ steps.pip-cache.outputs.dir }}
+          key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }}
+          restore-keys: |
+            ${{ runner.os }}-pip-
+      - name: Install dependencies
+        run: |
+          python -m pip install --upgrade pip setuptools
+          pip install twine
+          pip install -r requirements.txt --progress-bar off
+      - name: Build wheel file
+        run: |
+          python pip_build.py --nightly
+      - name: Publish to PyPI
+        uses: pypa/gh-action-pypi-publish@release/v1
+        with:
+          password: ${{ secrets.PYPI_NIGHTLY_API_TOKEN }}
+          packages-dir: dist/
+          verbose: true
diff --git a/keras_nlp/__init__.py b/keras_nlp/__init__.py
index 759347103c..0e81a7f731 100644
--- a/keras_nlp/__init__.py
+++ b/keras_nlp/__init__.py
@@ -28,4 +28,4 @@
 from keras_nlp import utils

 # This is the global source of truth for the version number.
-__version__ = "0.7.0.dev0" +__version__ = "0.7.0" diff --git a/pip_build.py b/pip_build.py index 7774815e1f..6bf576df66 100644 --- a/pip_build.py +++ b/pip_build.py @@ -28,6 +28,7 @@ ``` """ import argparse +import datetime import glob import os import pathlib @@ -45,14 +46,37 @@ ] -def build(): +def export_version_string(version, is_nightly=False): + """Export Version and Package Name.""" + if is_nightly: + date = datetime.datetime.now() + version += f".dev{date.strftime('%Y%m%d%H')}" + # Replaces `name="keras-nlp"` in `setup.py` with `keras-nlp-nightly` + with open("setup.py") as f: + setup_contents = f.read() + with open("setup.py", "w") as f: + setup_contents = setup_contents.replace( + 'name="keras-nlp"', 'name="keras-nlp-nightly"' + ) + setup_contents = setup_contents.replace( + '"tensorflow-text', '"tf-nightly", "tensorflow-text-nightly' + ) + f.write(setup_contents) + + # Make sure to export the __version__ string + with open(os.path.join(package, "__init__.py")) as f: + init_contents = f.read() + with open(os.path.join(package, "__init__.py"), "w") as f: + f.write(init_contents + "\n\n" + f'__version__ = "{version}"\n') + + +def build(root_path, is_nightly=False): if os.path.exists(build_directory): raise ValueError(f"Directory already exists: {build_directory}") whl_path = None try: # Copy sources (`keras_nlp/` directory and setup files) to build directory - root_path = pathlib.Path(__file__).parent.resolve() os.chdir(root_path) os.mkdir(build_directory) shutil.copytree(package, os.path.join(build_directory, package)) @@ -69,10 +93,7 @@ def build(): # Make sure to export the __version__ string from keras_nlp.src import __version__ # noqa: E402 - with open(os.path.join(package, "__init__.py")) as f: - init_contents = f.read() - with open(os.path.join(package, "__init__.py"), "w") as f: - f.write(init_contents + "\n\n" + f'__version__ = "{__version__}"\n') + export_version_string(__version__, is_nightly) # Build the package os.system("python3 -m build") @@ -109,7 +130,13 @@ def install_whl(whl_fpath): action="store_true", help="Whether to install the generated wheel file.", ) + parser.add_argument( + "--nightly", + action="store_true", + help="Whether to generate nightly wheel file.", + ) args = parser.parse_args() - whl_path = build() + root_path = pathlib.Path(__file__).parent.resolve() + whl_path = build(root_path, args.nightly) if whl_path and args.install: install_whl(whl_path) diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index f424af9cb7..d92d3aaa7a 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index 98c2746474..86000b2617 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -1,7 +1,7 @@ # Tensorflow with cuda support. --extra-index-url https://pypi.nvidia.com -tf-nightly[and-cuda]==2.16.0.dev20231103 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. +tf-nightly[and-cuda]==2.16.0.dev20231107 # Pin a working nightly until rc0. 
+tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index eb147a3d38..04a7ed8c11 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. # Torch with cuda support. --extra-index-url https://download.pytorch.org/whl/cu118 diff --git a/requirements.txt b/requirements.txt index a17dc717a9..de0bd1cb71 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Tensorflow. -tf-nightly-cpu==2.16.0.dev20231103 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231103 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. # Torch. --extra-index-url https://download.pytorch.org/whl/cpu From 11bece833200548c181810980ff50546da4d748a Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Fri, 10 Nov 2023 11:39:44 -0800 Subject: [PATCH 28/87] Fix the publish to pypi action (#1311) It has been failing since #1309, see https://github.com/keras-team/keras-nlp/actions/runs/6819858177/job/18547841938 I believe what is happening is that newly pinned nightly only had linux wheels for 3.9. And our publish to pypi flow did not specify the python version. https://pypi.org/project/tensorflow-text-nightly/2.16.0.dev20231107/#files Doing a few things here: - Updating the pinned package to a version where tensorflow-text-nightly has support for python 3.9, 3.10, and 3.11 on linux. - Fixing out publish to pypi workflow to set the python version like other workflow versions. - Some other cleanups for consistency with our github actions. --- .github/workflows/actions.yml | 12 ++++---- .github/workflows/publish-to-pypi.yml | 40 +++++++++++++++++++-------- requirements-jax-cuda.txt | 4 +-- requirements-tensorflow-cuda.txt | 4 +-- requirements-torch-cuda.txt | 4 +-- requirements.txt | 4 +-- 6 files changed, 42 insertions(+), 26 deletions(-) diff --git a/.github/workflows/actions.yml b/.github/workflows/actions.yml index ccee52c2d9..64a41ca16e 100644 --- a/.github/workflows/actions.yml +++ b/.github/workflows/actions.yml @@ -11,11 +11,11 @@ permissions: contents: read jobs: - build: + keras_2: name: Test the code with Keras 2 runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.9 uses: actions/setup-python@v1 with: @@ -43,7 +43,7 @@ jobs: - name: Run integration tests run: | python pip_build.py --install && cd integration_tests && pytest . 
- multibackend: + keras_3: name: Test the code with Keras 3 strategy: fail-fast: false @@ -51,7 +51,7 @@ jobs: backend: [tensorflow, jax, torch] runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.9 uses: actions/setup-python@v1 with: @@ -77,11 +77,11 @@ jobs: KERAS_BACKEND: ${{ matrix.backend }} run: | pytest keras_nlp/ - format: + check_format: name: Check the code format runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.9 uses: actions/setup-python@v1 with: diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml index 0e0d101320..c3f6767350 100644 --- a/.github/workflows/publish-to-pypi.yml +++ b/.github/workflows/publish-to-pypi.yml @@ -10,15 +10,31 @@ jobs: name: Build and publish to PyPI runs-on: ubuntu-latest steps: - - uses: actions/checkout@master - - name: Install dependencies - run: | - pip install -r requirements.txt --progress-bar off - - name: Build a binary wheel and a source tarball - run: >- - python pip_build.py - - name: Publish distribution to PyPI - if: startsWith(github.ref, 'refs/tags') - uses: pypa/gh-action-pypi-publish@master - with: - password: ${{ secrets.PYPI_API_TOKEN }} + - uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.9 + - name: Get pip cache dir + id: pip-cache + run: | + python -m pip install --upgrade pip setuptools + echo "::set-output name=dir::$(pip cache dir)" + - name: pip cache + uses: actions/cache@v2 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-pip-${{ hashFiles('setup.py') }} + restore-keys: | + ${{ runner.os }}-pip- + - name: Install dependencies + run: | + pip install -r requirements.txt --progress-bar off + - name: Build a binary wheel and a source tarball + run: >- + python pip_build.py + - name: Publish distribution to PyPI + if: startsWith(github.ref, 'refs/tags') + uses: pypa/gh-action-pypi-publish@master + with: + password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index d92d3aaa7a..4be6e416e6 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index 86000b2617..f0a77c64f2 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -1,7 +1,7 @@ # Tensorflow with cuda support. --extra-index-url https://pypi.nvidia.com -tf-nightly[and-cuda]==2.16.0.dev20231107 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. +tf-nightly[and-cuda]==2.16.0.dev20231109 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. # Torch cpu-only version. 
--extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index 04a7ed8c11..7e956c4516 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. # Torch with cuda support. --extra-index-url https://download.pytorch.org/whl/cu118 diff --git a/requirements.txt b/requirements.txt index de0bd1cb71..fe4340aa9b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Tensorflow. -tf-nightly-cpu==2.16.0.dev20231107 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231107 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. # Torch. --extra-index-url https://download.pytorch.org/whl/cpu From fe9b86891359dedac74b9c0a54b7331f0deafd9d Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:24:39 -0800 Subject: [PATCH 29/87] Fix nightly failure (#1316) Apparently, `ops.array([1, 0, 1], dtype="bool")` fails on the tf backend only as of a recent nightly, which is probably a bug. Still, probably better to just pass list input to our preprocessing layer tests, which will be more consistent with our testing overall. --- keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py | 6 ++---- .../models/bart/bart_seq_2_seq_lm_preprocessor_test.py | 5 ++--- keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py | 6 ++---- keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py | 5 ++--- keras_nlp/models/opt/opt_causal_lm_preprocessor.py | 6 ++---- keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py | 5 ++--- keras_nlp/tokenizers/byte_pair_tokenizer.py | 2 +- 7 files changed, 13 insertions(+), 22 deletions(-) diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py index e238b668e9..8c15de8574 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py @@ -266,10 +266,8 @@ def generate_postprocess( x["decoder_token_ids"], x["decoder_padding_mask"], ) - if not isinstance(decoder_token_ids, tf.Tensor): - decoder_token_ids = ops.convert_to_numpy(decoder_token_ids) - if not isinstance(decoder_padding_mask, tf.Tensor): - decoder_padding_mask = ops.convert_to_numpy(decoder_padding_mask) + decoder_token_ids = ops.convert_to_numpy(decoder_token_ids) + decoder_padding_mask = ops.convert_to_numpy(decoder_padding_mask) # Strip any special tokens during detokenization, i.e., the start and # end markers. In the future, we could make this configurable. 
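        # E.g. for the decoder ids [0, 4, 5, 6, 2] used in the unit test
        # below, the leading start marker and trailing end marker are masked
        # out, and only the middle ids are turned back into text.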
decoder_padding_mask = ( diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py index 37493bb91d..f67dab70a0 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py @@ -14,7 +14,6 @@ import pytest -from keras_nlp.backend import ops from keras_nlp.models.bart.bart_seq_2_seq_lm_preprocessor import ( BartSeq2SeqLMPreprocessor, ) @@ -82,8 +81,8 @@ def test_generate_preprocess(self): def test_generate_postprocess(self): preprocessor = BartSeq2SeqLMPreprocessor(**self.init_kwargs) input_data = { - "decoder_token_ids": ops.array([0, 4, 5, 6, 2], dtype="int32"), - "decoder_padding_mask": ops.array([1, 1, 1, 1, 1], dtype="bool"), + "decoder_token_ids": [0, 4, 5, 6, 2], + "decoder_padding_mask": [1, 1, 1, 1, 1], } output = preprocessor.generate_postprocess(input_data) self.assertAllEqual(output, " airplane at") diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py index b501ad3fe0..41ea591df8 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py @@ -163,10 +163,8 @@ def generate_postprocess( back to a string. """ token_ids, padding_mask = x["token_ids"], x["padding_mask"] - if not isinstance(token_ids, tf.Tensor): - token_ids = ops.convert_to_numpy(token_ids) - if not isinstance(padding_mask, tf.Tensor): - padding_mask = ops.convert_to_numpy(padding_mask) + token_ids = ops.convert_to_numpy(token_ids) + padding_mask = ops.convert_to_numpy(padding_mask) # Strip any special tokens during detokenization (e.g. the start and # end markers). In the future we could make this configurable. padding_mask = padding_mask & (token_ids != self.tokenizer.end_token_id) diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py index b0cdd2e3ee..400273b792 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. import pytest -import tensorflow as tf from keras_nlp.models.gpt2.gpt2_causal_lm_preprocessor import ( GPT2CausalLMPreprocessor, @@ -78,8 +77,8 @@ def test_generate_preprocess(self): def test_generate_postprocess(self): input_data = { - "token_ids": tf.constant([6, 1, 3, 4, 2, 5, 0, 0]), - "padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"), + "token_ids": [6, 1, 3, 4, 2, 5, 0, 0], + "padding_mask": [1, 1, 1, 1, 1, 1, 0, 0], } preprocessor = GPT2CausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_postprocess(input_data) diff --git a/keras_nlp/models/opt/opt_causal_lm_preprocessor.py b/keras_nlp/models/opt/opt_causal_lm_preprocessor.py index 26f01a32d1..9cc8c7f495 100644 --- a/keras_nlp/models/opt/opt_causal_lm_preprocessor.py +++ b/keras_nlp/models/opt/opt_causal_lm_preprocessor.py @@ -164,10 +164,8 @@ def generate_postprocess( back to a string. """ token_ids, padding_mask = x["token_ids"], x["padding_mask"] - if not isinstance(token_ids, tf.Tensor): - token_ids = ops.convert_to_numpy(token_ids) - if not isinstance(padding_mask, tf.Tensor): - padding_mask = ops.convert_to_numpy(padding_mask) + token_ids = ops.convert_to_numpy(token_ids) + padding_mask = ops.convert_to_numpy(padding_mask) # Strip any special tokens during detokenization (e.g. the start and # end markers). 
In the future we could make this configurable. padding_mask = padding_mask & (token_ids != self.tokenizer.end_token_id) diff --git a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py index 2f225612d4..9ba6851d4b 100644 --- a/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_preprocessor_test.py @@ -13,7 +13,6 @@ # limitations under the License. import pytest -import tensorflow as tf from keras_nlp.models.opt.opt_causal_lm_preprocessor import ( OPTCausalLMPreprocessor, @@ -77,8 +76,8 @@ def test_generate_preprocess(self): def test_generate_postprocess(self): input_data = { - "token_ids": tf.constant([1, 2, 4, 5, 3, 6, 0, 0]), - "padding_mask": tf.cast([1, 1, 1, 1, 1, 1, 0, 0], dtype="bool"), + "token_ids": [1, 2, 4, 5, 3, 6, 0, 0], + "padding_mask": [1, 1, 1, 1, 1, 1, 0, 0], } preprocessor = OPTCausalLMPreprocessor(**self.init_kwargs) x = preprocessor.generate_postprocess(input_data) diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index f92d9e6a77..133c9565b0 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -561,7 +561,7 @@ def process_unseen_tokens(): def detokenize(self, inputs): inputs, unbatched, _ = convert_to_ragged_batch(inputs) - + inputs = tf.cast(inputs, self.dtype) unicode_text = tf.strings.reduce_join( self.id_to_token_map.lookup(inputs), axis=-1 ) From 9286561f35d4727a373e135217279761edadb486 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 13 Nov 2023 16:24:56 -0800 Subject: [PATCH 30/87] Switch deberta to use the "int" dtype (#1315) This will be int32 on jax and torch, but int64 on tf, which is what we need for proper accelerator support --- keras_nlp/models/deberta_v3/disentangled_self_attention.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/keras_nlp/models/deberta_v3/disentangled_self_attention.py b/keras_nlp/models/deberta_v3/disentangled_self_attention.py index 421ba91d8e..1c9ae569c7 100644 --- a/keras_nlp/models/deberta_v3/disentangled_self_attention.py +++ b/keras_nlp/models/deberta_v3/disentangled_self_attention.py @@ -232,12 +232,13 @@ def _get_log_pos(abs_pos, mid): x1=rel_pos, x2=log_pos * sign, ) - bucket_pos = ops.cast(bucket_pos, dtype="int64") + bucket_pos = ops.cast(bucket_pos, dtype="int") return bucket_pos def _get_rel_pos(self, num_positions): - ids = ops.arange(num_positions, dtype="int64") + ids = ops.arange(num_positions) + ids = ops.cast(ids, dtype="int") query_ids = ops.expand_dims(ids, axis=-1) key_ids = ops.expand_dims(ids, axis=0) key_ids = ops.repeat(key_ids, repeats=num_positions, axis=0) From 0070ac11d851f4a02360a1061c087584e4784811 Mon Sep 17 00:00:00 2001 From: Pedro Kaj Kjellerup Nacht Date: Mon, 20 Nov 2023 17:33:58 -0300 Subject: [PATCH 31/87] Add security policy (#1319) Signed-off-by: Pedro Kaj Kjellerup Nacht --- SECURITY.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..09b1bcfe83 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,18 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. 
**Do not disclose it as a public issue.** This gives us time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send a [private vulnerability report](https://github.com/keras-team/keras-nlp/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +This project is maintained by volunteers on a reasonable-effort basis. As such, +please give us 90 days to work on a fix before public exposure. From 1d3b520c8e9578de7dfaf2a41c20a786cc08cbf6 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 22 Nov 2023 14:11:08 -0800 Subject: [PATCH 32/87] Fix missing export for reversible embedding (#1327) --- keras_nlp/layers/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/keras_nlp/layers/__init__.py b/keras_nlp/layers/__init__.py index a16dd2b4e3..71c5eb0411 100644 --- a/keras_nlp/layers/__init__.py +++ b/keras_nlp/layers/__init__.py @@ -19,6 +19,7 @@ from keras_nlp.layers.modeling.lora_dense import LoraDense from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead from keras_nlp.layers.modeling.position_embedding import PositionEmbedding +from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding from keras_nlp.layers.modeling.sine_position_encoding import ( SinePositionEncoding, From 1b7c8351f7ea110c41eb59a961163174d50a0a04 Mon Sep 17 00:00:00 2001 From: Gabriel Rasskin <43894452+grasskin@users.noreply.github.com> Date: Wed, 22 Nov 2023 18:20:11 -0500 Subject: [PATCH 33/87] Add `version` API to keras_nlp (#1324) * Add `keras_nlp.version() API` * Correct version path * Export to keras_nlp * Format * Remove __init__ version check --- keras_nlp/__init__.py | 4 +--- keras_nlp/version.py | 23 +++++++++++++++++++++++ setup.py | 3 ++- 3 files changed, 26 insertions(+), 4 deletions(-) create mode 100644 keras_nlp/version.py diff --git a/keras_nlp/__init__.py b/keras_nlp/__init__.py index 0e81a7f731..e46172e307 100644 --- a/keras_nlp/__init__.py +++ b/keras_nlp/__init__.py @@ -26,6 +26,4 @@ from keras_nlp import samplers from keras_nlp import tokenizers from keras_nlp import utils - -# This is the global source of truth for the version number. -__version__ = "0.7.0" +from keras_nlp.version import __version__ diff --git a/keras_nlp/version.py b/keras_nlp/version.py new file mode 100644 index 0000000000..15fede3a08 --- /dev/null +++ b/keras_nlp/version.py @@ -0,0 +1,23 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from keras_nlp.api_export import keras_nlp_export + +# Unique source of truth for the version number. 
+__version__ = "0.7.0"
+
+
+@keras_nlp_export("keras_nlp.version")
+def version():
+    return __version__
diff --git a/setup.py b/setup.py
index 13214e70e0..52f5c84fdb 100644
--- a/setup.py
+++ b/setup.py
@@ -37,6 +37,7 @@ def get_version(rel_path):

 HERE = pathlib.Path(__file__).parent
 README = (HERE / "README.md").read_text()
+VERSION = get_version("keras_nlp/version.py")

 setup(
     name="keras-nlp",
@@ -45,7 +46,7 @@ def get_version(rel_path):
     ),
     long_description=README,
     long_description_content_type="text/markdown",
-    version=get_version("keras_nlp/__init__.py"),
+    version=VERSION,
     url="https://github.com/keras-team/keras-nlp",
     author="Keras team",
     author_email="keras-nlp@google.com",

From 36a62a6e18a813697427a5ad8ea7d11ad7c2dacd Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Mon, 27 Nov 2023 21:13:19 -0600
Subject: [PATCH 34/87] Fix Keras 3 version check (#1328)

* Fix Keras 3 version check

* Fix Keras 3 version check

* Update version check

* Raise error if Keras is not compatible with TF
---
 keras_nlp/backend/config.py | 18 +++++++++++++-----
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/keras_nlp/backend/config.py b/keras_nlp/backend/config.py
index 11b0f24269..9b73ae09cd 100644
--- a/keras_nlp/backend/config.py
+++ b/keras_nlp/backend/config.py
@@ -65,11 +65,19 @@

 def detect_if_tensorflow_uses_keras_3():
     # We follow the version of keras that tensorflow is configured to use.
-    from tensorflow import keras
-
-    # Note that only recent versions of keras have a `version()` function.
-    if hasattr(keras, "version") and keras.version().startswith("3."):
-        return True
+    try:
+        from tensorflow import keras
+
+        # Note that only recent versions of keras have a `version()` function.
+        if hasattr(keras, "version") and keras.version().startswith("3."):
+            return True
+    except:
+        raise ValueError(
+            "Unable to import `keras` with `tensorflow`. Please check that "
+            "your Keras and TensorFlow versions are compatible; Keras 3 "
+            "requires TensorFlow 2.15 or later. See keras.io/getting_started "
+            "for more information on installing Keras."
+        )

     # No `keras.version()` means we are on an old version of keras.
     return False

From b6a6e272a93fec94ee4a54d6e82d200ede092ed1 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Tue, 28 Nov 2023 19:25:38 -0800
Subject: [PATCH 35/87] Simplify running KerasNLP with Keras 3 (#1308)

* Simplify running KerasNLP with Keras 3

We should not land this until Keras 3, TensorFlow 2.15, and
keras-nlp-nightly are released. 
* Address comments * Tweaks * Add link * fix link --- README.md | 63 +++++++++------- keras_nlp/backend/__init__.py | 16 ++-- keras_nlp/backend/config.py | 73 +++---------------- keras_nlp/backend/keras.py | 2 - keras_nlp/conftest.py | 10 +-- .../cached_multi_head_attention_test.py | 4 +- keras_nlp/layers/modeling/lora_dense.py | 6 +- keras_nlp/layers/modeling/lora_dense_test.py | 2 +- keras_nlp/models/task.py | 2 +- keras_nlp/tests/test_case.py | 10 +-- keras_nlp/utils/tensor_utils.py | 2 +- .../convert_t5_checkpoints.py | 2 +- 12 files changed, 76 insertions(+), 116 deletions(-) diff --git a/README.md b/README.md index f109464690..4d41a8685e 100644 --- a/README.md +++ b/README.md @@ -4,9 +4,9 @@ [![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/keras-team/keras-nlp/issues) KerasNLP is a natural language processing library that works natively -with TensorFlow, JAX, or PyTorch. Built on [multi-backend Keras](https://keras.io/keras_core/announcement/) -(Keras 3), these models, layers, metrics, and tokenizers can be trained and -serialized in any framework and re-used in another without costly migrations. +with TensorFlow, JAX, or PyTorch. Built on Keras 3, these models, layers, +metrics, and tokenizers can be trained and serialized in any framework and +re-used in another without costly migrations. KerasNLP supports users through their entire development cycle. Our workflows are built from modular components that have state-of-the-art preset weights when @@ -40,19 +40,43 @@ to start learning our API. We welcome [contributions](CONTRIBUTING.md). ## Installation -To install the latest official release: +KerasNLP supports both Keras 2 and Keras 3. We recommend Keras 3 for all new +users, as it enables using KerasNLP models and layers with JAX, TensorFlow and +PyTorch. + +### Keras 2 Installation + +To install the latest KerasNLP release with Keras 2, simply run: + +``` +pip install --upgrade keras-nlp +``` + +### Keras 3 Installation + +There are currently two ways to install Keras 3 with KerasNLP. To install the +stable versions of KerasNLP and Keras 3, you should install Keras 3 **after** +installing KerasNLP. This is a temporary step while TensorFlow is pinned to +Keras 2, and will no longer be necessary after TensorFlow 2.16. ``` -pip install keras-nlp --upgrade +pip install --upgrade keras-nlp +pip install --upgrade keras>=3 ``` -To install the latest unreleased changes to the library, we recommend using -pip to install directly from the master branch on github: +To install the latest nightly changes for both KerasNLP and Keras, you can use +our nightly package. ``` -pip install git+https://github.com/keras-team/keras-nlp.git --upgrade +pip install --upgrade keras-nlp-nightly ``` +> [!IMPORTANT] +> Keras 3 will not function with TensorFlow 2.14 or earlier. + +Read [Getting started with Keras](https://keras.io/getting_started/) for more information +on installing Keras 3 and compatibility with different frameworks. + ## Quickstart Fine-tune BERT on a small sentiment analysis task using the @@ -60,7 +84,7 @@ Fine-tune BERT on a small sentiment analysis task using the ```python import os -os.environ["KERAS_BACKEND"] = "jax" # Or "tensorflow", or "torch". +os.environ["KERAS_BACKEND"] = "tensorflow" # Or "jax" or "torch"! import keras_nlp import tensorflow_datasets as tfds @@ -87,14 +111,9 @@ For more in depth guides and examples, visit https://keras.io/keras_nlp/. 
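Once the quickstart above has run, you can try the fine-tuned model on new
inputs (a minimal sketch; it assumes the fitted `BertClassifier` from the
quickstart is bound to the name `classifier`):

```python
# Returns a batch of classification logits, one row per input string.
classifier.predict(["What an amazing movie!"])
```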
## Configuring your backend -**Keras 3** is an upcoming release of the Keras library which supports -TensorFlow, Jax or Torch as backends. This is supported today in KerasNLP, -but will not be enabled by default until the official release of Keras 3. If you -`pip install keras-nlp` and run a script or notebook without changes, you will -be using TensorFlow and **Keras 2**. - -If you would like to enable a preview of the Keras 3 behavior, you can do -so by setting the `KERAS_BACKEND` environment variable. For example: +If you have Keras 3 installed in your environment (see installation above), +you can use KerasNLP with any of JAX, TensorFlow and PyTorch. To do so, set the +`KERAS_BACKEND` environment variable. For example: ```shell export KERAS_BACKEND=jax @@ -113,16 +132,6 @@ import keras_nlp > Make sure to set the `KERAS_BACKEND` before import any Keras libraries, it > will be used to set up Keras when it is first imported. -Until the Keras 3 release, KerasNLP will use a preview of Keras 3 on PyPI named -[keras-core](https://pypi.org/project/keras-core/). - -> [!IMPORTANT] -> If you set `KERAS_BACKEND` variable, you should `import keras_core as keras` -> instead of `import keras`. This is a temporary step until Keras 3 is out! - -To restore the default **Keras 2** behavior, `unset KERAS_BACKEND` before -importing Keras and KerasNLP. - ## Compatibility We follow [Semantic Versioning](https://semver.org/), and plan to diff --git a/keras_nlp/backend/__init__.py b/keras_nlp/backend/__init__.py index cf1c63c2a9..1ffbde75a5 100644 --- a/keras_nlp/backend/__init__.py +++ b/keras_nlp/backend/__init__.py @@ -14,14 +14,16 @@ """ Keras backend module. -This module adds a temporarily Keras API surface that is fully under KerasNLP -control. This allows us to switch between `keras_core` and `tf.keras`, as well -as add shims to support older version of `tf.keras`. +This module adds a temporary Keras API surface that is fully under KerasNLP +control. The goal is to allow us to write Keras 3-like code everywhere, while +still supporting Keras 2. We do this by using the `keras_core` package with +Keras 2 to backport Keras 3 numerics APIs (`keras.ops` and `keras.random`) into +Keras 2. The sub-modules exposed are as follows: -- `config`: check which backend is being run. -- `keras`: The full `keras` API (via `keras_core` or `tf.keras`). -- `ops`: `keras_core.ops`, always tf backed if using `tf.keras`. -- `random`: `keras_core.random`, always tf backed if using `tf.keras`. +- `config`: check which version of Keras is being run. +- `keras`: The full `keras` API with compat shims for older Keras versions. +- `ops`: `keras.ops` for Keras 3 or `keras_core.ops` for Keras 2. +- `random`: `keras.random` for Keras 3 or `keras_core.ops` for Keras 2. """ from keras_nlp.backend import config diff --git a/keras_nlp/backend/config.py b/keras_nlp/backend/config.py index 9b73ae09cd..be3fe23335 100644 --- a/keras_nlp/backend/config.py +++ b/keras_nlp/backend/config.py @@ -12,56 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -import json import os -_MULTI_BACKEND = False -_USE_KERAS_3 = False - -# Set Keras base dir path given KERAS_HOME env variable, if applicable. -# Otherwise either ~/.keras or /tmp. 
-if "KERAS_HOME" in os.environ: - _keras_dir = os.environ.get("KERAS_HOME") -else: - _keras_base_dir = os.path.expanduser("~") - if not os.access(_keras_base_dir, os.W_OK): - _keras_base_dir = "/tmp" - _keras_dir = os.path.join(_keras_base_dir, ".keras") - -# Attempt to read KerasNLP config file. -_config_path = os.path.expanduser(os.path.join(_keras_dir, "keras_nlp.json")) -if os.path.exists(_config_path): - try: - with open(_config_path) as f: - _config = json.load(f) - except ValueError: - _config = {} - _MULTI_BACKEND = _config.get("multi_backend", _MULTI_BACKEND) - -# Save config file, if possible. -if not os.path.exists(_keras_dir): - try: - os.makedirs(_keras_dir) - except OSError: - # Except permission denied and potential race conditions - # in multi-threaded environments. - pass - -if not os.path.exists(_config_path): - _config = { - "multi_backend": _MULTI_BACKEND, - } - try: - with open(_config_path, "w") as f: - f.write(json.dumps(_config, indent=4)) - except IOError: - # Except permission denied. - pass - -# If KERAS_BACKEND is set in the environment use multi-backend keras. -if "KERAS_BACKEND" in os.environ and os.environ["KERAS_BACKEND"]: - _MULTI_BACKEND = True - def detect_if_tensorflow_uses_keras_3(): # We follow the version of keras that tensorflow is configured to use. @@ -84,8 +36,16 @@ def detect_if_tensorflow_uses_keras_3(): _USE_KERAS_3 = detect_if_tensorflow_uses_keras_3() -if _USE_KERAS_3: - _MULTI_BACKEND = True + +if not _USE_KERAS_3: + backend = os.environ.get("KERAS_BACKEND") + if backend and backend != "tensorflow": + raise RuntimeError( + "When running Keras 2, the `KERAS_BACKEND` environment variable " + f"must either be unset or `'tensorflow'`. Received: `{backend}`. " + "To set another backend, please install Keras 3. 
See "
+            "https://github.com/keras-team/keras-nlp#installation"
+        )


 def keras_3():
@@ -93,20 +53,11 @@ def keras_3():
     return _USE_KERAS_3


-def multi_backend():
-    """Check if multi-backend Keras is enabled."""
-    return _MULTI_BACKEND
-
-
 def backend():
     """Check the backend framework."""
-    if not multi_backend():
-        return "tensorflow"
     if not keras_3():
-        import keras_core
-
-        return keras_core.config.backend()
+        return "tensorflow"

-    from tensorflow import keras
+    import keras

     return keras.config.backend()
diff --git a/keras_nlp/backend/keras.py b/keras_nlp/backend/keras.py
index 865f62d1fc..c248438f33 100644
--- a/keras_nlp/backend/keras.py
+++ b/keras_nlp/backend/keras.py
@@ -20,8 +20,6 @@

 if config.keras_3():
     from keras import *  # noqa: F403, F401
-elif config.multi_backend():
-    from keras_core import *  # noqa: F403, F401
 else:
     from tensorflow.keras import *  # noqa: F403, F401
diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py
index c918090ae6..3a8ddc16ad 100644
--- a/keras_nlp/conftest.py
+++ b/keras_nlp/conftest.py
@@ -73,8 +73,8 @@ def pytest_collection_modifyitems(config, items):
         not backend_config.backend() == "tensorflow",
         reason="tests only run on tf backend",
     )
-    multi_backend_only = pytest.mark.skipif(
-        not backend_config.multi_backend(),
+    keras_3_only = pytest.mark.skipif(
+        not backend_config.keras_3(),
         reason="tests only run on with multi-backend keras",
     )
     for item in items:
@@ -84,11 +84,11 @@
             item.add_marker(skip_extra_large)
         if "tf_only" in item.keywords:
             item.add_marker(tf_only)
-        if "multi_backend_only" in item.keywords:
-            item.add_marker(multi_backend_only)
+        if "keras_3_only" in item.keywords:
+            item.add_marker(keras_3_only)

 # Disable traceback filtering for quicker debugging of tests failures.
 tf.debugging.disable_traceback_filtering()

-if backend_config.multi_backend():
+if backend_config.keras_3():
     keras.config.disable_traceback_filtering()
diff --git a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py
index 4aa0998454..052ce66ec1 100644
--- a/keras_nlp/layers/modeling/cached_multi_head_attention_test.py
+++ b/keras_nlp/layers/modeling/cached_multi_head_attention_test.py
@@ -36,9 +36,9 @@ def test_layer_behaviors(self):
             expected_output_shape=(2, 4, 6),
             expected_num_trainable_weights=8,
             expected_num_non_trainable_variables=1,
-            # tf.keras does not handle mixed precision correctly when not set
+            # Keras 2 does not handle mixed precision correctly when not set
             # globally.
-            run_mixed_precision_check=config.multi_backend(),
+            run_mixed_precision_check=config.keras_3(),
         )

     def test_cache_call_is_correct(self):
diff --git a/keras_nlp/layers/modeling/lora_dense.py b/keras_nlp/layers/modeling/lora_dense.py
index 3bc23e79f5..c439d86399 100644
--- a/keras_nlp/layers/modeling/lora_dense.py
+++ b/keras_nlp/layers/modeling/lora_dense.py
@@ -127,10 +127,10 @@ def __init__(
             kwargs["dtype"] = inner_dense.dtype_policy
         super().__init__(**kwargs)

-        if not config.multi_backend():
+        if not config.keras_3():
             raise ValueError(
-                "Lora only works with multi-backend Keras 3. Please set the "
-                "`KERAS_BACKEND` environment variable to use this API."
+                "Lora requires Keras 3, but Keras 2 is installed. 
Please " + "see https://github.com/keras-team/keras-nlp#installation" ) if isinstance(inner_dense, keras.layers.Dense): diff --git a/keras_nlp/layers/modeling/lora_dense_test.py b/keras_nlp/layers/modeling/lora_dense_test.py index 81b575310a..a15718b03a 100644 --- a/keras_nlp/layers/modeling/lora_dense_test.py +++ b/keras_nlp/layers/modeling/lora_dense_test.py @@ -20,7 +20,7 @@ from keras_nlp.tests.test_case import TestCase -@pytest.mark.multi_backend_only +@pytest.mark.keras_3_only class LoraDenseTest(TestCase): def test_layer_behaviors(self): self.run_layer_test( diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index 2c1d0f40f1..f159fbfef8 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -319,7 +319,7 @@ def bold_text(x): print_fn(console.end_capture(), line_break=False) # Avoid `tf.keras.Model.summary()`, so the above output matches. - if config.multi_backend(): + if config.keras_3(): super().summary( line_length=line_length, positions=positions, diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py index fefa7a3a0f..7797ca1cce 100644 --- a/keras_nlp/tests/test_case.py +++ b/keras_nlp/tests/test_case.py @@ -148,7 +148,7 @@ def call(self, x): model.compile(optimizer="sgd", loss="mse", jit_compile=jit_compile) model.fit(input_data, output_data, verbose=0) - if config.multi_backend(): + if config.keras_3(): # Build test. layer = cls(**init_kwargs) if isinstance(input_data, dict): @@ -253,8 +253,8 @@ def run_serialization_test(self, instance): revived_cfg = revived_instance.get_config() revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4) self.assertEqual(cfg_json, revived_cfg_json) - # Dir tests only work on keras-core. - if config.multi_backend(): + # Dir tests only work with Keras 3. + if config.keras_3(): self.assertEqual(ref_dir, dir(revived_instance)) # serialization roundtrip @@ -266,8 +266,8 @@ def run_serialization_test(self, instance): revived_cfg = revived_instance.get_config() revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4) self.assertEqual(cfg_json, revived_cfg_json) - # Dir tests only work on keras-core. - if config.multi_backend(): + # Dir tests only work with Keras 3. 
+ if config.keras_3(): new_dir = dir(revived_instance)[:] for lst in [ref_dir, new_dir]: if "__annotations__" in lst: diff --git a/keras_nlp/utils/tensor_utils.py b/keras_nlp/utils/tensor_utils.py index 9f639ed7c1..a88d80a4da 100644 --- a/keras_nlp/utils/tensor_utils.py +++ b/keras_nlp/utils/tensor_utils.py @@ -153,7 +153,7 @@ def is_tensor_type(x): def standardize_dtype(dtype): - if config.multi_backend(): + if config.keras_3(): return keras.backend.standardize_dtype(dtype) if hasattr(dtype, "name"): return dtype.name diff --git a/tools/checkpoint_conversion/convert_t5_checkpoints.py b/tools/checkpoint_conversion/convert_t5_checkpoints.py index 6f16a64e6b..89a365f00f 100644 --- a/tools/checkpoint_conversion/convert_t5_checkpoints.py +++ b/tools/checkpoint_conversion/convert_t5_checkpoints.py @@ -20,7 +20,7 @@ from absl import app from absl import flags from checkpoint_conversion_utils import get_md5_checksum -from keras_core import ops +from keras import ops import keras_nlp From a05f411a27eab437e71a1651c97e9addf26298ef Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 29 Nov 2023 12:14:48 -0800 Subject: [PATCH 36/87] Fix issues with version (#1332) * Fix issues with version * A few more simplifications --- keras_nlp/__init__.py | 3 ++- keras_nlp/{version.py => version_utils.py} | 0 pip_build.py | 14 ++++++++++++-- setup.py | 5 ++++- 4 files changed, 18 insertions(+), 4 deletions(-) rename keras_nlp/{version.py => version_utils.py} (100%) diff --git a/keras_nlp/__init__.py b/keras_nlp/__init__.py index e46172e307..30f8a53b16 100644 --- a/keras_nlp/__init__.py +++ b/keras_nlp/__init__.py @@ -26,4 +26,5 @@ from keras_nlp import samplers from keras_nlp import tokenizers from keras_nlp import utils -from keras_nlp.version import __version__ +from keras_nlp.version_utils import __version__ +from keras_nlp.version_utils import version diff --git a/keras_nlp/version.py b/keras_nlp/version_utils.py similarity index 100% rename from keras_nlp/version.py rename to keras_nlp/version_utils.py diff --git a/pip_build.py b/pip_build.py index 6bf576df66..3655e6fb63 100644 --- a/pip_build.py +++ b/pip_build.py @@ -63,11 +63,21 @@ def export_version_string(version, is_nightly=False): ) f.write(setup_contents) - # Make sure to export the __version__ string + # Overwrite the version string with our package version. + with open(os.path.join(package, "src", "version_utils.py")) as f: + version_contents = f.readlines() + with open(os.path.join(package, "src", "version_utils.py"), "w") as f: + for line in version_contents: + if line.startswith("__version__"): + f.write(f'__version__ = "{version}"\n') + else: + f.write(line) + # Make sure to export the __version__ string. 
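+    # (Done below by appending an import from `keras_nlp.src.version_utils`,
+    # rather than writing a hardcoded version literal into `__init__.py`.)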
with open(os.path.join(package, "__init__.py")) as f: init_contents = f.read() with open(os.path.join(package, "__init__.py"), "w") as f: - f.write(init_contents + "\n\n" + f'__version__ = "{version}"\n') + f.write(init_contents) + f.write("from keras_nlp.src.version_utils import __version__\n") def build(root_path, is_nightly=False): diff --git a/setup.py b/setup.py index 52f5c84fdb..6acd5416d7 100644 --- a/setup.py +++ b/setup.py @@ -37,7 +37,10 @@ def get_version(rel_path): HERE = pathlib.Path(__file__).parent README = (HERE / "README.md").read_text() -VERSION = get_version("keras_nlp/version.py") +if os.path.exists("keras_nlp/version_utils.py"): + VERSION = get_version("keras_nlp/version_utils.py") +else: + VERSION = get_version("keras_nlp/src/version_utils.py") setup( name="keras-nlp", From ca863cc8612982ddf264eb8b8235eb0bc8c633b1 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 4 Dec 2023 10:25:50 -0800 Subject: [PATCH 37/87] Fix typo in whisper presets files (#1337) --- keras_nlp/models/whisper/whisper_presets.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index e8c0d075a4..b740ed833d 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -234,11 +234,11 @@ "special_tokens": ENGLISH_SPECIAL_TOKENS, "language_tokens": None, }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/model.h5", + "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/model.h5", "weights_hash": "b75a89225e20019d85ff5f1c362f8a49", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/vocab.json", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/vocab.json", "vocabulary_hash": "22377f841debacb023848b3468ea3281", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/merges.txt", + "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/merges.txt", "merges_hash": "093ecf3f30371012f2e96fcfb10ea6ab", }, "whisper_medium_en": { @@ -370,11 +370,11 @@ "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/model.h5", + "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/model.h5", "weights_hash": "c90c6a895e522056b77b924b6e907ed8", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/vocab.json", + "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/vocab.json", "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/merges.txt", + "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/merges.txt", "merges_hash": "c7f01d4100f6211417988889bf35ccd8", }, "whisper_medium_multi": { From 24a70ba615ea64e0dff63016b485918d196f5927 Mon Sep 17 00:00:00 2001 From: Pranav Prajapati <94780581+pranavvp16@users.noreply.github.com> Date: Tue, 5 Dec 2023 03:52:39 +0530 Subject: [PATCH 38/87] `ELECTRA` backbone implementation in keras (#1291) * Added ElectraBackbone * Added backbone tests for ELECTRA * Fix config * Add model import to __init__ --- 
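(For orientation before the diff: ELECTRA decouples the token embedding width
from the transformer hidden width, so when `embedding_dim != hidden_dim` the
backbone below adds an `embeddings_projection` dense layer. A minimal usage
sketch with hypothetical sizes, not part of this patch:)

```python
import numpy as np
import keras_nlp

backbone = keras_nlp.models.ElectraBackbone(
    vocab_size=1000,
    num_layers=2,
    num_heads=2,
    hidden_dim=128,       # transformer width
    embedding_dim=64,     # narrower embedding table, projected up to 128
    intermediate_dim=256,
)
outputs = backbone(
    {
        "token_ids": np.ones((1, 12), dtype="int32"),
        "segment_ids": np.zeros((1, 12), dtype="int32"),
        "padding_mask": np.ones((1, 12), dtype="int32"),
    }
)
# Both outputs are at the projected hidden width, not the embedding width.
print(outputs["sequence_output"].shape)  # (1, 12, 128)
print(outputs["pooled_output"].shape)    # (1, 128)
```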
keras_nlp/models/__init__.py                 |   1 +
 keras_nlp/models/electra/electra_backbone.py | 216 ++++++++++++
 .../models/electra/electra_backbone_test.py  |  56 +++++
 3 files changed, 273 insertions(+)
 create mode 100644 keras_nlp/models/electra/electra_backbone.py
 create mode 100644 keras_nlp/models/electra/electra_backbone_test.py

diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py
index eb4e74be3a..858be70ec5 100644
--- a/keras_nlp/models/__init__.py
+++ b/keras_nlp/models/__init__.py
@@ -63,6 +63,7 @@
 from keras_nlp.models.distil_bert.distil_bert_tokenizer import (
     DistilBertTokenizer,
 )
+from keras_nlp.models.electra.electra_backbone import ElectraBackbone
 from keras_nlp.models.f_net.f_net_backbone import FNetBackbone
 from keras_nlp.models.f_net.f_net_classifier import FNetClassifier
 from keras_nlp.models.f_net.f_net_masked_lm import FNetMaskedLM
diff --git a/keras_nlp/models/electra/electra_backbone.py b/keras_nlp/models/electra/electra_backbone.py
new file mode 100644
index 0000000000..9c67fe4753
--- /dev/null
+++ b/keras_nlp/models/electra/electra_backbone.py
@@ -0,0 +1,216 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from keras_nlp.api_export import keras_nlp_export
+from keras_nlp.backend import keras
+from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
+from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
+from keras_nlp.layers.modeling.transformer_encoder import TransformerEncoder
+from keras_nlp.models.backbone import Backbone
+from keras_nlp.utils.keras_utils import gelu_approximate
+
+
+def electra_kernel_initializer(stddev=0.02):
+    return keras.initializers.TruncatedNormal(stddev=stddev)
+
+
+@keras_nlp_export("keras_nlp.models.ElectraBackbone")
+class ElectraBackbone(Backbone):
+    """An Electra encoder network.
+
+    This network implements a bidirectional Transformer-based encoder as
+    described in ["Electra: Pre-training Text Encoders as Discriminators Rather
+    Than Generators"](https://arxiv.org/abs/2003.10555). It includes the
+    embedding lookups and transformer layers, but not the masked language model
+    or classification task networks.
+
+    The default constructor gives a fully customizable, randomly initialized
+    Electra encoder with any number of layers, heads, and embedding
+    dimensions.
+
+    Disclaimer: Pre-trained models are provided on an "as is" basis, without
+    warranties or conditions of any kind. The underlying model is provided by a
+    third party and subject to a separate license, available
+    [here](https://huggingface.co/docs/transformers/model_doc/electra#overview).
+
+    Args:
+        vocab_size: int. The size of the token vocabulary.
+        num_layers: int. The number of transformer layers.
+        num_heads: int. The number of attention heads for each transformer.
+            The hidden size must be divisible by the number of attention heads.
+        hidden_dim: int. The size of the transformer encoding and pooler layers.
+        embedding_dim: int. The size of the token embeddings.
+        intermediate_dim: int. The output dimension of the first Dense layer in
+            a two-layer feedforward network for each transformer.
+        dropout: float. Dropout probability for the Transformer encoder.
+        max_sequence_length: int. The maximum sequence length that this encoder
+            can consume. If None, `max_sequence_length` uses the value from
+            sequence length. This determines the variable shape for positional
+            embeddings.
+
+    Examples:
+    ```python
+    input_data = {
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "segment_ids": np.array([[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0]]),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
+    }
+    # Randomly initialized Electra encoder
+    backbone = keras_nlp.models.ElectraBackbone(
+        vocab_size=1000,
+        num_layers=2,
+        num_heads=2,
+        hidden_dim=32,
+        embedding_dim=32,
+        intermediate_dim=64,
+        dropout=0.1,
+        max_sequence_length=512,
+    )
+    # Returns a dict with sequence and pooled outputs.
+    outputs = backbone(input_data)
+    sequence_output = outputs["sequence_output"]
+    pooled_output = outputs["pooled_output"]
+    ```
+    """
+
+    def __init__(
+        self,
+        vocab_size,
+        num_layers,
+        num_heads,
+        hidden_dim,
+        embedding_dim,
+        intermediate_dim,
+        dropout=0.1,
+        max_sequence_length=512,
+        num_segments=2,
+        **kwargs,
+    ):
+        # Index of classification token in the vocabulary
+        cls_token_index = 0
+        # Inputs
+        token_id_input = keras.Input(
+            shape=(None,), dtype="int32", name="token_ids"
+        )
+        segment_id_input = keras.Input(
+            shape=(None,), dtype="int32", name="segment_ids"
+        )
+        padding_mask = keras.Input(
+            shape=(None,), dtype="int32", name="padding_mask"
+        )
+
+        # Embed tokens, positions, and segment ids.
+        token_embedding_layer = ReversibleEmbedding(
+            input_dim=vocab_size,
+            output_dim=embedding_dim,
+            embeddings_initializer=electra_kernel_initializer(),
+            name="token_embedding",
+        )
+        token_embedding = token_embedding_layer(token_id_input)
+        position_embedding = PositionEmbedding(
+            initializer=electra_kernel_initializer(),
+            sequence_length=max_sequence_length,
+            name="position_embedding",
+        )(token_embedding)
+        segment_embedding = keras.layers.Embedding(
+            input_dim=num_segments,
+            output_dim=embedding_dim,
+            embeddings_initializer=electra_kernel_initializer(),
+            name="segment_embedding",
+        )(segment_id_input)
+
+        # Add all embeddings together.
+        x = keras.layers.Add()(
+            (token_embedding, position_embedding, segment_embedding),
+        )
+        # Layer normalization
+        x = keras.layers.LayerNormalization(
+            name="embeddings_layer_norm",
+            axis=-1,
+            epsilon=1e-12,
+            dtype="float32",
+        )(x)
+        # Dropout
+        x = keras.layers.Dropout(
+            dropout,
+            name="embeddings_dropout",
+        )(x)
+        if hidden_dim != embedding_dim:
+            x = keras.layers.Dense(
+                hidden_dim,
+                kernel_initializer=electra_kernel_initializer(),
+                name="embeddings_projection",
+            )(x)
+
+        # Apply successive transformer encoder blocks.
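+        # Each block applies self-attention (respecting `padding_mask`)
+        # followed by a feedforward sublayer, preserving the
+        # (batch, sequence, hidden_dim) shape.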
+ for i in range(num_layers): + x = TransformerEncoder( + num_heads=num_heads, + intermediate_dim=intermediate_dim, + activation=gelu_approximate, + dropout=dropout, + layer_norm_epsilon=1e-12, + kernel_initializer=electra_kernel_initializer(), + name=f"transformer_layer_{i}", + )(x, padding_mask=padding_mask) + + sequence_output = x + x = keras.layers.Dense( + hidden_dim, + kernel_initializer=electra_kernel_initializer(), + activation="tanh", + name="pooled_dense", + )(x) + pooled_output = x[:, cls_token_index, :] + + # Instantiate using Functional API Model constructor + super().__init__( + inputs={ + "token_ids": token_id_input, + "segment_ids": segment_id_input, + "padding_mask": padding_mask, + }, + outputs={ + "sequence_output": sequence_output, + "pooled_output": pooled_output, + }, + **kwargs, + ) + + # All references to self below this line + self.vocab_size = vocab_size + self.num_layers = num_layers + self.num_heads = num_heads + self.hidden_dim = hidden_dim + self.embedding_dim = embedding_dim + self.intermediate_dim = intermediate_dim + self.dropout = dropout + self.max_sequence_length = max_sequence_length + self.num_segments = num_segments + self.cls_token_index = cls_token_index + self.token_embedding = token_embedding_layer + + def get_config(self): + config = super().get_config() + config.update( + { + "vocab_size": self.vocab_size, + "num_layers": self.num_layers, + "num_heads": self.num_heads, + "hidden_dim": self.hidden_dim, + "embedding_dim": self.embedding_dim, + "intermediate_dim": self.intermediate_dim, + "dropout": self.dropout, + "max_sequence_length": self.max_sequence_length, + "num_segments": self.num_segments, + } + ) + return config diff --git a/keras_nlp/models/electra/electra_backbone_test.py b/keras_nlp/models/electra/electra_backbone_test.py new file mode 100644 index 0000000000..09e6c53344 --- /dev/null +++ b/keras_nlp/models/electra/electra_backbone_test.py @@ -0,0 +1,56 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import pytest
+
+from keras_nlp.backend import ops
+from keras_nlp.models.electra.electra_backbone import ElectraBackbone
+from keras_nlp.tests.test_case import TestCase
+
+
+class ElectraBackboneTest(TestCase):
+    def setUp(self):
+        self.init_kwargs = {
+            "vocab_size": 10,
+            "num_layers": 2,
+            "num_heads": 2,
+            "hidden_dim": 2,
+            "embedding_dim": 2,
+            "intermediate_dim": 4,
+            "max_sequence_length": 5,
+        }
+        self.input_data = {
+            "token_ids": ops.ones((2, 5), dtype="int32"),
+            "segment_ids": ops.zeros((2, 5), dtype="int32"),
+            "padding_mask": ops.ones((2, 5), dtype="int32"),
+        }
+
+    def test_backbone_basics(self):
+        self.run_backbone_test(
+            cls=ElectraBackbone,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output_shape={
+                "sequence_output": (2, 5, 2),
+                "pooled_output": (2, 2),
+            },
+        )
+
+    @pytest.mark.large
+    def test_saved_model(self):
+        self.run_model_saving_test(
+            cls=ElectraBackbone,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+        )
From 60922135a4fb49d321d667d1a3edefba81ee91f2 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Wed, 6 Dec 2023 12:09:42 -0800
Subject: [PATCH 39/87] Fix t5 tokenizer expected output (#1348)

I am not exactly sure how this was ever working. We had copied the BERT
output; T5 has a different tokenizer.
---
 keras_nlp/models/t5/t5_tokenizer_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/models/t5/t5_tokenizer_test.py b/keras_nlp/models/t5/t5_tokenizer_test.py
index 77ad734660..a7558e8b13 100644
--- a/keras_nlp/models/t5/t5_tokenizer_test.py
+++ b/keras_nlp/models/t5/t5_tokenizer_test.py
@@ -52,7 +52,7 @@ def test_smallest_preset(self):
             cls=T5Tokenizer,
             preset=preset,
             input_data=["The quick brown fox."],
-            expected_output=[[1996, 4248, 2829, 4419, 1012]],
+            expected_output=[[37, 1704, 4216, 3, 20400, 5]],
         )

     @pytest.mark.extra_large
From 70bdfcf53f063899ac3bdf49a4ac495a8f5c529d Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 7 Dec 2023 09:34:25 -0800
Subject: [PATCH 40/87] Add __init__.py for electra (#1352)

---
 keras_nlp/models/electra/__init__.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)
 create mode 100644 keras_nlp/models/electra/__init__.py

diff --git a/keras_nlp/models/electra/__init__.py b/keras_nlp/models/electra/__init__.py
new file mode 100644
index 0000000000..ba0c2545e4
--- /dev/null
+++ b/keras_nlp/models/electra/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
From 488bd2b2ce477c37acbd2dbad98f4b2dbfb80930 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Fri, 8 Dec 2023 11:49:39 -0800
Subject: [PATCH 41/87] Remove lora dense for now (#1359)

We are considering bringing this into core Keras, with a slightly
different design. Let's remove this version from KerasNLP so people
don't rely on it.
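(For reference, a minimal NumPy sketch of the low-rank update the removed
layer computed, with hypothetical shapes; `scale = alpha / rank`, as in the
deleted code below:)

```python
import numpy as np

rank, alpha = 4, 8.0
scale = alpha / rank
x = np.random.uniform(size=(2, 16))
kernel = np.random.uniform(size=(16, 16))    # frozen inner Dense kernel
lora_a = np.random.uniform(size=(16, rank))  # trainable down-projection
lora_b = np.random.uniform(size=(rank, 16))  # up-projection (zero-init in the layer)

# Forward pass: frozen dense output plus the scaled low-rank update.
output = x @ kernel + (x @ lora_a) @ lora_b * scale

# `merge_weights()` folded the same update back into the kernel.
merged = kernel + lora_a @ lora_b * scale
assert np.allclose(output, x @ merged)
```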
--- keras_nlp/layers/__init__.py | 1 - keras_nlp/layers/modeling/lora_dense.py | 234 ------------------- keras_nlp/layers/modeling/lora_dense_test.py | 135 ----------- 3 files changed, 370 deletions(-) delete mode 100644 keras_nlp/layers/modeling/lora_dense.py delete mode 100644 keras_nlp/layers/modeling/lora_dense_test.py diff --git a/keras_nlp/layers/__init__.py b/keras_nlp/layers/__init__.py index 71c5eb0411..595c4eb661 100644 --- a/keras_nlp/layers/__init__.py +++ b/keras_nlp/layers/__init__.py @@ -16,7 +16,6 @@ CachedMultiHeadAttention, ) from keras_nlp.layers.modeling.f_net_encoder import FNetEncoder -from keras_nlp.layers.modeling.lora_dense import LoraDense from keras_nlp.layers.modeling.masked_lm_head import MaskedLMHead from keras_nlp.layers.modeling.position_embedding import PositionEmbedding from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding diff --git a/keras_nlp/layers/modeling/lora_dense.py b/keras_nlp/layers/modeling/lora_dense.py deleted file mode 100644 index c439d86399..0000000000 --- a/keras_nlp/layers/modeling/lora_dense.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import re - -from keras_nlp.api_export import keras_nlp_export -from keras_nlp.backend import config -from keras_nlp.backend import keras -from keras_nlp.backend import ops - - -def validate_einsum_equation(equation): - # For simplicity, we greatly restrict possible einsum equations. The final - # axis of the input must be the first axis of our kernel, and must not - # appear in our output. - left, right, output = re.split(",|->", equation) - valid = ( - left[-1] == right[0] - and left[-1] not in output - and set(left[:-1]).isdisjoint(set(right[1:])) - ) - if not valid: - raise ValueError( - "When passing a `EinsumDense` layer to a `LoraDense` layer, the " - "einsum `equation` must always have the form `*x,x*->*`, where " - "each `*` can be any sequence. Conceptually, the `equation` should " - "always represent a dense matmul on the last axis of the input. " - f"Received invalid equation `'{equation}'`." - ) - - -@keras_nlp_export("keras_nlp.layers.LoraDense") -class LoraDense(keras.layers.Layer): - """A LoRA adapter layer for a dense input layer. - - This layer implements a low-rank decomposition of a dense transformation, as - described in [LoRA: Low-Rank Adaptation Of Large Language Models](https://arxiv.org/pdf/2106.09685.pdf) - This layer can be used to replace a dense layer with a layer whose - parameters are mostly frozen. - - By default, this layer takes in an `inner_dense` layer, freezes its - parameters, and builds a low-rank decomposed update to sum with the original - `inner_dense` output. These update parameters can be merged back into the - `inner_dense` kernel by calling `merge_weights()`. - - Args: - inner_dense: A `keras.layers.Dense` or `keras.layers.EinsumDense`. - The inner dense layer to freeze and wrap with the `LoraDense` - layer. 
Note that for `EinsumDense` layers, the einsum equation must - represent a dense transformation on the last axis of the input, - though adding new axes to the output (e.g. a multi-head axis) is - allowed. - rank: int. The inner rank of the decomposed dense transformation. The - lower this number, the fewer trainable parameters the layer will - have. - alpha: float. A constant value used for scaling the lora update. The - lora update to the original dense transformation will be scaled by - `alpha / rank`. - lora_a_initializer: The initializer to use for the inner projection - from layer inputs to the inner `rank` intermediate outputs. - freeze_kernel: If true, the kernel of the inner dense layer will have - `trainable` set to `False`. - freeze_bias: If true, the kernel of the inner dense layer will have - `trainable` set to `False`. - **kwargs: other keyword arguments. - - Examples: - - Wrap a `Dense` layer. - ```python - batch_size, feature_size = 4, 16 - rank = 4 - inputs = np.random.uniform(size=(batch_size, feature_size)) - inner_dense = keras.layers.Dense(feature_size) - lora_dense = keras_nlp.layers.LoraDense(inner_dense, rank=4) - # Output with inner dense begins equal. - assert np.allclose(inner_dense(inputs), lora_dense(inputs)) - - # Add some random updates to the lora parameters. - lora_dense.lora_a.assign(np.random.uniform(size=(feature_size, rank))) - lora_dense.lora_b.assign(np.random.uniform(size=(rank, feature_size))) - assert not np.allclose(inner_dense(inputs), lora_dense(inputs)) - - # Merge the lora dense and output - lora_dense.merge_weights() - assert np.allclose(inner_dense(inputs), lora_dense(inputs)) - ``` - - Wrap an `EinsumDense` layer with a multi-head projection. - ```python - batch_size, sequence_length, feature_size = 4, 10, 16 - num_heads = 2 - rank = 4 - inputs = np.random.uniform(size=(batch_size, sequence_length, feature_size)) - inner_dense = keras.layers.EinsumDense( - "abc,cde->abde", - output_shape=(sequence_length, num_heads, feature_size // num_heads), - ) - lora_dense = keras_nlp.layers.LoraDense(inner_dense, rank=4) - # Output shape (4, 10, 2, 8) - lora_dense(inputs) - ``` - """ - - def __init__( - self, - inner_dense, - rank=8, - alpha=8.0, - lora_a_initializer="variance_scaling", - freeze_kernel=True, - freeze_bias=True, - **kwargs, - ): - # Default to the same dtype as our inner layer. - if "dtype" not in kwargs: - kwargs["dtype"] = inner_dense.dtype_policy - super().__init__(**kwargs) - - if not config.keras_3(): - raise ValueError( - "Lora requires with Keras 3, but Keras 2 is installed. Please " - "see https://github.com/keras-team/keras-nlp#installation" - ) - - if isinstance(inner_dense, keras.layers.Dense): - self.inner_dense = inner_dense - elif isinstance(inner_dense, keras.layers.EinsumDense): - self.inner_dense = inner_dense - validate_einsum_equation(inner_dense.equation) - else: - raise ValueError( - "Only `Dense` and `EinsumDense` inner layers are supported. 
" - f"Received: inner_dense={inner_dense}" - ) - - self.rank = rank - self.alpha = alpha - self.scale = alpha / rank - self.freeze_kernel = freeze_kernel - self.freeze_bias = freeze_bias - self.lora_a_initializer = keras.initializers.get(lora_a_initializer) - - if inner_dense.built: - self.build_from_config(inner_dense.get_build_config()) - - def build(self, inputs_shape): - if not self.inner_dense.built: - self.inner_dense.build(inputs_shape) - - if self.freeze_kernel and self.inner_dense.kernel is not None: - self.inner_dense.kernel.trainable = False - - if self.freeze_bias and self.inner_dense.bias is not None: - self.inner_dense.bias.trainable = False - - input_dim = inputs_shape[-1] - self.lora_a = self.add_weight( - name="lora_a", - shape=(input_dim, self.rank), - initializer=self.lora_a_initializer, - ) - kernel_shape = self.inner_dense.kernel.shape - self.lora_b = self.add_weight( - name="lora_b", - shape=(self.rank,) + kernel_shape[1:], - initializer="zeros", - ) - self.built = True - - def merge_weights(self): - """Merge lora updates into the wrapped dense layer. - - This function should only be called outside of any compiled context - (e.g. not during `fit()`, `predict()` or `evaluate()`). It will merge - the updates from the lora layers into the original dense layer, and - re-initialize the lora variables. - """ - if not self.built: - return - - # Compute matmul of lora_a and lora_b to get a kernel sized update. - update = ops.tensordot(self.lora_a, self.lora_b, axes=([-1], [0])) - update = update * ops.cast(self.scale, update.dtype) - # Add lora updates back into the inner dense kernel. - self.inner_dense.kernel.assign_add(update) - # Re-initialize lora weights. - self.lora_a.assign( - self.lora_a_initializer(self.lora_a.shape, self.lora_a.dtype) - ) - self.lora_b.assign(ops.zeros_like(self.lora_b)) - - def call(self, inputs): - original_output = self.inner_dense(inputs) - # Compute the low-rank intermediate output. - update = ops.matmul(inputs, self.lora_a) - # Use the matching dense computation for a Dense or EinsumDense. - if isinstance(self.inner_dense, keras.layers.Dense): - update = ops.matmul(update, self.lora_b) - else: - update = ops.einsum(self.inner_dense.equation, update, self.lora_b) - # Scale and sum the lora update with the original frozen output. - return original_output + update * ops.cast(self.scale, update.dtype) - - @classmethod - def from_config(cls, config): - config["inner_dense"] = keras.layers.deserialize(config["inner_dense"]) - return super().from_config(config) - - def get_config(self): - config = super().get_config() - config.update( - { - "inner_dense": keras.layers.serialize(self.inner_dense), - "rank": self.rank, - "alpha": self.alpha, - "lora_a_initializer": keras.initializers.serialize( - self.lora_a_initializer - ), - "freeze_kernel": self.freeze_kernel, - "freeze_bias": self.freeze_bias, - } - ) - return config diff --git a/keras_nlp/layers/modeling/lora_dense_test.py b/keras_nlp/layers/modeling/lora_dense_test.py deleted file mode 100644 index a15718b03a..0000000000 --- a/keras_nlp/layers/modeling/lora_dense_test.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2023 The KerasNLP Authors -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import pytest - -from keras_nlp.backend import keras -from keras_nlp.backend import random -from keras_nlp.layers.modeling.lora_dense import LoraDense -from keras_nlp.tests.test_case import TestCase - - -@pytest.mark.keras_3_only -class LoraDenseTest(TestCase): - def test_layer_behaviors(self): - self.run_layer_test( - cls=LoraDense, - init_kwargs={ - "inner_dense": keras.layers.Dense(16), - "rank": 2, - "alpha": 16, - "lora_a_initializer": "HeNormal", - }, - input_data=random.uniform(shape=(2, 4, 8)), - expected_output_shape=(2, 4, 16), - expected_num_trainable_weights=2, - expected_num_non_trainable_weights=2, - expected_num_non_trainable_variables=2, - run_mixed_precision_check=False, - ) - - def test_layer_behaviors_einsum(self): - self.run_layer_test( - cls=LoraDense, - init_kwargs={ - "inner_dense": keras.layers.EinsumDense( - "abc,cde->abde", - output_shape=(None, 2, 16), - ), - "lora_a_initializer": "HeNormal", - }, - input_data=random.uniform(shape=(2, 4, 8)), - expected_output_shape=(2, 4, 2, 16), - expected_num_trainable_weights=2, - expected_num_non_trainable_weights=1, - expected_num_non_trainable_variables=1, - run_mixed_precision_check=False, - ) - - def test_merge_dense(self): - inner_dense = keras.layers.Dense(16) - layer = LoraDense(inner_dense, rank=4) - layer.build((2, 16)) - layer.lora_a.assign(random.uniform(shape=(16, 4))) - layer.lora_b.assign(random.uniform(shape=(4, 16))) - - input_data = random.uniform((2, 16)) - lora_output = layer(input_data) - dense_output = inner_dense(input_data) - self.assertNotAllClose(lora_output, dense_output) - - layer.merge_weights() - merged_lora_output = layer(input_data) - dense_output = inner_dense(input_data) - self.assertAllClose(lora_output, merged_lora_output) - self.assertAllClose(lora_output, dense_output) - - def test_merge_einsum(self): - inner_dense = keras.layers.EinsumDense( - "abc,cde->abde", - output_shape=(None, 2, 16), - ) - layer = LoraDense(inner_dense, rank=4) - layer.build((2, 4, 16)) - layer.lora_a.assign(random.uniform(shape=(16, 4))) - layer.lora_b.assign(random.uniform(shape=(4, 2, 16))) - - input_data = random.uniform((2, 4, 16)) - lora_output = layer(input_data) - dense_output = inner_dense(input_data) - self.assertNotAllClose(lora_output, dense_output) - - layer.merge_weights() - merged_lora_output = layer(input_data) - dense_output = inner_dense(input_data) - self.assertAllClose(lora_output, merged_lora_output) - self.assertAllClose(lora_output, dense_output) - - def test_freezing(self): - inner_dense = keras.layers.Dense(16) - layer = LoraDense(inner_dense, freeze_bias=False) - layer.build((2, 16)) - self.assertFalse(inner_dense.kernel.trainable) - self.assertTrue(inner_dense.bias.trainable) - - inner_dense = keras.layers.Dense(16) - layer = LoraDense(inner_dense) - layer.build((2, 16)) - self.assertFalse(inner_dense.kernel.trainable) - self.assertFalse(inner_dense.bias.trainable) - - def test_errors_if_not_dense(self): - with self.assertRaises(ValueError): - LoraDense(keras.layers.Concatenate()) - - def test_errors_invalid_einsum(self): - with self.assertRaises(ValueError): - # Kernel 
feature dim in the wrong place.
-            einsum = keras.layers.EinsumDense("abc,dec->abde", (2, 2, 16))
-            LoraDense(einsum, rank=4)
-
-        with self.assertRaises(ValueError):
-            # Input feature dim in the wrong place.
-            einsum = keras.layers.EinsumDense("acb,cde->abde", (2, 2, 16))
-            LoraDense(einsum, rank=4)
-
-        with self.assertRaises(ValueError):
-            # Input feature dim not summed over.
-            einsum = keras.layers.EinsumDense("abc,cde->abcde", (2, 2, 2, 16))
-            LoraDense(einsum, rank=4)
-
-        with self.assertRaises(ValueError):
-            # Double summations.
-            einsum = keras.layers.EinsumDense("abcd,cde->abe", (2, 2, 16))
-            LoraDense(einsum, rank=4)
From fd559ed648c6aa7170753c855353b7fb4680b448 Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Fri, 8 Dec 2023 15:50:44 -0600
Subject: [PATCH 42/87] Adds Kokoro Build script for Keras-NLP GPU tests
 (#1355)

* Adds Kokoro tests
* Add Kokoro Tests
* Add Kokoro Tests
* Add Kokoro Tests
* Add Kokoro Tests
* Add Kokoro tests
---
 .kokoro/README.md                             |  1 +
 .kokoro/github/ubuntu/gpu/build.sh            | 56 +++++++++++++++++++
 .kokoro/github/ubuntu/gpu/jax/continuous.cfg  | 20 +++++++
 .kokoro/github/ubuntu/gpu/jax/presubmit.cfg   | 16 ++++++
 .../github/ubuntu/gpu/keras2/continuous.cfg   | 20 +++++++
 .../github/ubuntu/gpu/keras2/presubmit.cfg    | 11 ++++
 .../ubuntu/gpu/tensorflow/continuous.cfg      | 20 +++++++
 .../ubuntu/gpu/tensorflow/presubmit.cfg       | 16 ++++++
 .../github/ubuntu/gpu/torch/continuous.cfg    | 20 +++++++
 .kokoro/github/ubuntu/gpu/torch/presubmit.cfg | 16 ++++++
 keras_nlp/conftest.py                         | 30 ++++++++++
 11 files changed, 226 insertions(+)
 create mode 100644 .kokoro/README.md
 create mode 100644 .kokoro/github/ubuntu/gpu/build.sh
 create mode 100644 .kokoro/github/ubuntu/gpu/jax/continuous.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/jax/presubmit.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/keras2/continuous.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/keras2/presubmit.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/tensorflow/presubmit.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/torch/continuous.cfg
 create mode 100644 .kokoro/github/ubuntu/gpu/torch/presubmit.cfg

diff --git a/.kokoro/README.md b/.kokoro/README.md
new file mode 100644
index 0000000000..b1fae5ee5a
--- /dev/null
+++ b/.kokoro/README.md
@@ -0,0 +1 @@
+CI to run on PR and merge to Master and for continuous build.
\ No newline at end of file
diff --git a/.kokoro/github/ubuntu/gpu/build.sh b/.kokoro/github/ubuntu/gpu/build.sh
new file mode 100644
index 0000000000..2571d0d048
--- /dev/null
+++ b/.kokoro/github/ubuntu/gpu/build.sh
@@ -0,0 +1,56 @@
+set -e
+set -x
+
+cd "${KOKORO_ROOT}/"
+
+sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
+
+PYTHON_BINARY="/usr/bin/python3.9"
+
+"${PYTHON_BINARY}" -m venv venv
+source venv/bin/activate
+# Check the python version
+python --version
+python3 --version
+
+export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64:"
+# Check cuda
+nvidia-smi
+nvcc --version
+
+cd "src/github/keras-nlp"
+pip install -U pip setuptools
+
+if [ "${KERAS2:-0}" == "1" ]
+then
+   echo "Keras2 detected."
+   pip install -r requirements-common.txt --progress-bar off
+   pip install tensorflow-text==2.14 tensorflow==2.14 keras-core
+
+elif [ "$KERAS_BACKEND" == "tensorflow" ]
+then
+   echo "TensorFlow backend detected."
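+   # Installs the pinned CUDA-enabled requirements for this backend.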
+ pip install -r requirements-tensorflow-cuda.txt --progress-bar off + +elif [ "$KERAS_BACKEND" == "jax" ] +then + echo "JAX backend detected." + pip install -r requirements-jax-cuda.txt --progress-bar off + +elif [ "$KERAS_BACKEND" == "torch" ] +then + echo "PyTorch backend detected." + pip install -r requirements-torch-cuda.txt --progress-bar off +fi + +pip install --no-deps -e "." --progress-bar off + +# Run Extra Large Tests for Continuous builds +if [ "${RUN_XLARGE:-0}" == "1" ] +then + pytest keras_nlp --check_gpu --run_large --run_extra_large \ + --cov=keras-nlp +else + pytest keras_nlp --check_gpu --run_large \ + --cov=keras-nlp +fi \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/jax/continuous.cfg b/.kokoro/github/ubuntu/gpu/jax/continuous.cfg new file mode 100644 index 0000000000..ac021237d5 --- /dev/null +++ b/.kokoro/github/ubuntu/gpu/jax/continuous.cfg @@ -0,0 +1,20 @@ +build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh" + +action { + define_artifacts { + regex: "**/sponge_log.log" + regex: "**/sponge_log.xml" + } +} + +env_vars: { + key: "KERAS_BACKEND" + value: "jax" +} + +env_vars: { + key: "RUN_XLARGE" + value: "1" +} + +# Leave default timeout of 180 mins \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/jax/presubmit.cfg b/.kokoro/github/ubuntu/gpu/jax/presubmit.cfg new file mode 100644 index 0000000000..1b9ffb605a --- /dev/null +++ b/.kokoro/github/ubuntu/gpu/jax/presubmit.cfg @@ -0,0 +1,16 @@ +build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh" + +action { + define_artifacts { + regex: "**/sponge_log.log" + regex: "**/sponge_log.xml" + } +} + +env_vars: { + key: "KERAS_BACKEND" + value: "jax" +} + +# Set timeout to 60 mins from default 180 mins +timeout_mins: 60 \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg b/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg new file mode 100644 index 0000000000..6c99214ce2 --- /dev/null +++ b/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg @@ -0,0 +1,20 @@ +build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh" + +action { + define_artifacts { + regex: "**/sponge_log.log" + regex: "**/sponge_log.xml" + } +} + +env_vars: { + key: "KERAS2" + value: "1" +} + +env_vars: { + key: "RUN_XLARGE" + value: "1" +} + +# Leave default timeout of 180 mins \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg b/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg new file mode 100644 index 0000000000..e988e3b375 --- /dev/null +++ b/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg @@ -0,0 +1,11 @@ +build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh" + +action { + define_artifacts { + regex: "**/sponge_log.log" + regex: "**/sponge_log.xml" + } +} + +# Set timeout to 60 mins from default 180 mins +timeout_mins: 60 \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg b/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg new file mode 100644 index 0000000000..4e9e5557b8 --- /dev/null +++ b/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg @@ -0,0 +1,20 @@ +build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh" + +action { + define_artifacts { + regex: "**/sponge_log.log" + regex: "**/sponge_log.xml" + } +} + +env_vars: { + key: "KERAS_BACKEND" + value: "tensorflow" +} + +env_vars: { + key: "RUN_XLARGE" + value: "1" +} + +# Leave default timeout of 180 mins \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/tensorflow/presubmit.cfg 
b/.kokoro/github/ubuntu/gpu/tensorflow/presubmit.cfg
new file mode 100644
index 0000000000..b85ee6f4eb
--- /dev/null
+++ b/.kokoro/github/ubuntu/gpu/tensorflow/presubmit.cfg
@@ -0,0 +1,16 @@
+build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh"
+
+action {
+  define_artifacts {
+    regex: "**/sponge_log.log"
+    regex: "**/sponge_log.xml"
+  }
+}
+
+env_vars: {
+  key: "KERAS_BACKEND"
+  value: "tensorflow"
+}
+
+# Set timeout to 60 mins from default 180 mins
+timeout_mins: 60
\ No newline at end of file
diff --git a/.kokoro/github/ubuntu/gpu/torch/continuous.cfg b/.kokoro/github/ubuntu/gpu/torch/continuous.cfg
new file mode 100644
index 0000000000..9d1fe25628
--- /dev/null
+++ b/.kokoro/github/ubuntu/gpu/torch/continuous.cfg
@@ -0,0 +1,20 @@
+build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh"
+
+action {
+  define_artifacts {
+    regex: "**/sponge_log.log"
+    regex: "**/sponge_log.xml"
+  }
+}
+
+env_vars: {
+  key: "KERAS_BACKEND"
+  value: "torch"
+}
+
+env_vars: {
+  key: "RUN_XLARGE"
+  value: "1"
+}
+
+# Leave default timeout of 180 mins
\ No newline at end of file
diff --git a/.kokoro/github/ubuntu/gpu/torch/presubmit.cfg b/.kokoro/github/ubuntu/gpu/torch/presubmit.cfg
new file mode 100644
index 0000000000..5d25106b3f
--- /dev/null
+++ b/.kokoro/github/ubuntu/gpu/torch/presubmit.cfg
@@ -0,0 +1,16 @@
+build_file: "keras-nlp/.kokoro/github/ubuntu/gpu/build.sh"
+
+action {
+  define_artifacts {
+    regex: "**/sponge_log.log"
+    regex: "**/sponge_log.xml"
+  }
+}
+
+env_vars: {
+  key: "KERAS_BACKEND"
+  value: "torch"
+}
+
+# Set timeout to 60 mins from default 180 mins
+timeout_mins: 60
\ No newline at end of file
diff --git a/keras_nlp/conftest.py b/keras_nlp/conftest.py
index c918090ae6..b876a7a0a8 100644
--- a/keras_nlp/conftest.py
+++ b/keras_nlp/conftest.py
@@ -38,9 +38,35 @@ def pytest_addoption(parser):
         default="",
         help="restrict docs testing to modules whose name matches this flag",
     )
+    parser.addoption(
+        "--check_gpu",
+        action="store_true",
+        default=False,
+        help="fail if a gpu is not present",
+    )


 def pytest_configure(config):
+    # Verify that the device has a GPU that is detected by the backend.
+    if config.getoption("--check_gpu"):
+        found_gpu = False
+        backend = backend_config.backend()
+        if backend == "jax":
+            import jax
+
+            try:
+                found_gpu = bool(jax.devices("gpu"))
+            except RuntimeError:
+                found_gpu = False
+        elif backend == "tensorflow":
+            found_gpu = bool(tf.config.list_logical_devices("GPU"))
+        elif backend == "torch":
+            import torch
+
+            found_gpu = bool(torch.cuda.device_count())
+        if not found_gpu:
+            pytest.fail(f"No GPUs discovered on the {backend} backend.")
+
 config.addinivalue_line(
         "markers",
         "large: mark test as being slow or requiring a network",
@@ -53,6 +79,10 @@ def pytest_configure(config):
         "markers",
         "tf_only: mark test as a tf only test",
     )
+    config.addinivalue_line(
+        "markers",
+        "keras_3_only: mark test as a keras 3 only test",
+    )


 def pytest_collection_modifyitems(config, items):
From 4cf65866b986123fcb5f91a73e623482c150dbbd Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Tue, 12 Dec 2023 11:19:05 -0600
Subject: [PATCH 43/87] Fixes GPU Test failures for Keras 3 (#1361)

* Update Keras2 to TF 2.15 to align on cuda ver
* Set encoding to utf8 for tokenizer
---
 .kokoro/github/ubuntu/gpu/build.sh             | 2 +-
 .kokoro/github/ubuntu/gpu/keras2/presubmit.cfg | 5 +++++
 keras_nlp/models/whisper/whisper_tokenizer.py  | 2 +-
 keras_nlp/tokenizers/byte_pair_tokenizer.py    | 2 +-
 4 files changed, 8 insertions(+), 3 deletions(-)
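(Context for the tokenizer change below: BPE vocabularies contain non-ASCII
byte-level tokens, so `open()` needs an explicit encoding; the platform
default, `locale.getpreferredencoding()`, is not UTF-8 everywhere, e.g. on
Windows. A minimal illustration with a hypothetical file name:)

```python
import json

# Write a vocab entry containing a non-ASCII byte-level token ("Ġ" marks a
# leading space in GPT-2 style vocabularies).
with open("vocab.json", "w", encoding="utf-8") as f:
    json.dump({"Ġhello": 0}, f, ensure_ascii=False)

# Without `encoding="utf-8"`, reading falls back to the platform default
# codec, which can raise UnicodeDecodeError or silently mis-decode the token.
# The explicit argument, as in the fix below, makes loading deterministic:
with open("vocab.json", "r", encoding="utf-8") as f:
    vocab = json.load(f)
assert "Ġhello" in vocab
```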
diff --git a/.kokoro/github/ubuntu/gpu/build.sh b/.kokoro/github/ubuntu/gpu/build.sh
index 2571d0d048..f3b0095977 100644
--- a/.kokoro/github/ubuntu/gpu/build.sh
+++ b/.kokoro/github/ubuntu/gpu/build.sh
@@ -25,7 +25,7 @@ if [ "${KERAS2:-0}" == "1" ]
 then
    echo "Keras2 detected."
    pip install -r requirements-common.txt --progress-bar off
-   pip install tensorflow-text==2.14 tensorflow==2.14 keras-core
+   pip install tensorflow-text==2.15 tensorflow[and-cuda]~=2.15 keras-core

 elif [ "$KERAS_BACKEND" == "tensorflow" ]
 then
diff --git a/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg b/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg
index e988e3b375..7e971ac96d 100644
--- a/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg
+++ b/.kokoro/github/ubuntu/gpu/keras2/presubmit.cfg
@@ -7,5 +7,10 @@ action {
   }
 }

+env_vars: {
+  key: "KERAS2"
+  value: "1"
+}
+
 # Set timeout to 60 mins from default 180 mins
 timeout_mins: 60
\ No newline at end of file
diff --git a/keras_nlp/models/whisper/whisper_tokenizer.py b/keras_nlp/models/whisper/whisper_tokenizer.py
index b1406b0a04..cd4da7d15f 100644
--- a/keras_nlp/models/whisper/whisper_tokenizer.py
+++ b/keras_nlp/models/whisper/whisper_tokenizer.py
@@ -23,7 +23,7 @@

 def _load_dict(dict_or_path):
     if isinstance(dict_or_path, str):
-        with open(dict_or_path, "r") as f:
+        with open(dict_or_path, "r", encoding="utf-8") as f:
             dict_or_path = json.load(f)
     return dict_or_path
diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py
index 133c9565b0..b799874d2a 100644
--- a/keras_nlp/tokenizers/byte_pair_tokenizer.py
+++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py
@@ -292,7 +292,7 @@ def __init__(
         super().__init__(dtype=dtype, **kwargs)

         if isinstance(vocabulary, str):
-            with open(vocabulary, "r") as f:
+            with open(vocabulary, "r", encoding="utf-8") as f:
                 self.vocabulary = json.load(f)
         elif isinstance(vocabulary, dict):
             self.vocabulary = vocabulary.copy()
From 1144b4570224ddaa325d64f4769698b60ce26ade Mon Sep 17 00:00:00 2001
From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com>
Date: Tue, 12 Dec 2023 14:49:25 -0600
Subject: [PATCH 44/87] Change Continuous config to also run only large tests
 (#1362)

* Change Continuous config to be same as presubmit
* Skip Keras2 T5 backbone preset
---
 .kokoro/github/ubuntu/gpu/build.sh                  | 2 +-
 .kokoro/github/ubuntu/gpu/jax/continuous.cfg        | 8 ++------
 .kokoro/github/ubuntu/gpu/keras2/continuous.cfg     | 8 ++------
 .kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg | 8 ++------
 .kokoro/github/ubuntu/gpu/torch/continuous.cfg      | 8 ++------
 keras_nlp/models/t5/t5_backbone_test.py             | 9 +++++++++
 6 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/.kokoro/github/ubuntu/gpu/build.sh b/.kokoro/github/ubuntu/gpu/build.sh
index f3b0095977..2017b77c82 100644
--- a/.kokoro/github/ubuntu/gpu/build.sh
+++ b/.kokoro/github/ubuntu/gpu/build.sh
@@ -19,7 +19,7 @@ nvidia-smi
 nvcc --version

 cd "src/github/keras-nlp"
-pip install -U pip setuptools
+pip install -U pip setuptools psutil

 if [ "${KERAS2:-0}" == "1" ]
 then
diff --git a/.kokoro/github/ubuntu/gpu/jax/continuous.cfg b/.kokoro/github/ubuntu/gpu/jax/continuous.cfg
index ac021237d5..1b9ffb605a 100644
--- a/.kokoro/github/ubuntu/gpu/jax/continuous.cfg
+++ b/.kokoro/github/ubuntu/gpu/jax/continuous.cfg
@@ -12,9 +12,5 @@ env_vars: {
   value: "jax"
 }

-env_vars: {
-  key: "RUN_XLARGE"
-  value: "1"
-}
-
-# Leave default timeout of 180 mins
\ No newline at end of file
+# Set timeout to 60 mins from default 180 mins
+timeout_mins: 60
\ No 
newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg b/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg index 6c99214ce2..7e971ac96d 100644 --- a/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg +++ b/.kokoro/github/ubuntu/gpu/keras2/continuous.cfg @@ -12,9 +12,5 @@ env_vars: { value: "1" } -env_vars: { - key: "RUN_XLARGE" - value: "1" -} - -# Leave default timeout of 180 mins \ No newline at end of file +# Set timeout to 60 mins from default 180 mins +timeout_mins: 60 \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg b/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg index 4e9e5557b8..b85ee6f4eb 100644 --- a/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg +++ b/.kokoro/github/ubuntu/gpu/tensorflow/continuous.cfg @@ -12,9 +12,5 @@ env_vars: { value: "tensorflow" } -env_vars: { - key: "RUN_XLARGE" - value: "1" -} - -# Leave default timeout of 180 mins \ No newline at end of file +# Set timeout to 60 mins from default 180 mins +timeout_mins: 60 \ No newline at end of file diff --git a/.kokoro/github/ubuntu/gpu/torch/continuous.cfg b/.kokoro/github/ubuntu/gpu/torch/continuous.cfg index 9d1fe25628..5d25106b3f 100644 --- a/.kokoro/github/ubuntu/gpu/torch/continuous.cfg +++ b/.kokoro/github/ubuntu/gpu/torch/continuous.cfg @@ -12,9 +12,5 @@ env_vars: { value: "torch" } -env_vars: { - key: "RUN_XLARGE" - value: "1" -} - -# Leave default timeout of 180 mins \ No newline at end of file +# Set timeout to 60 mins from default 180 mins +timeout_mins: 60 \ No newline at end of file diff --git a/keras_nlp/models/t5/t5_backbone_test.py b/keras_nlp/models/t5/t5_backbone_test.py index 9006925f10..bb672afa2c 100644 --- a/keras_nlp/models/t5/t5_backbone_test.py +++ b/keras_nlp/models/t5/t5_backbone_test.py @@ -14,6 +14,7 @@ import pytest +from keras_nlp.backend import config as backend_config from keras_nlp.backend import ops from keras_nlp.models.t5.t5_backbone import T5Backbone from keras_nlp.tests.test_case import TestCase @@ -55,6 +56,10 @@ def test_saved_model(self): ) @pytest.mark.large + @pytest.mark.skipif( + not backend_config.keras_3(), + reason="TODO: Fails in Keras2", + ) def test_smallest_preset(self): self.run_preset_test( cls=T5Backbone, @@ -75,6 +80,10 @@ def test_smallest_preset(self): ) @pytest.mark.extra_large + @pytest.mark.skipif( + not backend_config.keras_3(), + reason="TODO: Fails in Keras2", + ) def test_all_presets(self): for preset in T5Backbone.presets: self.run_preset_test( From f78276f951b42ab80b0b0a3abc8b1555cb1a5ba3 Mon Sep 17 00:00:00 2001 From: Pranav Prajapati <94780581+pranavvp16@users.noreply.github.com> Date: Sat, 16 Dec 2023 12:04:37 +0530 Subject: [PATCH 45/87] Add `ElectraTokenizer` (#1357) --- keras_nlp/models/__init__.py | 1 + keras_nlp/models/electra/electra_tokenizer.py | 79 +++++++++++++++++++ .../models/electra/electra_tokenizer_test.py | 42 ++++++++++ 3 files changed, 122 insertions(+) create mode 100644 keras_nlp/models/electra/electra_tokenizer.py create mode 100644 keras_nlp/models/electra/electra_tokenizer_test.py diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py index 858be70ec5..23500c7460 100644 --- a/keras_nlp/models/__init__.py +++ b/keras_nlp/models/__init__.py @@ -64,6 +64,7 @@ DistilBertTokenizer, ) from keras_nlp.models.electra.electra_backbone import ElectraBackbone +from keras_nlp.models.electra.electra_tokenizer import ElectraTokenizer from keras_nlp.models.f_net.f_net_backbone import FNetBackbone from 
keras_nlp.models.f_net.f_net_classifier import FNetClassifier from keras_nlp.models.f_net.f_net_masked_lm import FNetMaskedLM diff --git a/keras_nlp/models/electra/electra_tokenizer.py b/keras_nlp/models/electra/electra_tokenizer.py new file mode 100644 index 0000000000..c6ec29c42b --- /dev/null +++ b/keras_nlp/models/electra/electra_tokenizer.py @@ -0,0 +1,79 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from keras_nlp.api_export import keras_nlp_export +from keras_nlp.tokenizers import WordPieceTokenizer + + +@keras_nlp_export("keras_nlp.models.ElectraTokenizer") +class ElectraTokenizer(WordPieceTokenizer): + """A ELECTRA tokenizer using WordPiece subword segmentation. + + This tokenizer class will tokenize raw strings into integer sequences and + is based on `keras_nlp.tokenizers.WordPieceTokenizer`. + + If input is a batch of strings (rank > 0), the layer will output a + `tf.RaggedTensor` where the last dimension of the output is ragged. + + If input is a scalar string (rank == 0), the layer will output a dense + `tf.Tensor` with static shape `[None]`. + + Args: + vocabulary: A list of strings or a string filename path. If + passing a list, each element of the list should be a single word + piece token string. If passing a filename, the file should be a + plain text file containing a single word piece token per line. + lowercase: If `True`, the input text will be first lowered before + tokenization. + + Examples: + ```python + # Custom Vocabulary. + vocab = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"] + vocab += ["The", "quick", "brown", "fox", "jumped", "."] + + # Instantiate the tokenizer. + tokenizer = keras_nlp.models.ElectraTokenizer(vocabulary=vocab) + + # Unbatched input. + tokenizer("The quick brown fox jumped.") + + # Batched input. + tokenizer(["The quick brown fox jumped.", "The fox slept."]) + + # Detokenization. + tokenizer.detokenize(tokenizer("The quick brown fox jumped.")) + ``` + """ + + def __init__(self, vocabulary, lowercase=False, **kwargs): + super().__init__(vocabulary=vocabulary, lowercase=lowercase, **kwargs) + + # Check for special tokens + cls_token = "[CLS]" + sep_token = "[SEP]" + pad_token = "[PAD]" + mask_token = "[MASK]" + + for token in [cls_token, pad_token, sep_token, mask_token]: + if token not in self.get_vocabulary(): + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." 
+ ) + self.cls_token_id = self.token_to_id(cls_token) + self.sep_token_id = self.token_to_id(sep_token) + self.pad_token_id = self.token_to_id(pad_token) + self.mask_token_id = self.token_to_id(mask_token) diff --git a/keras_nlp/models/electra/electra_tokenizer_test.py b/keras_nlp/models/electra/electra_tokenizer_test.py new file mode 100644 index 0000000000..2e06fb900c --- /dev/null +++ b/keras_nlp/models/electra/electra_tokenizer_test.py @@ -0,0 +1,42 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from keras_nlp.models.electra.electra_tokenizer import ElectraTokenizer +from keras_nlp.tests.test_case import TestCase + + +class ElectraTokenizerTest(TestCase): + def setUp(self): + self.vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] + self.vocab += ["THE", "QUICK", "BROWN", "FOX"] + self.vocab += ["the", "quick", "brown", "fox"] + self.init_kwargs = {"vocabulary": self.vocab} + self.input_data = ["THE QUICK BROWN FOX", "THE FOX"] + + def test_tokenizer_basics(self): + self.run_preprocessing_layer_test( + cls=ElectraTokenizer, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=[[5, 6, 7, 8], [5, 8]], + ) + + def test_lowercase(self): + tokenizer = ElectraTokenizer(vocabulary=self.vocab, lowercase=True) + output = tokenizer(self.input_data) + self.assertAllEqual(output, [[9, 10, 11, 12], [9, 12]]) + + def test_errors_missing_special_tokens(self): + with self.assertRaises(ValueError): + ElectraTokenizer(vocabulary=["a", "b", "c"]) From 4ea8c2311f4dff2db640e8b411410e2f671108a4 Mon Sep 17 00:00:00 2001 From: Tirth Patel Date: Tue, 19 Dec 2023 17:37:07 -0700 Subject: [PATCH 46/87] Add MistralAI's 7B Transformer as a backbone in KerasNLP Models (#1314) * Add MistralBackbone * Fix Keras 2 failure * Fix another Keras 2 failure * Make the caching step XLA compatible * Add dtype support for the MistralBackbone * Address review comments * Add docs; Make args keyword-only; Cosmetic fixes * Use keras.backend.floatx() instead of keras.config.floatx() for Keras 2 compatibility * Add review comments --- keras_nlp/layers/modeling/rotary_embedding.py | 4 +- keras_nlp/models/__init__.py | 1 + keras_nlp/models/mistral/__init__.py | 13 + keras_nlp/models/mistral/mistral_attention.py | 293 ++++++++++++ keras_nlp/models/mistral/mistral_backbone.py | 196 ++++++++ .../models/mistral/mistral_backbone_test.py | 56 +++ .../models/mistral/mistral_layer_norm.py | 48 ++ .../mistral/mistral_transformer_decoder.py | 233 +++++++++ .../convert_mistral_checkpoints.py | 443 ++++++++++++++++++ 9 files changed, 1285 insertions(+), 2 deletions(-) create mode 100644 keras_nlp/models/mistral/__init__.py create mode 100644 keras_nlp/models/mistral/mistral_attention.py create mode 100644 keras_nlp/models/mistral/mistral_backbone.py create mode 100644 keras_nlp/models/mistral/mistral_backbone_test.py create mode 100644 keras_nlp/models/mistral/mistral_layer_norm.py create mode 100644 keras_nlp/models/mistral/mistral_transformer_decoder.py create mode 100644 
tools/checkpoint_conversion/convert_mistral_checkpoints.py diff --git a/keras_nlp/layers/modeling/rotary_embedding.py b/keras_nlp/layers/modeling/rotary_embedding.py index b3402f7e21..6f4ae449de 100644 --- a/keras_nlp/layers/modeling/rotary_embedding.py +++ b/keras_nlp/layers/modeling/rotary_embedding.py @@ -97,7 +97,7 @@ def _apply_rotary_pos_emb(self, tensor, cos_emb, sin_emb): return (tensor * cos_emb) + (half_rot_tensor * sin_emb) def _compute_cos_sin_embedding(self, x, rotary_dim, start_index): - freq_range = ops.arange(0, rotary_dim, 2, dtype="float32") + freq_range = ops.arange(0, rotary_dim, 2) freq_range = ops.cast(freq_range, self.compute_dtype) freq_range = freq_range / ops.cast( self.scaling_factor, self.compute_dtype @@ -107,7 +107,7 @@ def _compute_cos_sin_embedding(self, x, rotary_dim, start_index): ** (freq_range / ops.cast(rotary_dim, self.compute_dtype)) ) seq_len = ops.shape(x)[self.sequence_axis] - tensor = ops.arange(seq_len, dtype="float32") + start_index + tensor = ops.cast(ops.arange(seq_len), self.compute_dtype) + start_index tensor = ops.cast(tensor, dtype=inverse_freq.dtype) freq = ops.einsum("i, j -> ij", tensor, inverse_freq) embedding = ops.concatenate((freq, freq), axis=self.feature_axis) diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py index 23500c7460..8f8e3a2ab3 100644 --- a/keras_nlp/models/__init__.py +++ b/keras_nlp/models/__init__.py @@ -89,6 +89,7 @@ GPTNeoXPreprocessor, ) from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer +from keras_nlp.models.mistral.mistral_backbone import MistralBackbone from keras_nlp.models.opt.opt_backbone import OPTBackbone from keras_nlp.models.opt.opt_causal_lm import OPTCausalLM from keras_nlp.models.opt.opt_causal_lm_preprocessor import ( diff --git a/keras_nlp/models/mistral/__init__.py b/keras_nlp/models/mistral/__init__.py new file mode 100644 index 0000000000..ba0c2545e4 --- /dev/null +++ b/keras_nlp/models/mistral/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/keras_nlp/models/mistral/mistral_attention.py b/keras_nlp/models/mistral/mistral_attention.py new file mode 100644 index 0000000000..680f1f6d1b --- /dev/null +++ b/keras_nlp/models/mistral/mistral_attention.py @@ -0,0 +1,293 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from keras_nlp.backend import keras +from keras_nlp.backend import ops +from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding +from keras_nlp.utils.keras_utils import clone_initializer + + +# This is just a self-attention layer in Mistral. But it can be generalized +# to use the `keras_nlp.layers.CachedMultiHeadAttention` API. Since this layer +# implements grouped-query attention and sliding window attention, it might be +# useful outside of Mistral itself. +# TODO(tirthasheshpatel): Generalize the attention layer +# TODO(tirthasheshpatel): Merge `LlamaAttention` with this layer +# TODO(tirthasheshpatel): Use flash attention +class CachedMistralAttention(keras.layers.Layer): + """A cached grounded query attention layer with sliding window.""" + + def __init__( + self, + num_query_heads, + num_key_value_heads, + rope_max_wavelength=10000, + rope_scaling_factor=1.0, + kernel_initializer="glorot_uniform", + sliding_window=512, + dropout=0, + **kwargs, + ): + super().__init__(**kwargs) + self._num_query_heads = num_query_heads + self._num_key_value_heads = num_key_value_heads + self._sliding_window = sliding_window + self._dropout = dropout + + self._num_key_value_groups = num_query_heads // num_key_value_heads + self._rope_max_wavelength = rope_max_wavelength + + self._kernel_initializer = keras.initializers.get( + clone_initializer(kernel_initializer) + ) + + self._rope_scaling_factor = rope_scaling_factor + + def build(self, inputs_shape): + # Einsum variables: + # b = batch size + # q = query length + # k = key/value length + # m = model dim + # u = num query heads + # v = num key/value heads + # h = head dim + self._hidden_dim = inputs_shape[-1] + self._head_dim = self._hidden_dim // self._num_query_heads + + self._query_dense = keras.layers.EinsumDense( + equation="bqm,muh->bquh", + output_shape=(None, self._num_query_heads, self._head_dim), + kernel_initializer=self._kernel_initializer, + dtype=self.compute_dtype, + name="query", + ) + self._query_dense.build(inputs_shape) + + self._key_dense = keras.layers.EinsumDense( + equation="bkm,mvh->bkvh", + output_shape=( + None, + self._num_key_value_heads, + self._head_dim, + ), + kernel_initializer=self._kernel_initializer, + dtype=self.compute_dtype, + name="key", + ) + self._key_dense.build(inputs_shape) + + self._value_dense = keras.layers.EinsumDense( + equation="bkm,mvh->bkvh", + output_shape=( + None, + self._num_key_value_heads, + self._head_dim, + ), + kernel_initializer=self._kernel_initializer, + dtype=self.compute_dtype, + name="value", + ) + self._value_dense.build(inputs_shape) + + self._softmax = keras.layers.Softmax(axis=-1, name="attention_softmax") + + self._dropout_layer = keras.layers.Dropout( + rate=self._dropout, dtype=self.compute_dtype + ) + + self._output_dense = keras.layers.EinsumDense( + equation="bquh,uhm->bqm", + output_shape=(None, self._hidden_dim), + kernel_initializer=self._kernel_initializer, + dtype=self.compute_dtype, + name="attention_output", + ) + self._output_dense.build( + (None, None, self._num_query_heads, self._head_dim) + ) + + self.rotary_embedding_layer = RotaryEmbedding( + max_wavelength=self._rope_max_wavelength, + scaling_factor=self._rope_scaling_factor, + dtype=self.compute_dtype, + ) + + self._dot_product_equation = "bquh,bkuh->buqk" + self._combine_equation = "buqk,bkuh->bquh" + + self.built = True + + def call( + self, + hidden_states, + attention_mask=None, + cache=None, + cache_update_index=None, + training=None, + ): + seq_len = ops.shape(hidden_states)[1] + 
start_index = ( + cache_update_index if cache_update_index is not None else 0 + ) + # If `cache_update_index` is a tensor, RotaryEmbedding expects it + # to have dtype `self.compute_dtype`. + start_index = ops.cast( + start_index, self.rotary_embedding_layer.compute_dtype + ) + + query = self._query_dense(hidden_states) + + # Note that the original PyTorch implementation uses + # view_as_complex/view_as_real while we use split/concatenate to + # convert to/from complex numbers. The transformations below make + # the rope computation numerically equivalent to the original + # implementation. + def _mistral_rope(x): + x = ops.concatenate([x[..., ::2], x[..., 1::2]], axis=-1) + x = self.rotary_embedding_layer(x, start_index=start_index) + x = ops.reshape( + ops.stack(ops.split(x, 2, axis=-1), axis=-1), ops.shape(x) + ) + return x + + # Compute RoPE for queries + query = _mistral_rope(query) + + def _compute_key_value(x): + key, value = self._key_dense(x), self._value_dense(x) + key = _mistral_rope(key) + return key, value + + if cache is not None: + cache_k = cache[:, 0, ...] + cache_v = cache[:, 1, ...] + + if cache_update_index is not None: + # Compute the new keys and values + key, value = _compute_key_value(hidden_states) + + # Cache is a rotating buffer, we want to warp around if + # the sequence length exceeds the sliding window. + update_end_index = ( + cache_update_index + seq_len - 1 + ) % self._sliding_window + 1 + update_end_index = ops.cast(update_end_index, "int32") + cache_update_index = cache_update_index % self._sliding_window + update_start_index = ops.cond( + update_end_index > cache_update_index, + lambda: ops.cast(cache_update_index, "int32"), + lambda: ops.cast(0, "int32"), + ) + # Also note that the update step below assumes that the + # sequence length is always one when `cache_update_index != 0`. + # This is necessary to support XLA compilation. Ideally, we + # would want to use + # `key[:, -(update_end_index - update_start_index):, ...]` + # as the update but updating using a dynamic slice gives an + # XLA compilation error in TensorFlow. + # Passing a sequence of length > 1 with cache update might give + # incorrect results (since there is no way to determine how + # many most recent tokens are to be saved if the tokens exceed + # the sliding window length). + cache_k = ops.slice_update( + cache_k, + [0, update_start_index, 0, 0], + # We slice the keys and values since if the user has passed + # a sequence of length > `self._sliding_window`. We want to + # prefill the cache using just the most recent values in the + # sliding window. + ops.cast( + key[:, -self._sliding_window :, ...], cache_k.dtype + ), + ) + cache_v = ops.slice_update( + cache_v, + [0, update_start_index, 0, 0], + ops.cast( + value[:, -self._sliding_window :, ...], cache_v.dtype + ), + ) + cache = ops.stack([cache_k, cache_v], axis=1) + + # Get the required keys and values from the cache. + # Since we expect the user to pass a fixed-size cache, we just + # pick the first few slices up-to and including the newly computed + # keys and values. + cache_k = cache_k[:, :update_end_index, ...] + cache_v = cache_v[:, :update_end_index, ...] 
+ + key = ops.cast(cache_k, dtype=self.compute_dtype) + value = ops.cast(cache_v, dtype=self.compute_dtype) + else: + # Compute keys and values + key, value = _compute_key_value(hidden_states) + + # [batch_shape, seq_len, num_key_value_heads, head_dim] + # -> [batch_shape, seq_len, num_heads, head_dim] + key = ops.repeat(key, repeats=self._num_key_value_groups, axis=2) + value = ops.repeat(value, repeats=self._num_key_value_groups, axis=2) + + attention_output = self._compute_attention( + query, key, value, attention_mask + ) + + attention_output = self._dropout_layer( + attention_output, training=training + ) + + attention_output = self._output_dense(attention_output) + + if cache is not None: + return attention_output, cache + return attention_output + + def _masked_softmax(self, attention_scores, attention_mask=None): + if attention_mask is not None: + return self._softmax( + attention_scores, attention_mask[:, None, :, :] + ) + return self._softmax(attention_scores) + + def _compute_attention(self, query, key, value, attention_mask=None): + attention_scores = ops.einsum(self._dot_product_equation, key, query) + + norm_factor = ops.sqrt(ops.cast(self._head_dim, self.compute_dtype)) + + attention_scores = attention_scores / norm_factor + + attention_scores = self._masked_softmax( + attention_scores, attention_mask + ) + attention_output = ops.einsum( + self._combine_equation, attention_scores, value + ) + + return attention_output + + def get_config(self): + config = super().get_config() + config.update( + { + "num_query_heads": self._num_query_heads, + "num_key_value_heads": self._num_key_value_heads, + "rope_max_wavelength": self._rope_max_wavelength, + "rope_scaling_factor": self._rope_scaling_factor, + "kernel_initializer": keras.initializers.serialize( + self._kernel_initializer + ), + "sliding_window": self._sliding_window, + "dropout": self._dropout, + } + ) + return config diff --git a/keras_nlp/models/mistral/mistral_backbone.py b/keras_nlp/models/mistral/mistral_backbone.py new file mode 100644 index 0000000000..42cec8b218 --- /dev/null +++ b/keras_nlp/models/mistral/mistral_backbone.py @@ -0,0 +1,196 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras +from keras_nlp.backend import ops +from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding +from keras_nlp.models.backbone import Backbone +from keras_nlp.models.mistral.mistral_layer_norm import ( + MistralLayerNormalization, +) +from keras_nlp.models.mistral.mistral_transformer_decoder import ( + MistralTransformerDecoder, +) + + +def _mistral_kernel_initializer(stddev=0.02): + return keras.initializers.RandomNormal(stddev=stddev) + + +@keras_nlp_export("keras_nlp.models.MistralBackbone") +class MistralBackbone(Backbone): + """ + The Mistral Transformer core architecture with hyperparameters. 
+
+    This network implements a Transformer-based decoder network,
+    Mistral, as described in
+    ["Mistral 7B"](https://arxiv.org/pdf/2310.06825.pdf).
+    It includes the embedding lookups and transformer layers.
+
+    The default constructor gives a fully customizable, randomly initialized
+    Mistral model with any number of layers, heads, and embedding
+    dimensions. To load preset architectures and weights, use the `from_preset`
+    constructor.
+
+    Args:
+        vocabulary_size (int): The size of the token vocabulary.
+        num_layers (int): The number of transformer layers.
+        num_query_heads (int): The number of query attention heads for
+            each transformer.
+        hidden_dim (int): The size of the transformer encoding and pooling layers.
+        intermediate_dim (int): The output dimension of the first Dense layer in a
+            three-layer feedforward network for each transformer.
+        num_key_value_heads (int): The number of key and value attention heads for
+            each transformer.
+        rope_max_wavelength (int, optional): The maximum angular wavelength of the
+            sine/cosine curves, for rotary embeddings. Defaults to `10000`.
+        rope_scaling_factor (float, optional): The scaling factor for calculation
+            of rotary embeddings. Defaults to `1.0`.
+        layer_norm_epsilon (float, optional): Epsilon for the layer normalization
+            layers in the transformer decoder. Defaults to `1e-6`.
+        sliding_window (int, optional): The sliding window for the Mistral
+            attention layers. This controls the maximum cache size for the attention
+            layers in each transformer decoder. Only `sliding_window` number of tokens
+            are saved in the cache and used to generate the next token.
+            Defaults to `512`.
+        dtype (str, optional): The dtype policy for the Mistral model.
+
+    Examples:
+
+    ```python
+    input_data = {
+        "token_ids": np.ones(shape=(1, 12), dtype="int32"),
+        "padding_mask": np.array([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]]),
+    }
+
+    # Pretrained Mistral decoder.
+    model = keras_nlp.models.MistralBackbone.from_preset("mistral7b_base_en")
+    model(input_data)
+
+    # Randomly initialized Mistral decoder with custom config.
+ model = keras_nlp.models.MistralBackbone( + vocabulary_size=10, + hidden_dim=512, + num_layers=2, + num_query_heads=32, + num_key_value_heads=8, + intermediate_dim=1024, + sliding_window=512, + layer_norm_epsilon=1e-6, + dtype="float32" + ) + model(input_data) + ``` + """ + + def __init__( + self, + vocabulary_size, + num_layers, + num_query_heads, + hidden_dim, + intermediate_dim, + num_key_value_heads, + rope_max_wavelength=10000, + rope_scaling_factor=1.0, + layer_norm_epsilon=1e-6, + sliding_window=512, + dropout=0, + **kwargs, + ): + # Get the dtype + dtype = kwargs.pop("dtype", keras.backend.floatx()) + + # Inputs + token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids") + padding_mask = keras.Input( + shape=(None,), dtype="int32", name="padding_mask" + ) + + # Embed Tokens + token_embedding_layer = ReversibleEmbedding( + input_dim=vocabulary_size, + output_dim=hidden_dim, + tie_weights=False, + embeddings_initializer=_mistral_kernel_initializer(stddev=0.01), + dtype=dtype, + name="token_embedding", + ) + x = token_embedding_layer(token_ids) + + # Apply successive transformer decoder blocks + for i in range(num_layers): + x = MistralTransformerDecoder( + intermediate_dim=intermediate_dim, + num_query_heads=num_query_heads, + num_key_value_heads=num_key_value_heads, + rope_max_wavelength=rope_max_wavelength, + rope_scaling_factor=rope_scaling_factor, + layer_norm_epsilon=layer_norm_epsilon, + activation=ops.silu, + kernel_initializer=_mistral_kernel_initializer(stddev=0.02), + sliding_window=sliding_window, + dropout=dropout, + dtype=dtype, + name=f"transformer_layer_{i}", + )(x, decoder_padding_mask=padding_mask) + + sequence_output = MistralLayerNormalization( + name="sequence_output_layernorm", + epsilon=layer_norm_epsilon, + dtype=dtype, + )(x) + + # Instantiate using Functional API Model constructor + super().__init__( + inputs={ + "token_ids": token_ids, + "padding_mask": padding_mask, + }, + outputs=sequence_output, + **kwargs, + ) + + # All references to `self` below this line + self.vocabulary_size = vocabulary_size + self.num_layers = num_layers + self.num_query_heads = num_query_heads + self.hidden_dim = hidden_dim + self.intermediate_dim = intermediate_dim + self.rope_max_wavelength = rope_max_wavelength + self.num_key_value_heads = num_key_value_heads + self.rope_scaling_factor = rope_scaling_factor + self.sliding_window = sliding_window + self.layer_norm_epsilon = layer_norm_epsilon + self.dropout = dropout + self.token_embedding = token_embedding_layer + + def get_config(self): + config = super().get_config() + config.update( + { + "vocabulary_size": self.vocabulary_size, + "num_layers": self.num_layers, + "num_query_heads": self.num_query_heads, + "hidden_dim": self.hidden_dim, + "intermediate_dim": self.intermediate_dim, + "rope_max_wavelength": self.rope_max_wavelength, + "rope_scaling_factor": self.rope_scaling_factor, + "num_key_value_heads": self.num_key_value_heads, + "sliding_window": self.sliding_window, + "layer_norm_epsilon": self.layer_norm_epsilon, + "dropout": self.dropout, + } + ) + return config diff --git a/keras_nlp/models/mistral/mistral_backbone_test.py b/keras_nlp/models/mistral/mistral_backbone_test.py new file mode 100644 index 0000000000..fc2b0a592b --- /dev/null +++ b/keras_nlp/models/mistral/mistral_backbone_test.py @@ -0,0 +1,56 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import pytest + +from keras_nlp.backend import ops +from keras_nlp.models.mistral.mistral_backbone import MistralBackbone +from keras_nlp.tests.test_case import TestCase + + +class MistralBackboneTest(TestCase): + def setUp(self): + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_query_heads": 8, + "num_key_value_heads": 4, + "hidden_dim": 16, + "intermediate_dim": 8, + "sliding_window": 2, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), + } + + def test_backbone_basics(self): + self.run_backbone_test( + cls=MistralBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 16), + ) + + @pytest.mark.large + def test_saved_model(self): + self.run_model_saving_test( + cls=MistralBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) + + def test_num_parameters(self): + model = MistralBackbone(**self.init_kwargs) + # Reference value calculated using the PyTorch model + self.assertEqual(model.count_params(), 2704) diff --git a/keras_nlp/models/mistral/mistral_layer_norm.py b/keras_nlp/models/mistral/mistral_layer_norm.py new file mode 100644 index 0000000000..9f9ddf26b5 --- /dev/null +++ b/keras_nlp/models/mistral/mistral_layer_norm.py @@ -0,0 +1,48 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.backend import keras +from keras_nlp.backend import ops + + +# TODO: Deprecate this in favor of +# `keras.layers.LayerNormalization(rms_scaling=True)` once Keras 2 support is +# removed. 
+class MistralLayerNormalization(keras.layers.Layer): + """A normalization layer for Mistral that implements RMS normalization.""" + + def __init__(self, epsilon=1e-6, **kwargs): + super().__init__(**kwargs) + self._epsilon = epsilon + + def build(self, input_shape): + self._dim = input_shape[-1] + self._weight = self.add_weight( + name="weight", + trainable=True, + shape=(self._dim,), + initializer="ones", + dtype=self.compute_dtype, + ) + self.built = True + + def call(self, x): + x = x * ops.rsqrt( + ops.mean(ops.power(x, 2), axis=-1, keepdims=True) + self._epsilon + ) + return x * self._weight + + def get_config(self): + config = super().get_config() + config.update({"epsilon": self._epsilon}) + return config diff --git a/keras_nlp/models/mistral/mistral_transformer_decoder.py b/keras_nlp/models/mistral/mistral_transformer_decoder.py new file mode 100644 index 0000000000..9b6f7fdbf8 --- /dev/null +++ b/keras_nlp/models/mistral/mistral_transformer_decoder.py @@ -0,0 +1,233 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.backend import keras +from keras_nlp.backend import ops +from keras_nlp.layers.modeling.transformer_layer_utils import ( + compute_causal_mask, +) +from keras_nlp.layers.modeling.transformer_layer_utils import ( + merge_padding_and_attention_mask, +) +from keras_nlp.models.mistral.mistral_attention import CachedMistralAttention +from keras_nlp.models.mistral.mistral_layer_norm import ( + MistralLayerNormalization, +) +from keras_nlp.utils.keras_utils import clone_initializer + + +class MistralTransformerDecoder(keras.layers.Layer): + """A Transformer decoder layer for the Mistral backbone.""" + + def __init__( + self, + intermediate_dim, + num_query_heads, + num_key_value_heads, + rope_max_wavelength=10000, + rope_scaling_factor=1.0, + activation="relu", + layer_norm_epsilon=1e-5, + kernel_initializer="glorot_uniform", + sliding_window=512, + dropout=0, + **kwargs, + ): + super().__init__(**kwargs) + self.intermediate_dim = intermediate_dim + self.num_query_heads = num_query_heads + self.num_key_value_heads = num_key_value_heads + + self.rope_max_wavelength = rope_max_wavelength + self.rope_scaling_factor = rope_scaling_factor + + self.dropout = dropout + + self.sliding_window = sliding_window + self.activation = keras.activations.get(activation) + self.layer_norm_epsilon = layer_norm_epsilon + self.kernel_initializer = keras.initializers.get(kernel_initializer) + + self.supports_masking = True + + def build(self, decoder_sequence_shape): + self._decoder_sequence_shape = decoder_sequence_shape + self.hidden_dim = decoder_sequence_shape[-1] + + # Self attention layer. 
+ self._self_attention_layer = CachedMistralAttention( + num_query_heads=self.num_query_heads, + num_key_value_heads=self.num_key_value_heads, + rope_max_wavelength=self.rope_max_wavelength, + rope_scaling_factor=self.rope_scaling_factor, + sliding_window=self.sliding_window, + kernel_initializer=clone_initializer(self.kernel_initializer), + dropout=self.dropout, + dtype=self.compute_dtype, + name="self_attention", + ) + self._self_attention_layer.build(decoder_sequence_shape) + + self._self_attention_layernorm = MistralLayerNormalization( + epsilon=self.layer_norm_epsilon, + name="self_attention_layernorm", + dtype=self.compute_dtype, + ) + self._self_attention_layernorm.build(decoder_sequence_shape) + self._self_attention_dropout = keras.layers.Dropout( + rate=self.dropout, + dtype=self.compute_dtype, + name="self_attention_dropout", + ) + + # Feedforward layers. + self._feedforward_intermediate_dense = keras.layers.Dense( + self.intermediate_dim, + kernel_initializer=clone_initializer(self.kernel_initializer), + use_bias=False, + dtype=self.compute_dtype, + name="feedforward_intermediate_dense", + ) + self._feedforward_intermediate_dense.build(decoder_sequence_shape) + + self._feedforward_gate_dense = keras.layers.Dense( + self.intermediate_dim, + activation=self.activation, + kernel_initializer=clone_initializer(self.kernel_initializer), + use_bias=False, + name="feedforward_gate_dense", + ) + self._feedforward_gate_dense.build(decoder_sequence_shape) + + self._feedforward_output_dense = keras.layers.Dense( + self.hidden_dim, + kernel_initializer=clone_initializer(self.kernel_initializer), + use_bias=False, + dtype=self.compute_dtype, + name="feedforward_output_dense", + ) + + self._feedforward_output_dense.build( + self._feedforward_gate_dense.compute_output_shape( + decoder_sequence_shape + ) + ) + + self._feedforward_layernorm = MistralLayerNormalization( + epsilon=self.layer_norm_epsilon, + name="feedforward_layernorm", + dtype=self.compute_dtype, + ) + self._feedforward_layernorm.build(decoder_sequence_shape) + + self.built = True + + def call( + self, + decoder_sequence, + decoder_padding_mask=None, + decoder_attention_mask=None, + self_attention_cache=None, + self_attention_cache_update_index=None, + training=None, + ): + self_attention_mask = self._compute_self_attention_mask( + decoder_sequence=decoder_sequence, + decoder_padding_mask=decoder_padding_mask, + decoder_attention_mask=decoder_attention_mask, + ) + residual = decoder_sequence + + x = self._self_attention_layernorm(decoder_sequence) + + # Self attention block. 
+ x = self._self_attention_layer( + hidden_states=x, + attention_mask=self_attention_mask, + cache=self_attention_cache, + cache_update_index=self_attention_cache_update_index, + ) + + if self_attention_cache is not None: + x, self_attention_cache = x + + x = self._self_attention_dropout(x, training=training) + + x = x + residual + residual = x + + x = self._feedforward_layernorm(x) + gate_output = self._feedforward_gate_dense(x) + + x = self._feedforward_intermediate_dense(x) + + x = self._feedforward_output_dense(ops.multiply(x, gate_output)) + + decoder_output = x + residual + + if self_attention_cache is not None: + return decoder_output, self_attention_cache + return decoder_output + + def _compute_self_attention_mask( + self, + decoder_sequence, + decoder_padding_mask, + decoder_attention_mask, + ): + decoder_mask = merge_padding_and_attention_mask( + decoder_sequence, decoder_padding_mask, decoder_attention_mask + ) + batch_size = ops.shape(decoder_sequence)[0] + input_length = output_length = ops.shape(decoder_sequence)[1] + + # Mistral uses a banded attention mask + causal_mask_lower = compute_causal_mask( + batch_size, input_length, output_length, 0 + ) + # Below is a workaround for `ops.triu` for Keras 2. + # TODO(tirthasheshpatel): Use `ops.triu` once Keras 2 support is removed. + # causal_mask = ops.triu(causal_mask_lower, k=-self.sliding_window) + i = ops.arange(output_length)[:, None] + j = ops.arange(input_length)[None, :] + causal_mask_upper = ops.cast(i <= j + self.sliding_window, "int32") + causal_mask = ops.minimum(causal_mask_lower, causal_mask_upper) + + return ( + ops.minimum(decoder_mask, causal_mask) + if decoder_mask is not None + else causal_mask + ) + + def compute_output_shape(self, decoder_sequence_shape): + return decoder_sequence_shape + + def get_config(self): + config = super().get_config() + config.update( + { + "intermediate_dim": self.intermediate_dim, + "num_query_heads": self.num_query_heads, + "rope_max_wavelength": self.rope_max_wavelength, + "rope_scaling_factor": self.rope_scaling_factor, + "num_key_value_heads": self.num_key_value_heads, + "sliding_window": self.sliding_window, + "activation": keras.activations.serialize(self.activation), + "layer_norm_epsilon": self.layer_norm_epsilon, + "kernel_initializer": keras.initializers.serialize( + self.kernel_initializer + ), + "dropout": self.dropout, + } + ) + return config diff --git a/tools/checkpoint_conversion/convert_mistral_checkpoints.py b/tools/checkpoint_conversion/convert_mistral_checkpoints.py new file mode 100644 index 0000000000..3bc443d910 --- /dev/null +++ b/tools/checkpoint_conversion/convert_mistral_checkpoints.py @@ -0,0 +1,443 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import json +import pathlib +from dataclasses import dataclass +from pathlib import Path +from typing import Optional +from typing import Tuple + +import torch +from torch import nn + +from keras_nlp.models import MistralBackbone + +MODEL_PATH = pathlib.Path("mistral-7B-v0.1") + +# Torch model taken from: +# https://github.com/mistralai/mistral-src/blob/147c4e68279b90eb61b19bdea44e16f5539d5a5d/one_file_ref.py + + +@dataclass +class ModelArgs: + dim: int + n_layers: int + head_dim: int + hidden_dim: int + n_heads: int + n_kv_heads: int + sliding_window: int + norm_eps: float + vocab_size: int + + max_batch_size: int = 0 + + +def repeat_kv(keys: torch.Tensor, values: torch.Tensor, repeats: int): + keys = torch.repeat_interleave(keys, repeats=repeats, dim=2) + values = torch.repeat_interleave(values, repeats=repeats, dim=2) + return keys, values + + +def _reshape_for_broadcast( + freqs_cis: torch.Tensor, x: torch.Tensor +) -> torch.Tensor: + """ + freqs_cis: complex - (seq_len, head_dim / 2) + x: complex - (bsz, seq_len, head_dim / 2) + """ + ndim = x.ndim + assert 1 < ndim + assert freqs_cis.shape == (x.shape[1], x.shape[-1]), ( + freqs_cis.shape, + (x.shape[1], x.shape[-1]), + ) + shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)] + return freqs_cis.view(*shape) + + +def apply_rotary_emb( + xq: torch.Tensor, + xk: torch.Tensor, + freqs_cis: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor]: + xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2)) + xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2)) + freqs_cis = _reshape_for_broadcast(freqs_cis, xq_) + xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3) + xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3) + return xq_out.type_as(xq), xk_out.type_as(xk) + + +class Attention(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.args = args + + self.n_heads: int = args.n_heads + self.n_kv_heads: int = args.n_kv_heads + + self.repeats = self.n_heads // self.n_kv_heads + self.sliding_window = self.args.sliding_window + + self.scale = self.args.head_dim**-0.5 + + self.wq = nn.Linear(args.dim, args.n_heads * args.head_dim, bias=False) + self.wk = nn.Linear( + args.dim, args.n_kv_heads * args.head_dim, bias=False + ) + self.wv = nn.Linear( + args.dim, args.n_kv_heads * args.head_dim, bias=False + ) + self.wo = nn.Linear(args.n_heads * args.head_dim, args.dim, bias=False) + self.cache_k = torch.empty( + ( + args.max_batch_size, + args.sliding_window, + self.n_kv_heads, + self.args.head_dim, + ), + dtype=torch.float16, + ) + self.cache_v = torch.empty( + ( + args.max_batch_size, + args.sliding_window, + self.n_kv_heads, + self.args.head_dim, + ), + dtype=torch.float16, + ) + + def forward( + self, + x: torch.Tensor, + freqs_cis: torch.Tensor, + positions: torch.Tensor, + mask: Optional[torch.Tensor], + ) -> torch.Tensor: + bsz, seqlen, _ = x.shape + + xq, xk, xv = self.wq(x), self.wk(x), self.wv(x) + xq = xq.view(bsz, seqlen, self.n_heads, self.args.head_dim) + xk = xk.view(bsz, seqlen, self.n_kv_heads, self.args.head_dim) + xv = xv.view(bsz, seqlen, self.n_kv_heads, self.args.head_dim) + xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis) + + # The cache is a rotating buffer + scatter_pos = (positions[-self.sliding_window :] % self.sliding_window)[ + None, :, None, None + ] + scatter_pos = scatter_pos.repeat( + bsz, 1, self.n_kv_heads, self.args.head_dim + ) + self.cache_k[:bsz].scatter_( + dim=1, + index=scatter_pos, + src=xk[:, 
-self.sliding_window :].to(self.cache_k.dtype), + ) + self.cache_v[:bsz].scatter_( + dim=1, + index=scatter_pos, + src=xv[:, -self.sliding_window :].to(self.cache_v.dtype), + ) + + if positions.shape[0] > 1: + # prefill + key, value = repeat_kv(xk, xv, self.repeats) + else: + cur_pos = positions[-1].item() + 1 + key, value = repeat_kv( + self.cache_k[:bsz, :cur_pos, ...].to(xk.dtype), + self.cache_v[:bsz, :cur_pos, ...].to(xv.dtype), + self.repeats, + ) + + query = xq.transpose(1, 2) + key = key.transpose(1, 2) + value = value.transpose(1, 2) + # scores : [bsz, n_heads, seqlen | 1, seqlen] + scores = torch.matmul(query, key.transpose(2, 3)) * self.scale + + if mask is not None: + scores += mask[None, None, ...] + + scores = scores.float() + scores = nn.functional.softmax(scores, dim=-1).type_as(query) + output = torch.matmul( + scores, value + ) # (bs, n_local_heads, slen, head_dim) + output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1) + return self.wo(output) + + +class FeedForward(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + + self.w1 = nn.Linear(args.dim, args.hidden_dim, bias=False) + self.w2 = nn.Linear(args.hidden_dim, args.dim, bias=False) + self.w3 = nn.Linear(args.dim, args.hidden_dim, bias=False) + + def forward(self, x) -> torch.Tensor: + return self.w2(nn.functional.silu(self.w1(x)) * self.w3(x)) + + +class RMSNorm(torch.nn.Module): + def __init__(self, dim: int, eps: float = 1e-6): + super().__init__() + self.eps = eps + self.weight = nn.Parameter(torch.ones(dim)) + + def _norm(self, x): + return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps) + + def forward(self, x): + output = self._norm(x.float()).type_as(x) + return output * self.weight + + +class TransformerBlock(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.n_heads = args.n_heads + self.dim = args.dim + self.attention = Attention(args) + self.feed_forward = FeedForward(args=args) + self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps) + self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps) + self.args = args + + def forward( + self, + x: torch.Tensor, + freqs_cis: torch.Tensor, + positions: torch.Tensor, + mask: Optional[torch.Tensor], + ) -> torch.Tensor: + r = self.attention.forward( + self.attention_norm(x), freqs_cis, positions, mask + ) + h = x + r + r = self.feed_forward.forward(self.ffn_norm(h)) + out = h + r + return out + + +def precompute_freqs_cis( + dim: int, end: int, theta: float = 10000.0 +) -> torch.Tensor: + freqs = 1.0 / ( + theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim) + ) + t = torch.arange(end, device=freqs.device) # type: ignore + freqs = torch.outer(t, freqs).float() # type: ignore + return torch.polar(torch.ones_like(freqs), freqs) # complex64 + + +class TorchTransformer(nn.Module): + def __init__(self, args: ModelArgs): + super().__init__() + self.args = args + self.vocab_size = args.vocab_size + self.n_layers = args.n_layers + assert self.vocab_size > 0 + + self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim) + + self.layers = torch.nn.ModuleList( + [TransformerBlock(args=args) for _ in range(args.n_layers)] + ) + + self.norm = RMSNorm(args.dim, eps=args.norm_eps) + + self.output = nn.Linear(args.dim, args.vocab_size, bias=False) + + self.freqs_cis = precompute_freqs_cis(self.args.head_dim, 128_000) + + def forward( + self, + input_ids: torch.Tensor, + positions: torch.Tensor, + ): + h = self.tok_embeddings(input_ids) + freqs_cis = self.freqs_cis[positions] + + mask: 
Optional[torch.Tensor] = None + if input_ids.shape[1] > 1: + seqlen = input_ids.shape[1] + tensor = torch.full( + (seqlen, seqlen), + dtype=h.dtype, + fill_value=1, + device=h.device, + ) + mask = torch.tril(tensor, diagonal=0).to(h.dtype) + # make the mask banded to account for sliding window + mask = torch.triu(mask, diagonal=-self.args.sliding_window) + mask = torch.log(mask) + + for layer in self.layers: + h = layer(h, freqs_cis, positions, mask) + + return self.output(self.norm(h)).float() + + @staticmethod + def from_folder( + folder: Path, max_batch_size: int = 1, device="cpu", dtype=torch.float16 + ): + with open(folder / "params.json", "r") as f: + model_args = ModelArgs(**json.loads(f.read())) + model_args.max_batch_size = max_batch_size + model = TorchTransformer(model_args).to(device=device, dtype=dtype) + loaded = torch.load(folder / "consolidated.00.pth") + model.load_state_dict(loaded) + return model + + +def port_weights( + model_k3: MistralBackbone, model_torch: TorchTransformer, params: ModelArgs +): + model_k3.get_layer("token_embedding").embeddings.assign( + model_torch.tok_embeddings.weight.detach().cpu().numpy() + ) + + for i in range(model_k3.num_layers): + model_k3.get_layer( + f"transformer_layer_{i}" + )._self_attention_layer._key_dense.set_weights( + [ + model_torch.layers[i] + .attention.wk.weight.T.reshape( + params.dim, params.n_kv_heads, params.head_dim + ) + .detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._self_attention_layer._query_dense.set_weights( + [ + model_torch.layers[i] + .attention.wq.weight.T.reshape( + params.dim, params.n_heads, params.head_dim + ) + .detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._self_attention_layer._value_dense.set_weights( + [ + model_torch.layers[i] + .attention.wv.weight.T.reshape( + params.dim, params.n_kv_heads, params.head_dim + ) + .detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._self_attention_layer._output_dense.set_weights( + [ + model_torch.layers[i] + .attention.wo.weight.T.reshape( + params.n_heads, params.head_dim, params.dim + ) + .detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._self_attention_layernorm.set_weights( + [model_torch.layers[i].attention_norm.weight.detach().cpu().numpy()] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._feedforward_intermediate_dense.set_weights( + [ + model_torch.layers[i] + .feed_forward.w3.weight.T.detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._feedforward_output_dense.set_weights( + [ + model_torch.layers[i] + .feed_forward.w2.weight.T.detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._feedforward_gate_dense.set_weights( + [ + model_torch.layers[i] + .feed_forward.w1.weight.T.detach() + .cpu() + .numpy() + ] + ) + model_k3.get_layer( + f"transformer_layer_{i}" + )._feedforward_layernorm.set_weights( + [model_torch.layers[i].ffn_norm.weight.detach().cpu().numpy()] + ) + + model_k3.get_layer("sequence_output_layernorm").set_weights( + [model_torch.norm.weight.detach().cpu().numpy()] + ) + model_k3.get_layer("token_embedding").reverse_embeddings.assign( + model_torch.output.weight.T.detach().cpu().numpy() + ) + + +if __name__ == "__main__": + with open(MODEL_PATH / "params.json", "r") as params_file: + params = ModelArgs(**json.load(params_file)) + + model_torch = TorchTransformer.from_folder( + MODEL_PATH, 
device="cpu", dtype=torch.float16 + ) + print("Torch model loaded") + model_k3 = MistralBackbone( + vocabulary_size=32000, + hidden_dim=4096, + num_layers=32, + num_query_heads=32, + num_key_value_heads=8, + intermediate_dim=14336, + sliding_window=4096, + layer_norm_epsilon=1e-6, + dtype="float16", + ) + print("Keras 3 model loaded.") + + port_weights(model_k3, model_torch, params) + print("Weight transfer done.") + + model_k3.save_weights("mistral_7b.weights.h5") + print("Weights saved.") From 124868211b2c0a42c81649a3bf9caf7c52df609f Mon Sep 17 00:00:00 2001 From: Mehdi BRAHIMI <36081903+mbrhd@users.noreply.github.com> Date: Wed, 20 Dec 2023 20:12:41 +0100 Subject: [PATCH 47/87] changing pooling output (#1364) * changing pooling output * pr comments --- keras_nlp/models/bert/bert_backbone.py | 5 ++--- keras_nlp/models/electra/electra_backbone.py | 7 ++++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/keras_nlp/models/bert/bert_backbone.py b/keras_nlp/models/bert/bert_backbone.py index ea3e3ad868..174b0f0e42 100644 --- a/keras_nlp/models/bert/bert_backbone.py +++ b/keras_nlp/models/bert/bert_backbone.py @@ -162,13 +162,12 @@ def __init__( # Construct the two BERT outputs. The pooled output is a dense layer on # top of the [CLS] token. sequence_output = x - x = keras.layers.Dense( + pooled_output = keras.layers.Dense( hidden_dim, kernel_initializer=bert_kernel_initializer(), activation="tanh", name="pooled_dense", - )(x) - pooled_output = x[:, cls_token_index, :] + )(x[:, cls_token_index, :]) # Instantiate using Functional API Model constructor super().__init__( diff --git a/keras_nlp/models/electra/electra_backbone.py b/keras_nlp/models/electra/electra_backbone.py index 9c67fe4753..66d1db8ccc 100644 --- a/keras_nlp/models/electra/electra_backbone.py +++ b/keras_nlp/models/electra/electra_backbone.py @@ -163,13 +163,14 @@ def __init__( )(x, padding_mask=padding_mask) sequence_output = x - x = keras.layers.Dense( + # Construct the two ELECTRA outputs. The pooled output is a dense layer on + # top of the [CLS] token. 
+ pooled_output = keras.layers.Dense( hidden_dim, kernel_initializer=electra_kernel_initializer(), activation="tanh", name="pooled_dense", - )(x) - pooled_output = x[:, cls_token_index, :] + )(x[:, cls_token_index, :]) # Instantiate using Functional API Model constructor super().__init__( From 5fd92c868ac1b8ea94c321c1a7cae93850cf44ed Mon Sep 17 00:00:00 2001 From: Anshuman Mishra <51750587+shivance@users.noreply.github.com> Date: Fri, 22 Dec 2023 13:41:12 +0530 Subject: [PATCH 48/87] Add `LlamaBackbone` (#1203) * llama backbone * Fixes for rotary embedding --------- Co-authored-by: Matt Watson --- keras_nlp/layers/modeling/rotary_embedding.py | 51 +++-- .../layers/modeling/rotary_embedding_test.py | 12 + keras_nlp/models/__init__.py | 1 + keras_nlp/models/llama/__init__.py | 13 ++ keras_nlp/models/llama/llama_attention.py | 201 +++++++++++++++++ keras_nlp/models/llama/llama_backbone.py | 156 +++++++++++++ keras_nlp/models/llama/llama_backbone_test.py | 52 +++++ keras_nlp/models/llama/llama_decoder.py | 206 ++++++++++++++++++ keras_nlp/models/llama/llama_layernorm.py | 37 ++++ .../convert_llama_checkpoints.py | 141 ++++++++++++ 10 files changed, 848 insertions(+), 22 deletions(-) create mode 100644 keras_nlp/models/llama/__init__.py create mode 100644 keras_nlp/models/llama/llama_attention.py create mode 100644 keras_nlp/models/llama/llama_backbone.py create mode 100644 keras_nlp/models/llama/llama_backbone_test.py create mode 100644 keras_nlp/models/llama/llama_decoder.py create mode 100644 keras_nlp/models/llama/llama_layernorm.py create mode 100644 tools/checkpoint_conversion/convert_llama_checkpoints.py diff --git a/keras_nlp/layers/modeling/rotary_embedding.py b/keras_nlp/layers/modeling/rotary_embedding.py index 6f4ae449de..45f77ce494 100644 --- a/keras_nlp/layers/modeling/rotary_embedding.py +++ b/keras_nlp/layers/modeling/rotary_embedding.py @@ -85,10 +85,7 @@ def __init__( self.built = True def call(self, inputs, start_index=0): - rotary_dim = ops.shape(inputs)[-1] - cos_emb, sin_emb = self._compute_cos_sin_embedding( - inputs, rotary_dim, start_index - ) + cos_emb, sin_emb = self._compute_cos_sin_embedding(inputs, start_index) return self._apply_rotary_pos_emb(inputs, cos_emb, sin_emb) def _apply_rotary_pos_emb(self, tensor, cos_emb, sin_emb): @@ -96,34 +93,44 @@ def _apply_rotary_pos_emb(self, tensor, cos_emb, sin_emb): half_rot_tensor = ops.concatenate((-x2, x1), axis=self.feature_axis) return (tensor * cos_emb) + (half_rot_tensor * sin_emb) - def _compute_cos_sin_embedding(self, x, rotary_dim, start_index): - freq_range = ops.arange(0, rotary_dim, 2) - freq_range = ops.cast(freq_range, self.compute_dtype) - freq_range = freq_range / ops.cast( - self.scaling_factor, self.compute_dtype - ) - inverse_freq = 1.0 / ( - self.max_wavelength - ** (freq_range / ops.cast(rotary_dim, self.compute_dtype)) - ) - seq_len = ops.shape(x)[self.sequence_axis] - tensor = ops.cast(ops.arange(seq_len), self.compute_dtype) + start_index - tensor = ops.cast(tensor, dtype=inverse_freq.dtype) - freq = ops.einsum("i, j -> ij", tensor, inverse_freq) - embedding = ops.concatenate((freq, freq), axis=self.feature_axis) - + def _compute_cos_sin_embedding(self, inputs, start_index=0): def get_axis(axis): - return axis if axis > 0 else len(x.shape) + axis + return axis if axis > 0 else len(inputs.shape) + axis feature_axis = get_axis(self.feature_axis) sequence_axis = get_axis(self.sequence_axis) - for axis in range(len(x.shape)): + rotary_dim = ops.shape(inputs)[feature_axis] + inverse_freq = 
self._get_inverse_freq(rotary_dim) + + seq_len = ops.shape(inputs)[self.sequence_axis] + tensor = ops.cast(ops.arange(seq_len), self.compute_dtype) + start_index + + tensor = ops.cast(tensor, dtype=inverse_freq.dtype) + freq = ops.einsum("i,j->ij", tensor, inverse_freq) + embedding = ops.concatenate((freq, freq), axis=-1) + + # Reshape the embedding to be broadcastable with input shape. + if feature_axis < sequence_axis: + embedding = ops.transpose(embedding) + for axis in range(len(inputs.shape)): if axis != sequence_axis and axis != feature_axis: embedding = ops.expand_dims(embedding, axis) return ops.cos(embedding), ops.sin(embedding) + def _get_inverse_freq(self, rotary_dim): + freq_range = ops.arange(0, rotary_dim, 2) + freq_range = ops.cast(freq_range, self.compute_dtype) + freq_range = freq_range / ops.cast( + self.scaling_factor, self.compute_dtype + ) + inverse_freq = 1.0 / ( + self.max_wavelength + ** (freq_range / ops.cast(rotary_dim, self.compute_dtype)) + ) + return inverse_freq + def get_config(self): config = super().get_config() config.update( diff --git a/keras_nlp/layers/modeling/rotary_embedding_test.py b/keras_nlp/layers/modeling/rotary_embedding_test.py index 9874f69e5e..c0fc2906e7 100644 --- a/keras_nlp/layers/modeling/rotary_embedding_test.py +++ b/keras_nlp/layers/modeling/rotary_embedding_test.py @@ -97,6 +97,18 @@ def test_start_index(self): ) self.assertAllClose(full_output, sequential_output) + def test_permuted_axes(self): + batch_size, seq_length, feature_size = 2, 3, 4 + data = random.uniform(shape=(batch_size, seq_length, feature_size)) + layer = RotaryEmbedding(seq_length) + outputs = layer(data) + permuted_data = ops.transpose(data, (0, 2, 1)) + permuted_layer = RotaryEmbedding( + seq_length, sequence_axis=-1, feature_axis=-2 + ) + permuted_outputs = permuted_layer(permuted_data) + self.assertAllClose(outputs, ops.transpose(permuted_outputs, (0, 2, 1))) + def test_float16_dtype(self): embedding_layer = RotaryEmbedding(dtype="float16") seq_length = 100 diff --git a/keras_nlp/models/__init__.py b/keras_nlp/models/__init__.py index 8f8e3a2ab3..ab04d8eae0 100644 --- a/keras_nlp/models/__init__.py +++ b/keras_nlp/models/__init__.py @@ -89,6 +89,7 @@ GPTNeoXPreprocessor, ) from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer +from keras_nlp.models.llama.llama_backbone import LlamaBackbone from keras_nlp.models.mistral.mistral_backbone import MistralBackbone from keras_nlp.models.opt.opt_backbone import OPTBackbone from keras_nlp.models.opt.opt_causal_lm import OPTCausalLM diff --git a/keras_nlp/models/llama/__init__.py b/keras_nlp/models/llama/__init__.py new file mode 100644 index 0000000000..ba0c2545e4 --- /dev/null +++ b/keras_nlp/models/llama/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
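The rotary embedding refactor above is exercised by the new `test_permuted_axes` case. For a quick local sanity check, here is a minimal standalone sketch of the same property, assuming the public `keras_nlp.layers.RotaryEmbedding` export, a Keras 3 backend with `ops.convert_to_numpy` available, and illustrative shapes:

```python
import numpy as np

from keras_nlp.backend import ops
from keras_nlp.layers import RotaryEmbedding

# A fake query tensor of shape (batch, sequence, feature).
x = np.random.uniform(size=(2, 8, 16)).astype("float32")

# Default layout: sequence on axis 1, features on the last axis.
rotated = RotaryEmbedding(max_wavelength=10000)(x)

# The same rotation with sequence/feature axes swapped, applied to the
# transposed input, should match after transposing back.
permuted = RotaryEmbedding(
    max_wavelength=10000, sequence_axis=-1, feature_axis=-2
)(np.transpose(x, (0, 2, 1)))

np.testing.assert_allclose(
    ops.convert_to_numpy(rotated),
    np.transpose(ops.convert_to_numpy(permuted), (0, 2, 1)),
    rtol=1e-5,
    atol=1e-5,
)
```

This mirrors the assertion in `rotary_embedding_test.py`: the axis arguments only change the input layout, not the rotation itself.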
diff --git a/keras_nlp/models/llama/llama_attention.py b/keras_nlp/models/llama/llama_attention.py new file mode 100644 index 0000000000..a2604e5351 --- /dev/null +++ b/keras_nlp/models/llama/llama_attention.py @@ -0,0 +1,201 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.backend import keras +from keras_nlp.backend import ops +from keras_nlp.layers.modeling.rotary_embedding import RotaryEmbedding +from keras_nlp.utils.keras_utils import clone_initializer + + +class LlamaAttention(keras.layers.Layer): + """Grouped query attention for Llama models""" + + def __init__( + self, + num_query_heads, + num_key_value_heads, + rope_scaling_factor=1.0, + kernel_initializer="glorot_uniform", + rope_max_wavelength=10000, + max_sequence_length=512, + **kwargs, + ): + super().__init__(**kwargs) + self.num_query_heads = num_query_heads + self.num_key_value_heads = num_key_value_heads + + self.num_key_value_groups = num_query_heads // num_key_value_heads + + self.kernel_initializer = keras.initializers.get(kernel_initializer) + self.max_sequence_length = max_sequence_length + + self.rope_scaling_factor = rope_scaling_factor + self.rope_max_wavelength = rope_max_wavelength + + def build(self, inputs_shape): + self.hidden_dim = inputs_shape[-1] + self.attn_head_size = self.hidden_dim // self.num_query_heads + + # Einsum variables: + # b = batch size + # q = query length + # k = key/value length + # m = model dim + # u = num query heads + # v = num key/value heads + # h = head dim + self._query_dense = keras.layers.EinsumDense( + equation="bqm,muh->bquh", + output_shape=(None, self.num_query_heads, self.attn_head_size), + kernel_initializer=clone_initializer(self.kernel_initializer), + name="query", + ) + self._query_dense.build(inputs_shape) + self._key_dense = keras.layers.EinsumDense( + equation="bkm,mvh->bkvh", + output_shape=(None, self.num_key_value_heads, self.attn_head_size), + kernel_initializer=clone_initializer(self.kernel_initializer), + name="key", + ) + self._key_dense.build(inputs_shape) + + self._value_dense = keras.layers.EinsumDense( + equation="bkm,mvh->bkvh", + output_shape=(None, self.num_key_value_heads, self.attn_head_size), + kernel_initializer=clone_initializer(self.kernel_initializer), + name="value", + ) + self._value_dense.build(inputs_shape) + + self._softmax = keras.layers.Softmax(axis=-1, name="attention_softmax") + + self._output_dense = keras.layers.EinsumDense( + equation="bqm,mh->bqh", + output_shape=(None, self.hidden_dim), + kernel_initializer=clone_initializer(self.kernel_initializer), + name="attention_output", + ) + self._output_dense.build(inputs_shape) + + self._rotary_embedding_layer = RotaryEmbedding( + max_wavelength=self.rope_max_wavelength, + scaling_factor=self.rope_scaling_factor, + ) + self._rotary_embedding_layer.build(inputs_shape) + + self.built = True + + def call( + self, + hidden_states, + attention_mask=None, + cache=None, + cache_update_index=None, + ): + query = self._query_dense(hidden_states) 
+        if cache is not None:
+            key_cache = cache[:, 0, ...]
+            value_cache = cache[:, 1, ...]
+            if cache_update_index is None:
+                key = key_cache
+                value = value_cache
+            else:
+                key_update = self._key_dense(hidden_states)
+                value_update = self._value_dense(hidden_states)
+                start = [0, cache_update_index, 0, 0]
+                key = ops.slice_update(key_cache, start, key_update)
+                value = ops.slice_update(value_cache, start, value_update)
+                cache = ops.stack((key, value), axis=1)
+        else:
+            if cache_update_index is not None:
+                raise ValueError(
+                    "`cache_update_index` should not be set if `cache` is "
+                    f"`None`. Received: cache={cache}, "
+                    f"cache_update_index={cache_update_index}"
+                )
+            key = self._key_dense(hidden_states)
+            value = self._value_dense(hidden_states)
+
+        query = self._rotary_embedding_layer(query)
+        key = self._rotary_embedding_layer(key)
+
+        # Tile the key/value heads up to one per query head.
+        key = ops.tile(key, [1, 1, self.num_key_value_groups, 1])
+        value = ops.tile(value, [1, 1, self.num_key_value_groups, 1])
+
+        attention_output, attention_scores = self._compute_attention(
+            query, key, value, attention_mask
+        )
+
+        attention_output_shape = ops.shape(attention_output)
+
+        # Merge the heads back into the hidden dimension before the output
+        # projection.
+        attention_output = ops.reshape(
+            attention_output,
+            [
+                attention_output_shape[0],
+                attention_output_shape[1],
+                self.hidden_dim,
+            ],
+        )
+
+        attention_output = self._output_dense(attention_output)
+
+        if cache is not None:
+            return (attention_output, cache)
+        return attention_output
+
+    def _masked_softmax(self, attention_scores, attention_mask=None):
+        if attention_mask is not None:
+            # Insert a heads axis so the mask broadcasts over attention heads.
+            mask_expansion_axis = -3
+            for _ in range(
+                len(attention_scores.shape) - len(attention_mask.shape)
+            ):
+                attention_mask = ops.expand_dims(
+                    attention_mask, axis=mask_expansion_axis
+                )
+        return self._softmax(attention_scores, attention_mask)
+
+    def _compute_attention(self, query, key, value, attention_mask=None):
+        # Einsum indices: a = batch, b = query length, e = key/value length,
+        # c = num heads, d = head dim. Scores have shape
+        # (batch, num heads, query length, key/value length).
+        attention_scores = ops.einsum("aecd,abcd->acbe", key, query)
+
+        norm_factor = ops.sqrt(
+            ops.convert_to_tensor(self.attn_head_size, self.compute_dtype)
+        )
+
+        attention_scores /= norm_factor
+
+        attention_scores = self._masked_softmax(
+            attention_scores, attention_mask
+        )
+        attention_output = ops.einsum(
+            "acbe,aecd->abcd", attention_scores, value
+        )
+
+        return attention_output, attention_scores
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "num_query_heads": self.num_query_heads,
+                "kernel_initializer": keras.initializers.serialize(
+                    self.kernel_initializer
+                ),
+                "rope_max_wavelength": self.rope_max_wavelength,
+                "rope_scaling_factor": self.rope_scaling_factor,
+                "num_key_value_heads": self.num_key_value_heads,
+                "max_sequence_length": self.max_sequence_length,
+            }
+        )
+        return config
diff --git a/keras_nlp/models/llama/llama_backbone.py b/keras_nlp/models/llama/llama_backbone.py
new file mode 100644
index 0000000000..63438544cc
--- /dev/null
+++ b/keras_nlp/models/llama/llama_backbone.py
@@ -0,0 +1,156 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from keras_nlp.api_export import keras_nlp_export
+from keras_nlp.backend import keras
+from keras_nlp.backend import ops
+from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
+from keras_nlp.models.backbone import Backbone
+from keras_nlp.models.llama.llama_decoder import LlamaDecoder
+from keras_nlp.models.llama.llama_layernorm import LlamaLayerNorm
+
+
+def _llama_kernel_initializer(stddev=0.02):
+    return keras.initializers.RandomNormal(stddev=stddev)
+
+
+@keras_nlp_export("keras_nlp.models.LlamaBackbone")
+class LlamaBackbone(Backbone):
+    """
+    LLaMA core network with hyperparameters.
+
+    This network implements a Transformer-based decoder network,
+    LLaMA, as described in
+    ["LLaMA: Open and Efficient Foundation Language Models"](https://arxiv.org/abs/2302.13971).
+
+    The default constructor gives a fully customizable, randomly initialized
+    LLaMA model with any number of layers, heads, and embedding
+    dimensions. This backbone also supports LLaMA2 checkpoints.
+
+    Args:
+        vocabulary_size: int. The size of the token vocabulary.
+        num_layers: int. The number of transformer layers.
+        num_query_heads: int. The number of query attention heads for each
+            transformer layer. The hidden size must be divisible by the
+            number of query heads.
+        hidden_dim: int. The size of the transformer hidden states.
+        intermediate_dim: int. The output dimension of the first Dense layer
+            in a two-layer feedforward network for each transformer.
+        num_key_value_heads: int. The number of key/value attention heads used
+            to implement grouped query attention (GQA). If
+            `num_key_value_heads == num_query_heads`, the model uses multi-head
+            attention (MHA); if `num_key_value_heads == 1`, it uses multi-query
+            attention (MQA).
+        rope_scaling_factor: float. The scaling factor used to compute the
+            rotary embeddings.
+        rope_max_wavelength: int. The maximum angular wavelength of the
+            sine/cosine curves, for rotary embeddings.
+        layer_norm_epsilon: float. A value added to the denominator for
+            numerical stability.
+        max_sequence_length: int. The maximum sequence length that this
+            decoder can consume.
+    """
+
+    def __init__(
+        self,
+        vocabulary_size,
+        num_layers,
+        num_query_heads,
+        hidden_dim,
+        intermediate_dim,
+        num_key_value_heads,
+        rope_scaling_factor=1.0,
+        rope_max_wavelength=10000,
+        layer_norm_epsilon=1e-5,
+        max_sequence_length=4096,
+        **kwargs,
+    ):
+        # Inputs
+        token_ids = keras.Input(shape=(None,), dtype="int32", name="token_ids")
+        padding_mask = keras.Input(
+            shape=(None,), dtype="int32", name="padding_mask"
+        )
+
+        # Embed tokens
+        token_embedding = ReversibleEmbedding(
+            input_dim=vocabulary_size,
+            output_dim=hidden_dim,
+            embeddings_initializer=_llama_kernel_initializer(stddev=0.01),
+            tie_weights=False,
+            name="token_embedding",
+        )(token_ids)
+
+        x = token_embedding
+
+        # Apply successive transformer decoder blocks.
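+        # Each block is pre-norm: RMS layernorm -> grouped query attention ->
+        # residual add, then RMS layernorm -> silu-gated (SwiGLU) feedforward
+        # -> residual add.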
+ for i in range(num_layers): + x = LlamaDecoder( + intermediate_dim=intermediate_dim, + num_query_heads=num_query_heads, + num_key_value_heads=num_key_value_heads, + rope_scaling_factor=rope_scaling_factor, + max_sequence_length=max_sequence_length, + rope_max_wavelength=rope_max_wavelength, + layer_norm_epsilon=layer_norm_epsilon, + activation=ops.silu, + kernel_initializer=_llama_kernel_initializer(stddev=0.02), + name=f"transformer_layer_{i}", + )(x, decoder_padding_mask=padding_mask) + + sequence_output = LlamaLayerNorm( + name="layer_norm", + epsilon=layer_norm_epsilon, + )(x) + + # Instantiate using Functional API Model constructor + super().__init__( + inputs={ + "token_ids": token_ids, + "padding_mask": padding_mask, + }, + outputs=sequence_output, + **kwargs, + ) + # All references to `self` below this line + self.vocabulary_size = vocabulary_size + self.num_layers = num_layers + self.num_query_heads = num_query_heads + self.hidden_dim = hidden_dim + self.intermediate_dim = intermediate_dim + self.rope_max_wavelength = rope_max_wavelength + self.num_key_value_heads = num_key_value_heads + self.rope_scaling_factor = rope_scaling_factor + self.max_sequence_length = max_sequence_length + self.layer_norm_epsilon = layer_norm_epsilon + + def get_config(self): + config = super().get_config() + config.update( + { + "vocabulary_size": self.vocabulary_size, + "num_layers": self.num_layers, + "num_query_heads": self.num_query_heads, + "hidden_dim": self.hidden_dim, + "intermediate_dim": self.intermediate_dim, + "rope_max_wavelength": self.rope_max_wavelength, + "rope_scaling_factor": self.rope_scaling_factor, + "num_key_value_heads": self.num_key_value_heads, + "max_sequence_length": self.max_sequence_length, + "layer_norm_epsilon": self.layer_norm_epsilon, + } + ) + return config + + @property + def token_embedding(self): + return self.get_layer("token_embedding") diff --git a/keras_nlp/models/llama/llama_backbone_test.py b/keras_nlp/models/llama/llama_backbone_test.py new file mode 100644 index 0000000000..efff972c6b --- /dev/null +++ b/keras_nlp/models/llama/llama_backbone_test.py @@ -0,0 +1,52 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import pytest + +from keras_nlp.backend import ops +from keras_nlp.models.llama.llama_backbone import LlamaBackbone +from keras_nlp.tests.test_case import TestCase + + +class LlamaTest(TestCase): + def setUp(self): + self.init_kwargs = { + "vocabulary_size": 10, + "num_layers": 2, + "num_query_heads": 4, + "num_key_value_heads": 2, + "hidden_dim": 8, + "intermediate_dim": 8, + "max_sequence_length": 10, + } + self.input_data = { + "token_ids": ops.ones((2, 5), dtype="int32"), + "padding_mask": ops.ones((2, 5), dtype="int32"), + } + + def test_backbone_basics(self): + self.run_backbone_test( + cls=LlamaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output_shape=(2, 5, 8), + ) + + @pytest.mark.large + def test_saved_model(self): + self.run_model_saving_test( + cls=LlamaBackbone, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + ) diff --git a/keras_nlp/models/llama/llama_decoder.py b/keras_nlp/models/llama/llama_decoder.py new file mode 100644 index 0000000000..47bac478cc --- /dev/null +++ b/keras_nlp/models/llama/llama_decoder.py @@ -0,0 +1,206 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.backend import keras +from keras_nlp.backend import ops +from keras_nlp.layers.modeling.transformer_layer_utils import ( + compute_causal_mask, +) +from keras_nlp.layers.modeling.transformer_layer_utils import ( + merge_padding_and_attention_mask, +) +from keras_nlp.models.llama.llama_attention import LlamaAttention +from keras_nlp.models.llama.llama_layernorm import LlamaLayerNorm +from keras_nlp.utils.keras_utils import clone_initializer + + +class LlamaDecoder(keras.layers.Layer): + """Llama decoder block.""" + + def __init__( + self, + intermediate_dim, + num_query_heads, + num_key_value_heads, + rope_scaling_factor=1.0, + activation="relu", + layer_norm_epsilon=1e-5, + kernel_initializer="glorot_uniform", + rope_max_wavelength=10000, + max_sequence_length=512, + **kwargs, + ): + super().__init__(**kwargs) + self.intermediate_dim = intermediate_dim + self.num_query_heads = num_query_heads + self.num_key_value_heads = num_key_value_heads + + self.rope_max_wavelength = rope_max_wavelength + self.rope_scaling_factor = rope_scaling_factor + + self.max_sequence_length = max_sequence_length + self.activation = keras.activations.get(activation) + self.layer_norm_epsilon = layer_norm_epsilon + self.kernel_initializer = keras.initializers.get(kernel_initializer) + + def build(self, decoder_sequence_shape): + self.hidden_dim = decoder_sequence_shape[-1] + + # Self attention layers. 
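+        # Sublayers are built explicitly against the incoming sequence shape,
+        # so all weights exist by the time this layer's build() returns.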
+ self._self_attention_layer = LlamaAttention( + num_query_heads=self.num_query_heads, + num_key_value_heads=self.num_key_value_heads, + rope_max_wavelength=self.rope_max_wavelength, + max_sequence_length=self.max_sequence_length, + rope_scaling_factor=self.rope_scaling_factor, + kernel_initializer=clone_initializer(self.kernel_initializer), + ) + self._self_attention_layer.build(decoder_sequence_shape) + + self._self_attention_layernorm = LlamaLayerNorm( + epsilon=self.layer_norm_epsilon, + ) + self._self_attention_layernorm.build(decoder_sequence_shape) + + # Feedforward layers. + self._feedforward_intermediate_dense = keras.layers.Dense( + self.intermediate_dim, + kernel_initializer=clone_initializer(self.kernel_initializer), + ) + self._feedforward_intermediate_dense.build(decoder_sequence_shape) + + self._feedforward_gate_dense = keras.layers.Dense( + self.intermediate_dim, + activation=self.activation, + kernel_initializer=clone_initializer(self.kernel_initializer), + ) + self._feedforward_gate_dense.build(decoder_sequence_shape) + + self._feedforward_output_dense = keras.layers.Dense( + self.hidden_dim, + kernel_initializer=clone_initializer(self.kernel_initializer), + ) + + intermediate_shape = list(decoder_sequence_shape) + intermediate_shape[-1] = self.intermediate_dim + self._feedforward_output_dense.build(tuple(intermediate_shape)) + + self._feedforward_layernorm = LlamaLayerNorm( + epsilon=self.layer_norm_epsilon, + ) + self._feedforward_layernorm.build(decoder_sequence_shape) + + self.built = True + + def call( + self, + decoder_sequence, + decoder_padding_mask=None, + decoder_attention_mask=None, + self_attention_cache=None, + self_attention_cache_update_index=None, + ): + self_attention_mask = self._compute_self_attention_mask( + decoder_sequence=decoder_sequence, + decoder_padding_mask=decoder_padding_mask, + decoder_attention_mask=decoder_attention_mask, + self_attention_cache=self_attention_cache, + self_attention_cache_update_index=self_attention_cache_update_index, + ) + residual = decoder_sequence + + x = self._self_attention_layernorm( + decoder_sequence, + ) + + x = self._self_attention_layer( + hidden_states=x, + attention_mask=self_attention_mask, + cache=self_attention_cache, + cache_update_index=self_attention_cache_update_index, + ) + + if self_attention_cache is not None: + x, self_attention_cache = x + + x = x + residual + residual = x + + x = self._feedforward_layernorm(x) + gate_output = self._feedforward_gate_dense(x) + + x = self._feedforward_intermediate_dense(x) + + x = self._feedforward_output_dense(ops.multiply(x, gate_output)) + + decoder_output = x + residual + + if self_attention_cache is not None: + return (decoder_output, self_attention_cache) + return decoder_output + + def _compute_self_attention_mask( + self, + decoder_sequence, + decoder_padding_mask, + decoder_attention_mask, + self_attention_cache=None, + self_attention_cache_update_index=None, + ): + decoder_mask = merge_padding_and_attention_mask( + decoder_sequence, decoder_padding_mask, decoder_attention_mask + ) + batch_size = ops.shape(decoder_sequence)[0] + input_length = output_length = ops.shape(decoder_sequence)[1] + # We need to handle a rectangular causal mask when doing cached + # decoding. For generative inference, `decoder_sequence` will + # generally be length 1, and `cache` will be the full generation length. 
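+        # The cache is stacked as (batch, 2, max_seq_len, num_key_value_heads,
+        # head_dim), so axis 2 holds the full key/value sequence length.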
+        if self_attention_cache is not None:
+            input_length = ops.shape(self_attention_cache)[2]
+
+        causal_mask = compute_causal_mask(
+            batch_size,
+            input_length,
+            output_length,
+            0
+            if self_attention_cache_update_index is None
+            else self_attention_cache_update_index,
+        )
+        return (
+            ops.minimum(decoder_mask, causal_mask)
+            if decoder_mask is not None
+            else causal_mask
+        )
+
+    def compute_output_shape(self, decoder_sequence_shape):
+        return decoder_sequence_shape
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "intermediate_dim": self.intermediate_dim,
+                "num_query_heads": self.num_query_heads,
+                "rope_max_wavelength": self.rope_max_wavelength,
+                "rope_scaling_factor": self.rope_scaling_factor,
+                "num_key_value_heads": self.num_key_value_heads,
+                "max_sequence_length": self.max_sequence_length,
+                "activation": keras.activations.serialize(self.activation),
+                "layer_norm_epsilon": self.layer_norm_epsilon,
+                "kernel_initializer": keras.initializers.serialize(
+                    self.kernel_initializer
+                ),
+            }
+        )
+        return config
diff --git a/keras_nlp/models/llama/llama_layernorm.py b/keras_nlp/models/llama/llama_layernorm.py
new file mode 100644
index 0000000000..0e85a45625
--- /dev/null
+++ b/keras_nlp/models/llama/llama_layernorm.py
@@ -0,0 +1,37 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from keras_nlp.backend import keras
+from keras_nlp.backend import ops

+# TODO: Should be replaced with LayerNormalization with `rms_scaling` param
+# https://github.com/keras-team/keras-core/pull/726


+class LlamaLayerNorm(keras.layers.Layer):
+    """A Llama-style layer norm that applies RMS normalization."""
+
+    def __init__(self, epsilon=1e-6, **kwargs):
+        super().__init__(**kwargs)
+        self.epsilon = epsilon
+
+    def build(self, input_shape):
+        self.weight = self.add_weight(
+            name="weight",
+            shape=(input_shape[-1],),
+            initializer="ones",
+        )
+        self.built = True
+
+    def call(self, hidden_states):
+        # Scale by the reciprocal root mean square of the features.
+        variance = ops.mean(ops.square(hidden_states), axis=-1, keepdims=True)
+        hidden_states = hidden_states / ops.sqrt(variance + self.epsilon)
+        return self.weight * hidden_states
diff --git a/tools/checkpoint_conversion/convert_llama_checkpoints.py b/tools/checkpoint_conversion/convert_llama_checkpoints.py
new file mode 100644
index 0000000000..5eb3973f36
--- /dev/null
+++ b/tools/checkpoint_conversion/convert_llama_checkpoints.py
@@ -0,0 +1,141 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import os + +import torch +from transformers import AutoModel + +from keras_nlp.models.llama.llama_backbone import LlamaBackbone + +os.environ["KERAS_BACKEND"] = "torch" + +# from huggingface_hub import login +# llama weights as of now are on request access +# login(token=' Date: Wed, 27 Dec 2023 14:44:09 -0800 Subject: [PATCH 50/87] Remove cloudbuild config (#1375) Thankfully we have outgrown it! --- .cloudbuild/Dockerfile | 4 -- .cloudbuild/README.md | 48 ---------------- .cloudbuild/cloudbuild.yaml | 77 ------------------------- .cloudbuild/cloudbuild_tpu.yaml | 79 -------------------------- .cloudbuild/jax/Dockerfile | 8 --- .cloudbuild/tensorflow/Dockerfile | 6 -- .cloudbuild/torch/Dockerfile | 8 --- .cloudbuild/unit_test_jobs.jsonnet | 43 -------------- .cloudbuild/unit_test_jobs_tpu.jsonnet | 42 -------------- .cloudbuild/update_images.sh | 13 ----- 10 files changed, 328 deletions(-) delete mode 100644 .cloudbuild/Dockerfile delete mode 100644 .cloudbuild/README.md delete mode 100644 .cloudbuild/cloudbuild.yaml delete mode 100644 .cloudbuild/cloudbuild_tpu.yaml delete mode 100644 .cloudbuild/jax/Dockerfile delete mode 100644 .cloudbuild/tensorflow/Dockerfile delete mode 100644 .cloudbuild/torch/Dockerfile delete mode 100644 .cloudbuild/unit_test_jobs.jsonnet delete mode 100644 .cloudbuild/unit_test_jobs_tpu.jsonnet delete mode 100755 .cloudbuild/update_images.sh diff --git a/.cloudbuild/Dockerfile b/.cloudbuild/Dockerfile deleted file mode 100644 index 456a354c84..0000000000 --- a/.cloudbuild/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -ARG IMAGE_NAME -FROM $IMAGE_NAME -COPY . /kerasnlp -WORKDIR /kerasnlp diff --git a/.cloudbuild/README.md b/.cloudbuild/README.md deleted file mode 100644 index 064caf5f33..0000000000 --- a/.cloudbuild/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# KerasNLP Accelerators Testing - -This `cloudbuild/` directory contains configurations for accelerators (GPU/TPU) -testing. Briefly, for each PR, it copies the PR's code to a base docker image -which contains KerasNLP dependencies to make a new docker image, and deploys the -new image to Google Kubernetes Engine cluster, then run all tests in -`keras_nlp/` via Google Cloud Build. - -- `cloudbuild.yaml`: The cloud build configuration that specifies steps to run - by cloud build. -- `Dockerfile`: The configuration to build the docker image for deployment. -- `requirements.txt`: Dependencies of KerasNLP. -- `unit_test_jobs.jsonnet`: Jsonnet config that tells GKE cluster to run all - unit tests in `keras_nlp/`. - -This test is powered by [ml-testing-accelerators](https://github.com/GoogleCloudPlatform/ml-testing-accelerators). - -### Adding Test Dependencies - -You must be authorized to run builds in the `keras-team-test` GCP project. -If you are not, please open a GitHub issue and ping a team member. -To authorize yourself with `keras-team-test`, run: - -```bash -gcloud config set project keras-team-test -``` - -To add/update dependency for GPU tests for a given framework: -- Add/update dependencies in `requirements.txt`. -- Add/update dependencies in `.cloudbuild/{framework}/Dockerfile`. -- Run the following: -``` -gcloud builds submit --region=us-west1 --tag us-west1-docker.pkg.dev/keras-team-test/keras-nlp-test/keras-nlp-image-{framework}:deps --timeout=30m -``` - -Alternately, to update all docker images at once, just run: -``` -./cloudbuild/update_images.sh -``` - -### Run TPU Testing - -Because of the TPU capacity limit, we cannot set automatic TPU testing. 
To -trigger the TPU testing, run the following command: - -``` -gcloud builds submit --config .cloudbuild/tpu_cloudbuild.yaml . --project=keras-team-test -``` diff --git a/.cloudbuild/cloudbuild.yaml b/.cloudbuild/cloudbuild.yaml deleted file mode 100644 index 474cf0e32a..0000000000 --- a/.cloudbuild/cloudbuild.yaml +++ /dev/null @@ -1,77 +0,0 @@ -substitutions: - # GCS bucket name. - _GCS_BUCKET: 'gs://keras-nlp-github-test' - # GKE cluster name. - _CLUSTER_NAME: 'keras-nlp-test-cluster' - # Location of GKE cluster. - _CLUSTER_ZONE: 'us-west1-b' - # Image name. - _IMAGE_NAME: 'us-west1-docker.pkg.dev/keras-team-test/keras-nlp-test/keras-nlp-image-${_BACKEND}' -steps: -- name: 'gcr.io/cloud-builders/docker' - id: build-image - entrypoint: 'bash' - args: - ['-c', 'docker build -f .cloudbuild/Dockerfile -t $_IMAGE_NAME:$BUILD_ID --build-arg IMAGE_NAME=$_IMAGE_NAME:deps .'] -- name: 'gcr.io/cloud-builders/docker' - id: push-image - waitFor: - - build-image - args: ['push', '$_IMAGE_NAME:$BUILD_ID'] -- name: 'golang' - id: download-jsonnet - waitFor: ['-'] - entrypoint: 'go' - args: [ - 'install', - 'github.com/google/go-jsonnet/cmd/jsonnet@latest', - ] -- name: 'gcr.io/cloud-builders/gcloud' - id: clone-templates - waitFor: ['-'] - entrypoint: 'git' - args: [ - 'clone', - 'https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git', - ] -- name: 'golang' - id: build-templates - waitFor: - - download-jsonnet - - clone-templates - entrypoint: 'jsonnet' - args: [ - '.cloudbuild/unit_test_jobs.jsonnet', - '--string', - '-J', 'ml-testing-accelerators', - '--ext-str', 'image=$_IMAGE_NAME', - '--ext-str', 'tag_name=$BUILD_ID', - '--ext-str', 'gcs_bucket=$_GCS_BUCKET', - '--ext-str', 'backend=$_BACKEND', - '-o', 'output.yaml', - ] -- name: 'gcr.io/cloud-builders/gcloud' - id: create-job - waitFor: - - push-image - - build-templates - entrypoint: bash - args: - - -c - - | - set -u - set -e - set -x - gcloud container clusters get-credentials $_CLUSTER_NAME --zone $_CLUSTER_ZONE --project keras-team-test - job_name=$(kubectl create -f output.yaml -o name) - sleep 5 - pod_name=$(kubectl wait --for condition=ready --timeout=120m pod -l job-name=${job_name#job.batch/} -o name) - kubectl logs -f $pod_name --container=train - sleep 5 - gcloud artifacts docker images delete $_IMAGE_NAME:$BUILD_ID - exit $(kubectl get $pod_name -o jsonpath={.status.containerStatuses[0].state.terminated.exitCode}) -timeout: 120m -options: - volumes: - - name: go-modules - path: /go diff --git a/.cloudbuild/cloudbuild_tpu.yaml b/.cloudbuild/cloudbuild_tpu.yaml deleted file mode 100644 index c715d71fb7..0000000000 --- a/.cloudbuild/cloudbuild_tpu.yaml +++ /dev/null @@ -1,79 +0,0 @@ -substitutions: - # GCS bucket name. - _GCS_BUCKET: 'gs://keras-nlp-github-test' - # GKE cluster name. - _CLUSTER_NAME: 'keras-nlp-tpu-test-cluster' - # Location of GKE cluster. - _CLUSTER_ZONE: 'us-central1-a' - # Image name. 
- _IMAGE_NAME: 'us-west1-docker.pkg.dev/keras-team-test/keras-nlp-test/keras-nlp-image' -steps: -- name: 'docker' - id: build-image - args: [ - 'build', - '.', - '-f', '.cloudbuild/Dockerfile', - '-t', '$_IMAGE_NAME:$BUILD_ID', - ] -- name: 'docker' - id: push-image - waitFor: - - build-image - args: ['push', '$_IMAGE_NAME:$BUILD_ID'] -- name: 'golang' - id: download-jsonnet - waitFor: ['-'] - entrypoint: 'go' - args: [ - 'install', - 'github.com/google/go-jsonnet/cmd/jsonnet@latest', - ] -- name: 'gcr.io/cloud-builders/gcloud' - id: clone-templates - waitFor: ['-'] - entrypoint: 'git' - args: [ - 'clone', - 'https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git', - ] -- name: 'golang' - id: build-templates - waitFor: - - download-jsonnet - - clone-templates - entrypoint: 'jsonnet' - args: [ - '.cloudbuild/unit_test_jobs_tpu.jsonnet', - '--string', - '-J', 'ml-testing-accelerators', - '--ext-str', 'image=$_IMAGE_NAME', - '--ext-str', 'tag_name=$BUILD_ID', - '--ext-str', 'gcs_bucket=$_GCS_BUCKET', - '-o', 'output.yaml', - ] -- name: 'gcr.io/cloud-builders/gcloud' - id: create-job - waitFor: - - push-image - - build-templates - entrypoint: bash - args: - - -c - - | - set -u - set -e - set -x - gcloud container clusters get-credentials $_CLUSTER_NAME --zone $_CLUSTER_ZONE --project keras-team-test - job_name=$(kubectl create -f output.yaml -o name) - sleep 5 - pod_name=$(kubectl wait --for condition=ready --timeout=120m pod -l job-name=${job_name#job.batch/} -o name) - kubectl logs -f $pod_name --container=train - sleep 5 - # gcloud artifacts docker images delete $_IMAGE_NAME:$BUILD_ID - exit $(kubectl get $pod_name -o jsonpath={.status.containerStatuses[0].state.terminated.exitCode}) -timeout: 120m -options: - volumes: - - name: go-modules - path: /go diff --git a/.cloudbuild/jax/Dockerfile b/.cloudbuild/jax/Dockerfile deleted file mode 100644 index ec84817cc2..0000000000 --- a/.cloudbuild/jax/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM nvidia/cuda:11.7.1-base-ubuntu20.04 -RUN apt-get update -RUN apt-get install -y python3 python3-pip -RUN apt-get install -y git -RUN git clone https://github.com/keras-team/keras-nlp.git -RUN cd keras-nlp -RUN pip install -r keras-nlp/requirements.txt -RUN pip install --upgrade "jax[cuda11_pip]" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html diff --git a/.cloudbuild/tensorflow/Dockerfile b/.cloudbuild/tensorflow/Dockerfile deleted file mode 100644 index d452d3761b..0000000000 --- a/.cloudbuild/tensorflow/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -FROM tensorflow/tensorflow:2.13.0-gpu -RUN apt-get -y update -RUN apt-get -y install git -RUN git clone https://github.com/keras-team/keras-nlp.git -RUN cd keras-nlp -RUN pip install -r keras-nlp/requirements.txt diff --git a/.cloudbuild/torch/Dockerfile b/.cloudbuild/torch/Dockerfile deleted file mode 100644 index ecd88b81a3..0000000000 --- a/.cloudbuild/torch/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -FROM nvidia/cuda:11.7.1-base-ubuntu20.04 -RUN apt-get update -RUN apt-get install -y python3 python3-pip -RUN apt-get install -y git -RUN git clone https://github.com/keras-team/keras-nlp.git -RUN cd keras-nlp -RUN pip install -r keras-nlp/requirements.txt -RUN pip install torch diff --git a/.cloudbuild/unit_test_jobs.jsonnet b/.cloudbuild/unit_test_jobs.jsonnet deleted file mode 100644 index 560581c2ef..0000000000 --- a/.cloudbuild/unit_test_jobs.jsonnet +++ /dev/null @@ -1,43 +0,0 @@ -local base = import 
'templates/base.libsonnet'; -local gpus = import 'templates/gpus.libsonnet'; - -local image = std.extVar('image'); -local tagName = std.extVar('tag_name'); -local gcsBucket = std.extVar('gcs_bucket'); -local backend = std.extVar('backend'); - -local unittest = base.BaseTest { - // Configure job name. - frameworkPrefix: backend, - modelName: "keras-nlp", - mode: "unit-tests", - timeout: 7200, # 2 hours, in seconds - - // Set up runtime environment. - image: image, - imageTag: tagName, - accelerator: gpus.teslaT4, - outputBucket: gcsBucket, - - entrypoint: [ - 'bash', - '-c', - std.format( - ||| - export KERAS_BACKEND=%s - - # Run whatever is in `command` here. - cd keras-nlp - ${@:0} - |||, - backend - ) - ], - command: [ - 'pytest', - 'keras_nlp', - '--run_large', - ], -}; - -std.manifestYamlDoc(unittest.oneshotJob, quote_keys=false) diff --git a/.cloudbuild/unit_test_jobs_tpu.jsonnet b/.cloudbuild/unit_test_jobs_tpu.jsonnet deleted file mode 100644 index e429da40cc..0000000000 --- a/.cloudbuild/unit_test_jobs_tpu.jsonnet +++ /dev/null @@ -1,42 +0,0 @@ -local base = import 'templates/base.libsonnet'; -local tpus = import 'templates/tpus.libsonnet'; - -local image = std.extVar('image'); -local tagName = std.extVar('tag_name'); -local gcsBucket = std.extVar('gcs_bucket'); - -local unittest = base.BaseTest { - // Configure job name. - frameworkPrefix: "tf", - modelName: "keras-nlp", - mode: "unit-tests", - timeout: 7200, # 2 hours, in seconds - - // Set up runtime environment. - image: image, - imageTag: tagName, - accelerator: tpus.v3_8, - outputBucket: gcsBucket, - tpuSettings+: { - softwareVersion: '2.10.0', - }, - - entrypoint: [ - 'bash', - '-c', - ||| - # Run whatever is in `command` here. - cd keras-nlp - ${@:0} - ||| - ], - command: [ - 'pytest', - '-m', - 'tpu', - 'keras_nlp', - '--run_tpu', - ], -}; - -std.manifestYamlDoc(unittest.oneshotJob, quote_keys=false) diff --git a/.cloudbuild/update_images.sh b/.cloudbuild/update_images.sh deleted file mode 100755 index 2876df81e8..0000000000 --- a/.cloudbuild/update_images.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -ex - -base_dir=$(dirname $0) - -for platform in "jax" "tensorflow" "torch"; do - pushd "${base_dir}/${platform}" > /dev/null - gcloud builds submit \ - --region=us-west1 \ - --project=keras-team-test \ - --tag "us-west1-docker.pkg.dev/keras-team-test/keras-nlp-test/keras-nlp-image-${platform}:deps" \ - --timeout=30m - popd -done From 1cf5c396bded91b335854c8c4a6667a562b8de6e Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 27 Dec 2023 14:56:46 -0800 Subject: [PATCH 51/87] Fix one last bad preset hash (#1381) --- keras_nlp/models/whisper/whisper_presets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index b740ed833d..3c385deac9 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -474,7 +474,7 @@ "language_tokens": LANGUAGE_TOKENS, }, "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/model.h5", - "weights_hash": "ca157162ec9c3329a659388528a3af88", + "weights_hash": "ccab1c93c5739007868ae73fe025806d", "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/vocab.json", "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", "merges_url": 
"https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/merges.txt", From 24bb087b20fa23137f20b3e9c5ca3e069f8285fa Mon Sep 17 00:00:00 2001 From: Tirth Patel Date: Tue, 2 Jan 2024 23:50:23 +0530 Subject: [PATCH 52/87] Add a tokenizer for the Mistral backbone (#1383) --- keras_nlp/models/mistral/mistral_tokenizer.py | 75 ++++++++++++++++++ .../models/mistral/mistral_tokenizer_test.py | 46 +++++++++++ .../tests/test_data/mistral_test_vocab.spm | Bin 0 -> 237763 bytes .../create_mistral_test_proto.py | 32 ++++++++ 4 files changed, 153 insertions(+) create mode 100644 keras_nlp/models/mistral/mistral_tokenizer.py create mode 100644 keras_nlp/models/mistral/mistral_tokenizer_test.py create mode 100644 keras_nlp/tests/test_data/mistral_test_vocab.spm create mode 100644 tools/sentencepiece_testing/create_mistral_test_proto.py diff --git a/keras_nlp/models/mistral/mistral_tokenizer.py b/keras_nlp/models/mistral/mistral_tokenizer.py new file mode 100644 index 0000000000..25177f6d6d --- /dev/null +++ b/keras_nlp/models/mistral/mistral_tokenizer.py @@ -0,0 +1,75 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from keras_nlp.api_export import keras_nlp_export +from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer + + +@keras_nlp_export("keras_nlp.models.MistralTokenizer") +class MistralTokenizer(SentencePieceTokenizer): + """Mistral tokenizer layer based on SentencePiece. + + This tokenizer class will tokenize raw strings into integer sequences and + is based on `keras_nlp.tokenizers.SentencePieceTokenizer`. Unlike the + underlying tokenizer, it will check for all special tokens needed by + Mistral models and provides a `from_preset()` method to automatically + download a matching vocabulary for a Mistral preset. + + This tokenizer does not provide truncation or padding of inputs. It can be + combined with a `keras_nlp.models.MistralPreprocessor` layer for input + packing. + + If input is a batch of strings (rank > 0), the layer will output a + `tf.RaggedTensor` where the last dimension of the output is ragged. + + If input is a scalar string (rank == 0), the layer will output a dense + `tf.Tensor` with static shape `[None]`. + + Args: + proto: Either a `string` path to a SentencePiece proto file, or a + `bytes` object with a serialized SentencePiece proto. See the + [SentencePiece repository](https://github.com/google/sentencepiece) + for more details on the format. + + Examples: + ```python + # Unbatched input. + tokenizer = keras_nlp.models.MistralTokenizer.from_preset( + "mistral_base_en", + ) + tokenizer("The quick brown fox jumped.") + + # Batched input. + tokenizer(["The quick brown fox jumped.", "The fox slept."]) + + # Detokenization. + tokenizer.detokenize(tokenizer("The quick brown fox jumped.")) + ``` + """ + + def __init__(self, proto, **kwargs): + super().__init__(proto=proto, **kwargs) + + # Check for necessary special tokens. 
+        start_token = "<s>"
+        end_token = "</s>"
+        for token in [start_token, end_token]:
+            if token not in self.get_vocabulary():
+                raise ValueError(
+                    f"Cannot find token `'{token}'` in the provided "
+                    f"`vocabulary`. Please provide `'{token}'` in your "
+                    "`vocabulary` or use a pretrained `vocabulary` name."
+                )
+
+        self.start_token_id = self.token_to_id(start_token)
+        self.end_token_id = self.token_to_id(end_token)
diff --git a/keras_nlp/models/mistral/mistral_tokenizer_test.py b/keras_nlp/models/mistral/mistral_tokenizer_test.py
new file mode 100644
index 0000000000..ea9e04f67d
--- /dev/null
+++ b/keras_nlp/models/mistral/mistral_tokenizer_test.py
@@ -0,0 +1,46 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer
+from keras_nlp.tests.test_case import TestCase
+
+
+class MistralTokenizerTest(TestCase):
+    def setUp(self):
+        self.init_kwargs = {
+            # Generated using create_mistral_test_proto.py
+            "proto": os.path.join(
+                self.get_test_data_dir(), "mistral_test_vocab.spm"
+            )
+        }
+        self.input_data = ["the quick brown fox", "the earth is round"]
+
+    def test_tokenizer_basics(self):
+        self.run_preprocessing_layer_test(
+            cls=MistralTokenizer,
+            init_kwargs=self.init_kwargs,
+            input_data=self.input_data,
+            expected_output=[[3, 8, 4, 6], [3, 5, 7, 9]],
+        )
+
+    def test_errors_missing_special_tokens(self):
+        with self.assertRaises(ValueError):
+            MistralTokenizer(
+                # Generated using create_no_special_token_proto.py
+                proto=os.path.join(
+                    self.get_test_data_dir(), "no_special_token_vocab.spm"
+                )
+            )
diff --git a/keras_nlp/tests/test_data/mistral_test_vocab.spm b/keras_nlp/tests/test_data/mistral_test_vocab.spm
new file mode 100644
index 0000000000000000000000000000000000000000..d753476f535c762c5646937ae2e1c783a225afe2
GIT binary patch
literal 237763
zcmZUc4P2Dhdf=a#;X~z1ui+A-h}N*i8f&bvY6XlnUbTiAuc5{o)>vcJ60hMZC0>Kf
z`#Jz5`u&n=_?j689A@|!W+v8HW1XpS4Qp7#>XcYx)lOW)8rQJy|6r2d-R1ZD
z|DNZZ_q^x(IbZMl4in-hM5g8aDkmdR_8U?-};*
zem*ENv6J{a=Z~cS@yL|mS5kR zEO}2&l>IMEl>E;o$=^LTQ4W0eb9wXIiL#wEe)H|mmE%RM-1{{ChtdBS zS-wCgRp@~)M7F3n;nWkNUr*iWcMw+`ai)tDC+j3IQ7b{pRWyh2`*mVAM9DeA>O08; zTtqi;%|e@AMh|?gli;gb$s(>6+HDJR8|;K(6?QlAd>J7{lQeP@_a6N3B6U+m9|I52 z-PBRw!w3lyE-+3by|}I9v6Op4GFPs?HCu+u)@c7Fc8S)Ezo|+i9;6fWgBt3FG(OSF zzdiq&whq6^#2fx@r`9kjT8i#yotu`<(es^AtSisDDwf41LVno)YoVv zutX!l$24;Iq(%zy_mdZMqDD>~O_F~&{+OILCdnr+CdrvwN%D{SpGh_8RS@nR{pg>r z{+xS%E=#EM^ZhgApN~(G3xOx)voBKR;@3|~=LU^raIJYLT267_f9-!#20xuGUwr-t za{0U2vQXvicC?fbU*Nk4nT2~U;Y??v#7RD?xUU~M7yo^PS%$3OdZlXX`%w~))DJ|7 z;bhcUeFP&SrDy|X%!?Mi9sk}aDP0sTra94KTp?0L9L?C>O5!QI5iRAxXsN}o@?^9e zCarq(+w`U8>?rA=PnvPJf_X)htmB?7peN0Z$n9XyjglPXKaj`3cl3*=b#fZrjE#Sa zJe(=w;{Im(i3i!49wjE?Ev*r0NhVL_A}8@LYNc%kMC_Vq3F5AzZx$WX$T{K&#zxXd zBE^88sYD}XUX2_i&2!X4CwVidWBD~c1jNSMmCA{{rVKd#*+Zu1=aO)BwaX`}+b=0)_EkLWMlH%Rzu z{8V4Og5Ex!{*DZM&Go0WQpYtj3WcX?+O1g{SEz`@-NZJ zW0b{kO(%D_$8v0m`1A!FCx-P4oqQ z7InK>S#UP(YYm>}dvZbYTAlI{6%b^CX?zr9KVwG}1ZBe8HGv zU!jwm_?a1#bknhA^t;5LfPKLQK3l>5Q=;u%-c*s4I$)oWAa~sl-N#AK?{K4NmO8%D6 zF3hvD)HUiO_?DKw6eqK}{tEH8oJ|nZQjz~g-Iq5eNEH56lSCT1HiX}C+<%R(?@hq2 z6XY`aEr(xsm*M9##$3^@;Js3*>u)Q3ye59xnRnq9GsyRp&~q)ktYY$R-@ zAwdq8Cy27!h1|Cizvalzv)oI(TX3%f6YaDexu1M2<+?eNyxgZvT6B`5!r*s!E$w^< z`@;Pj!mYwj^@C#E2O(gipM5HFl`@%jiwq+TUTlzh`&Z@HtC0=dQ)PM7pUm8|33nO! zu%g#hiDcuxL0Dz`1L$Yr6r6|RRm4Rc2GCnHvXV9`%_ZMlGm^K8M3J{CM>%y}M%YU7 zzng1bI7}MNjM+_VHL{DmcaSIZX6A-g`Xm?>ltmBrT#fvSFcuZ>5$ch2)`6M&-j3V? zIbhgBc<7AO$Va5P48ID>v6uT=NUsjP8n9^zbdYasSr5fQojCFLz$pmyGfps1mopv& zrm4Qn`rtHeiroa#U>ExHS}D#HIY)TYAZ<5~v?-&3epEzSd8B!nYZqZy#Yvwmqdv+) zkMgM27y%>kyr>cLDxDaYGNuvkt_u4l?Y2fM`^f(->i!^cRiN9c169TcgfZY}zCc^! ze-Hv?5n{+AZZN8AF5(yF{gwiGt z{dnd%?hnwH(+I=5A$T15d4dGreB>8gLYQl`I}}3^T;g6MQmp}26MwmdIb@LfI~y&_ zac_e4D&1!_@*QFIL)6y|kzc6sa+JIWao<;U7%h7Vw-d@KW8f*)3+Vaq75P{9|97N5 zo3K0RHyZM=4V}KHUn{bhaLLqFJ7Lt?WIp;L*owai&af`}C21BVQ-24Tzo$eq7BZgE zpGauVwUyGFXv z122*v)_4ZmO@EfUM5^&$KT6vTXrzHKhnX+R7m55G@vAKN` z(e-L>VZ2DeZ*2SziM+*qMGyyNanvX6+)SUZCGNk+ZyEJzPSwZ?WXER43jCEFok7|Y zHS#Y0i;1rkzcRwlMxPJOw2eNF^ymYo1=QzS+HA_Bxp_I)Cvo3V!rM1c@1N6;ztc#| z3icRW`_Y~aw{`LXVc16K50kfV=r>L1hSS(Twr)c&LSN54#RX$6ZY_h=`t6$$(6#~#Pwr9Jr#R#m!OxY$UWjy>#W~W=5nq-#I5Gk@sux`v0EQU zdyTS>M>h|qi21;DG1GPlT(>Vuk>+jFrJ3|T;(jAx+mL4b|A=($#Ba={t!Ji4XZm#M zB+d7^ex33pa(^ELx@hl?X|fUjPRg?x*?@lv!Bd)p-seFjtv+h zqhvk$PS^^2p_q2i&*U2A*Mk9lIsTQZ9>|x0`SsVNX&5Cw>b(K~TIdG-Os>-}&9s{z z_Zc`30Tz$J=d{v2O($=1uZg|&75p#3Eb2d4ruq{5j-kjgyPBfv`Knf~bG?i@F=Ub# zc!_(%`27{KYH76mDdEaaM@tLtw^_5?<60g0H(iUCeDZ8y{?GS_A7j zLr{I8G!!L=xYtNutR(!~%=s0^SU((KzN=@PrS2*>vv$}MEf0x1pl6(UQY#&-cb+3o z6Zgy}|Aj~s@#v3GU$k+6z7_mBLZZ2L1b_3|XsJdf;5K`sB?&o|Z2TM9i$Q*5iI$rY zq#wyy5bl<3)K47tgZ?SQ+H}?{_T2=YNByQ_)7i8!YjI`&J=|vl6M7l8UGCDz+g^=S z&PkMh;+vt`|4r=o3(g7X4{FVIoxBu{a{m;O|HO3@{brU5_bhhtOoaR!*Bf>x${hR- zGrt5me+gj!ooh8RlWeH-2z3th;ju*dJC5mofuC`NazDlWPe;f<;^$!eZYf}Vq8x3z zHIhXcx>zr`=FmUJYb27m|1C=N{|+Z2HR4RyNI!9SM@4*t>;;IUN5lCS^g$YFtc227 z5of(lPL%7!O&GNwcpd+}kVqO|5#M!K!nN`eZ0)OP@i*#Z9O?ZxRURre_*E=@H&)75 zFxO@OGKeAyzXJN>y$4U4IdnH3W>;HJEIZU4@ zkC)o#nNz5T)*j(-2s^*cnu+l134RkGNoVPUIcrfJPkLzf3K2Xy#+28g2`*~*+j*q2F9wUz!XxbQbHp5R}+JA!jK+PRDI zM8(OxYa?v6CW7&Uv!-upKgz1|b(A^v5_z}`S72=a&bnKzQ`H%ogZouo>sjBaeOhoA zeds80OdQX7>dz(cay3&PvHrD%Jm@ztr^59a zW_C-`mDj! 
zGn!zR(C^7FYla5urer+l(Z~mc(ZMjpz+{*T2@nlV>MRMF0)Zavg|&y8P}P}>>i@Iw zS8SzO=qDE;bHv^!yyZ7t_oE#b!Ok9Zq?xPKk~%|G0~QQd!o z`nyBlB|eApSNVr6TwAHG@8LRis`^T?MI)0{{wc3YKM6esW)m*(DfRKCR!&gniz?lv zRFfvPTq}eiYtRZ6 z4pckUA&X%d>C9Is|I|l$3VcU7KfwmEy&m#qU|!cB!Tkw!gguvRAlzEDZlS+2w)}X$ zpx>P#23Svg>max!O#&~aiQz(;RM%%nU=3ka9Wu6WP7^MNoDmO}21N z&2eMn*i6zIST1#xO@EtuxWjpjMPwarV=HB4Ev?G75qH`B3@N#@T(+REhmzCFS(kBr zB7Cdn0os|q5bS53=d3?)Rn7O<%Pu6(V%1(f_$L0G`3E`s4}6XMl=W}saw#6A-FS9t zxS^Fz#Jv^tk?B%Wz`lU|E~Ng{Gr##vVFu=OJ#&PrzoHM8JO!;`&SUh zSpL~x6j3g9COCF|4tg&2i!H11K;2(WST&}rzEz1{3;Cow_KfDMbjrba^ELOx5x{XF=qc07eF zqwGZ&xOb2+K}bcX4wSu~<9avv;S5xF(*KF$Jo=cOCH@e89^unP3RQkpKOsIf{}mh1 zKTDToGqB$o%6=IuS=ZMRZV}~ZK$de3GOXew4mGcgm17Ujv{ahhS4Eod(ATPmmdU%C z2&p08M{AbJU9P`Jxzt!vO20VN$UP0BvZC% z;)p>G;trH)WfFSuY6g9sK9Ak}9sZ1^`qlUsgPw8*Kj->a8N$+y=L6_H8LZn_53oik zBAwFRyt}})66So>zcW<0uc_CxVXKpf>p0gtxUY-x=M&_|Nd2R7ISH?xO$6R&oOluc8ThL-V%4|-Z$bp++AMN< zEI-uSCmMMX{Ty6S_dmw{E>!o3S>9@>En zaMsRiEu>9;7>i1|Pwms<@hdexBmYEO>p2%F9gUXXs&LFjW!TI|=((WuHs!yD__2|{ zMDKvwk0a$h=9HSpiJ$hbewjTnX_k?vcfL!LQtIcoEsi;&Ne)^8(tUPIm@d3K4r`)l^B(;~$6 z6#nevoXCm;q+h_Z_5Bf2N?-GR#C{_Wzhs^Ra!=n2v^#r+p6}@+EyP7V{saE%*_27G z0heec2X`PsD;4CalI!`nE2of8#);pcA7SraMx5{S9Q)YQI;p|$17%AuXsHk4|Dpcg zSMmIi-(ICFf8y&$H;wRK!sj|^Wj-CoJxG~(?MQza*xu=nE}}jchz#!1%74c_NZdy$ z_gCmOGZ|;FhrgW2UhU^>jgmzF1yaS4j_&$GCtd8FRUCsaYvon^)V+f*5eMln#(i}^ z=Njz2`xB`f^j^JfIp{0zzvPdW0={k9re7cawvf1lrG6rE|AMEA~0C^l5N;L9Y_I9S@TJhOM-m{3* zdTIY>Id?m}SOPsunBSI2Lybn<^s9Qp9v|V%rBfsAZ*tB?x}U(YbjELF9ewA6@gn~P zeGsm$(W-ao#P*6-?9`(?F8c5Bk3jxA()6lMMvxBL%ZGi`Qt#uqcGaepSY+vU?164k zzAmjihTB1W?eEYgX@sY}CgN91npeMI95L$TJ>u|7tkmxjIlhhLxZX*Ba=xgQ8p2Og zaWWoWrCyx$do{m4h2M|YpGKbnPl8Ib^f2jTLn^Hr?EXD$?isFieNDX*r^@dV74~zj zbbU*nR9-&R%CkTC9pnCPu5msmY4|+{Z_^LEpT*v&&*yQk0VmgA_(5(%9;eRgVI%rq zfqI6%3%L(|0|sbVOMgaI!t3xJ)Wa1RfN$Vm;Ug$!Eo8W_krU{>@G+c$e+Db)yB_tM z&^YRseBHy3cckP9ZT$c}B=mLq!0^$DqV^xP<*XqdV;p&tF;*gE)1UHe9=5`E*a`L2 zRrv5OIo_y|9Nc^1cgNVHBlF=P6hd=3YZRmjTGz5hN7`Ch;~}e|7M!W{U1S58p_~1l zdnxBf3z$1`TcP(p>wrnD*QaoPf!nFVfd}**%spkyN9e`OQ^uDVD_@C_Q@D*h57!Sd zpUz`_!Z_rkZ=J*cJeaVh(&G{0WbVC)`x5vluWwqUxLFgp26b{7KM!+o-yNPUzI;(* zyvn&DYZM>Xyu{lNK03Yy+ii-|$rZvI>4?L~>tN{5^O855Z@fCOWp`(GUX(Fd33yDi}KUP>Gz+ zP5Y%3eUUFEGd>XGJ3cXuJCBY0tCYXKSW|ZOX-&oBhcs`0F9LeDfHb=VF=7_6iucQ$7)i*dpM$U%0Fdy{TkvdOO_PG%EGHA4D z<#(Kin^#23M=PSlGB8z|hNepM$tR?R=YFlB6lq(VB38{rv5ia?yM3~>YaSEF{Um8w z5GAe9mKi11RZ(I?+S#+WABhr23FrBG?EG<^EGM0nuo~9Ede{g(oa6WAMN3~nw0MyH zxV@Y!_;@zphmO`Lp3g!!P3JrbX~*4;bYRye?x}!EsD@f7^3n#Nhn{-Q)9v&x3-63Fw)W@IzY4Gw zq#t(&<3#5ut>lkF!DOwgz!sI5=PzyU)K|I5BUHdLg}t)Vq#3!rC}=&0}1fsIf&}7lW|Xl z6qp6Gp(UAldWBBfzzR07uf^VTv3D@eNS3*Tn-2?NF)V}Suo8N1Ft&||^o_D_yhHt0 zQUBm$?DRuNi06ZF0-VqVuG=E+LFO75j*XVk40whyd|b4wCeC%R9yY=z*aBN&JA~hl zmYv8PrI*pC%eAr>eINMrPe?w}jh_n+qI-7Gc2J0341U%aKHOf~t{=M7qr?p^=$grU zxQnnA=%n9vjE@pOec87mO1#K^Y}2y`n>Y|9y}41+gB;sG+ZhimoEZ>*B~(K#)PaL= z?bwi=`)pt}U=PRW^HovOf=nWe2^zqBI$6v}3$h8C(MtvUXMNQ+XS!IywsgAKp&i|k zI$fOTR>C>J3Ekixoh~l$pnC?VOCRqG_|Z>+e|L)bwxx&{*?)4nwA`64t z=^RawF2cD8??!eHsrL?2Qswiv_x2{0LwU@D}*Ea;&x_3nAp$2^P){kXljRUh-yzdJ09KX%3+ zq|?jzgYF`JcMW|noAF~HS!NT@T$m3Fp(&X)5ADAg-MU*VZQHc641GB`HnR@G&knX+ zt*pdvHLQd6un{)F7T5~EZ~xnI=fGYtv34(gB|?lZM94ne`CyEQl;Ih?Z;c*m!Df)* zqfbaN(gYPy3Dr;wb>M8&NdwXh&0qz00qYNNpnEp6{@B2t0lgdij1xZaLO)|o%Lc8q zQl>V_qsnS?Q8q7SL^|y3#ao!y)O~OY)Vt$nkb0zgUeSX+_d?%z+82HrpV9r$F~Bq4 zA# zx8z6bmzDUn++i$&wm8P7NX96zk7vwcJajM)t|rVn7_+~Lynlwi5jMdV=$XV=IE6ei zc6y+HgmnRD7(S%`2J4SZ#?mat)NIC9WEXDN0>)hE1~cz6Y$cxUuoH5isXSUNM|l4d z-OBrmZMgTL=YxZI+i}|stRIjE@pFb)Um%OYeT{btkrm*&L%zJ^i@L1DT@7RNHn!Pc 
zLme{yw#={N54RuLL42KYl%INaUZelf?k=Pox(Bd-OQcwd(*aKKKvQF+SX`0fN4K7hl(zek zQp&@A(==>qCbqE@8(Dy@T!@rY>b^T#IfFb0Zl1HBM_vS1i1jvexaWXQF5$imz6Cn* zr?d9QCcV&-iv2C3kEUaPNZUN(u44WN2VCKvVYm)A!N&SMjJyl?py!y5^%pj7A)P(M zn@7CZz7Ofg-El`Jo%eaS;Ur@KbagT|csU1fvHxt<$phke2)gCi0K`B7OonjF6XN5= z?9fr(-NQXRjrlf?{+mnxg{k~l+U_ESvO|S*RZ%!58cL}l;eLL&~ zCvBdCbRDC8(f2BUY`5KeFi`NdH>epM%)hT+fGtPzX)v3S}jUTny*@0`X{AJ}vy*TavGmuh4!)WL9CoHQUqFUN`*S;|?F zsh=}xbStzFPb=}XosAbO2dOp=NbM&{0~{mqV&u7ngK$ph1`qh*6r6#clL^ubec*w9 z@PZHgBMH*Mdk3AX;^hPf%1&fgcD%SKpBuWNWdrGNCVj$L>q(#T+0omP4sX1iBaQQL z5iY@HxB|m)9eT)rFZu7Y^De|H>X*9pB7LCN=N+d>KS=sWCuQqGx?H4B+}%iLWW3xY zo-o{nd*F^E3_L*hj3;hQygWqLJxAGd<`B59N9mB{S9G%NPiIfyTJShC%_3^8?ZlcLpRJP4(AQ6EJXVB5uDFy zWik3P7#lw(X~hG}ajyhl74bs9Mk`)ejo&&jvj<*}v@mCHL~eqn16tXF+zM98*#`DZ z$_Nhf*bXfNlz)h8^Vn~tGAE`pHzM0{JCNJCcPHe)Uf2f*p%99pr-bphobk6F`>LV+ zJ86IL9bo?f9rTCJYplOQtiSFv{@!5x9l|~b7=OLY{oi0)#54R@yi_26+*ekj7uh-6 z7v}X_ky4Gj7V4k@!mq}Q8QBb0D0!to97rb&zee21P#1YZhChrKKeC%~&iEZ;Hu@Pj z2TtakeB^n!2$#T(pNqNoGPek*)#a;%w5p$;=TGkjK{p+_GAC^y`q!r zge(1y{`NZGZ=r|bF5H6$F#LRkJVb^LvwycliEagBFT_9sG~s6W3n#yePtyNCMer7p(idGg$k{?quHiTz(- z{)H0*%)eagB8;n-^+zk~56Wrw^1dkXYy_i`bJBPC9w?6TCvpygpB32#rQ@hip7%Nm z$XhOXJV0JGQL>dVPCNY+xf9&2%mK)~;KILe4}Bm{Bl~dYgD;zLX%YQ&2mLgQ^#`^; zX8%Yl*uc&h&|Z&?)Ub!;oD^ygy#-~&H&+{XS#uzzr# z#{RK2mrE<|PTpOGI^t*mv*H@h4v-dPQ;7VaJHQETskqb0-#qe{O#TwdUjcD&%>#Zg zKKGXB6aQm*VLpWDKMRq=&+~31Qk~uE zA*`p*F6F%#{8mCV_O}`t>L`>mD*eyjl6ANj!+I#~p}%oHzY%>CY=Nz?9d<%^##@qu zbe0#$UgSQ=hlAigQo#P9Knl@4s}4yY6r-EKkM09+)*;p+hxk^dKq~OBgledTrpyAd zKpncZK>x4pSxmGBxBZbm7S}0be?RKHq7m-H#A#otBDot=gH+aAgr{E0q3?7o+ z5b1yi`ma&88nkX%6@1_$|Sr@VIRzy{axQ{}&j41@c$M(!f-f$IYO@+|!l9^ifmKKiG> zlYWYy7g~l2D1QOvFW~&8Ky1nMe+~VADg7UGE9ol`0|_v8j)xrE$4z4YI|=)rg8k3L z{-J+>d3lI&4CxQ@d~mIL#~J(IjQvAbCicGo`-g7ualT~EEf{+Sq2~Xl60J-n{uHoO zVVC9DC6w}9*A&Z~edLi{+VHc(EW#N-<-MaoB>Nkk%*O3Px^L@bF8X{}2z`6lAM9Y= zP{8`hKGfWXHb-i z4S#h=s`0Od@YO?7hjeOqR*5YAns4r&rVQxrMT{|#oF${HGts^f?`5Jpz|XwpyTJJe zarWb%gr5nV&<%~62>Eal@k|l<-RTHvVZLf*j%vHX{5Z;-bDOzmfH`Q0`H}gvgL$+w zE1L5)&O-NazMseWej?}l$(-*ad*<*|OL56=xk%iX zpmb86n83(G&5|R#_;z={RGj{`T;|#p`1_$lQn{c&4yWhJk@{SDcS?a&dw(OtTnoQ5 zUf!c_YB%tHUG@Q~8Gk@N$c>abZ=_u3`b`xELc9-g`~c_8{4Pgh3HFMgnU{8d$NS$F z%{ac><@=V_@zUfSFRhkDY2jIo71~A<#ojwfY@L(D!E>GV#);B7I!!vRC5!XM6gi=J zTwIzbrK@0y7Izl_6yJ~2g-O-$zv5taPXa@(_Kz&~kO&T$f0FxmJra}tL zf|f~<(wfW}!&1&6z&0;Z?9^ZT+DLJ%h?LocnG5q_AuNVvupCxGPd4MvZpNRJ*f;dw z$G%}~pWtHsF`jQs;Y5h>2fFeYe+n3XkllOm>x>X1&v1Qwn6R4o)`9y%gt#mbvL4+N zj9?Clkd5e@z~4*Uousjdv|tN<=8*{5irfxOH4&17+zXaa1mjl(;}>IBOPrJjVx@hA zbB$4+bL8@z1JwRM^lY@SwwB?Sd7l^CGz`&R13V8Vj(j)>g-{G8sDSWs#)D6>3-oGm zF5;Ox(&gfr60!l*GbZ0H_TA{7D&~S?jQJZF`^%XFkiN}~|F`3$bu>=eM&iV}hkaol z`@#bDzeoq}o!r|DR&an5y1@f}=ow-EJIWpv=~==48@y}Tf9JCQhK@w`zsc->k$g zoG#+_k_OU&dzd(`!%YaoUAPAi;352U{);_(pno@Y489@ic!2fa0Ohw(e&TXAQhsz7 zy1Sn8Q~&1Vc+st5j)i;JWDK$?o$^Bhx^)|Qqr8*Rlfbc?yy0ihBd-VIWh#CtFbihG zT$m3FVKI#DR}!h8WY&YYm%~ah@_vFEABG=Gkaf62SL4LUTwK~gUww>oR{S=?CfEX7 zVLOCBPLQ3*9M}u{ARi8bbB|66k?QQ-oyWUX=q9KDbq4Q&e)_+c{_nfN{2SsMLatRq zE!05+n4uZ0(2~pg{D4{q@O%(#$FLh{M|VI6wmx>gm&y6wD$e&-aK5mX^s%jO=$Xm+ z1oVLi`oRl6@PnQ;x_TE-eTSl+KZL0h2kAP&H<9NNpYmK0J@gsvi97{o;2fNXHu7eL zvGey=8MjWdE;!3Nfi;kQfZsDBjCmD(7kL?)7_Y7%hryD?dqT*Y5Qe*O4<5in2pc); zu|-JeGy2FcxaTpx-$lki0!#)c{VWMN6;fapxJQ})VJ^Dblk|b{YJ|+k?Qf*rz4TG& zhn7g@_vA-=OzXTydun?+^ZO#^_c`oQ$wv|6z2O>T6U-)#g-|*TyXv5RQ`!HXX8#*x z9!K`GF7P6Kxc$hE&Fp{S1aUc`YbpERRQA8nO}gg#C|OK=%V0UIgeEWZKdeT#a=i`K zp|1x=h`f!m1_2vv#Lr3oHX*lw`!?tK$nD@FzkNrT-&il~#GM1aJk}irtV0aUZ^#z% z(MrDB$d?st=yqs7K>72cWH0yZgM2s$g-{G8sDPdU%0EQ;$(M)x^k1X=w<$kTot1RN 
zQT|DkADsAijZl8Vxslzt&7GW~us<_kqv{>o)zo)WE9GzGnE~-ziNEb^lvJbpcCr5_ zkL?S{Bhq#{O6u@;-r-%NU60NO+}W%L@iT)fi?(0OxCG6(t>8@l`Q{Ar9Gr)Xa0xDhnK|_e(lV%(VdQmax}cSt z$S_z>W5ZzY#Wq~n2`hM0WA0C74X~8Ce-di|^cL*3 zm3+2=^%`l9l7~dj|B|VjIDVIIHTDJxFd33yDx|qa?B~(K#)Ip0SMOy7VV@Fzf=59mUaknEKiTo}BVa(7BR&an5y1@fI zaXkN@!t?)`JpYG&_BCp6OoJ&Vd_07o11;xOjg7E}~xoKhO4kCDX-QHC_5G{BFP;&fj-a56rL1hMUkY6YdIF z(0_cdVHmgZD*cLg9vngHE<_y;@csjB(0Yw`bZ&G0jI_~Kc4Rwl2h#T;zYjt`zs~(P zp>!g4zMtQAK)(w;1?v2XJsx=g4v+#O7*|F z=pO8=k3KpdeIfX9`vy59z~1_ii}70q%fWnuzKpcoXa9}98hstChqeLilkvco_-Kq6 zJO9J2-$uQI>VF#vw*`z6!Pt`x^)Bd*8$G*(UV|o{#wiYwSxHz z*~0bK&4eL-E7-se?cji^+>-*cU^YBDf9Lz9PS!WLdotO#W--raGtVRYaeMRF#}+Wp z=dy1-&Av6rJ{IX5V&95%;dUdt58!trTILhaLNIkE)BedgI&-HCtloQ!P*;{u| z{yl_4{=el{@BeJ#zAdm7w!==yfxXaEMgL|TP-M)&seF1j713S*deh08$wcibKMnxF){*MEx-v4nT z)mYSx9DDy@7wdn<8V_zioC4<&ot#0Q1EW4x&LiDk_5cRnOGfvwFX)3y=$FBd?gKCM zL(2;6pM12DFZHgBZ7umKVC_LU9ldJ(5hqu;Zy2tF@%31_i44PC=)s`(3NPur0W_TEXvGaKf@d{_vJVHqrko;;qv7x4TY>EZVT`cJdJ z4zj;S`bXLS6K5xT=@Z~AVgJj%*Tp{F4c*{m|GScSR>L}25AJz{fsN>%T+SBu#LFi1 zE#R*q?#(>^-oW#3_Q6~6GvnTl+zCzWk8_ZF!Ghj4hkgB`c-eI^~3GIJ`_Q$r=IeW)u z><>=dRC5$(GkLX`OT;#($lem!{;#Tka zG}Aw731^-XFLlTUr9(3`-RJ$^JFF+rt=N1UIMAKo*hbk7P{v%!itNTO{3UIN^usAQ z1Lxp8T!c$-8Lq&v;_G<1jtr@f(%3iuw10k%^*;V#xC=hD22}I!^Soz+`yuGo(WfB> z5+MAfRwg5pU@D}*ESL>*LCwGOkqcol4D(&mGGu58Yha`^i{Fw)u7-869^A>;KWs$z zjK}_$>SPo87WnD?zx%9}CSm^u>>rzHtH=I3v5f(2WC+_pI)aQ(LCy!pGd^)1zLop8 zLk{@(=4~&shqmp7zTLD9^i!u^@S*z|4?0*|bRNO}OR)bvv_IE=djF3wr8B6Xag4#l zl@AA@5Q;(Bxhj9?72XlRJxqO9AS4nyP1DL&Cyn{q1&Ooo_VS!N_KKjH+aAgr{D~ngY(cc!v6O@Yk?!|8B3TG zxzCIAf#1dc_cr@q!kidouY&Bt?YhAJ7rMcDhy5?>W`m1y3@#F<`vze`#KE_5o(04` zhj$e5zYP9F{Be6{k{0p`erD1s<($R@O?KW{$L~5=EI-Vvd8})2hoOBB<=Q|wk@g+T zzl$jUQo7D;y+Hqk7)XE~$~*S_pS1qJ_NRS+dj7A<3S-az zy_BE0yUD9_JZmH3NrI`60&WfE$A1>OCz0~QZ1lO{r;U8Mls}U4!+iYAnY=@WyoY@? 
[GIT binary patch payload (base85-encoded binary file data) omitted — not human-readable.]
zkC@ozBPQ&8w8@w;6LvmUY0S9Kn29XgG-1=OZrcnRvtY=WZgV#6HvZG4Zey15+h?Ci zH}DAhOuDhfn85(%LXF>;jd`|dQe0^4FlK9)G5bwyi&-ZA#r&u-8%*kpZ7E~=P2Lv$ zCf%i6<9{i4(3k@rV-A`UIG<~B_^<``8Mokci|ura?X*)FJ8{}+JMFZcP*E8eMll4LwUss| znZYD|Op?ST6-*+=BrZD|TOkwGHf_6BXu9nN{*5rFx7mfm7uvSjzPH&$vJJ0xacr{- zV%v6YvkPIHT?pH5)Hb^Sw%G-+ZHKnm%5S?#+iaD$-K=f4qT6oKHe0=IJGISLY8x3N zHcUd<>6`4?schHwYeji)zkz>qYI_F%wq={PbM{7U-y!;m+iw=Vr0qMkooH~hBQqwB zws#Yyozok2Z6_gi$! zwW8wfg1N;w+jYNFJFgX$Zx_=}<80Ue-|O%HewRMSPKjOGW&U?*w{F#~8+Gfg`hxao z&jIauJFn`nzNjxgtJ`$j?Yiyh>vS7#Pdu*Mb^Dif`!{s^Pjvf45>mQDU(%QMd|h|y z&M)fDyMCfO?>(qHkLoVnb+7JvKzBVZ9QSIk`G+rS?*Z+7KzqNgFY9jI{YBl4xli}# zo-ga3`)}7h-_t!Wj%c6ueNp>v)4toaj|lb=>%OmP-viqBp!Usl>MQ!nm-UtJ=_^m` ztGZYB-lco*-3Jfo-mmH2`|l6|_Umi<+H<;3_ua1h_US$>Kh*uY|BicfKXKl_PY3jX z9@xvj4Ti1zgL?2bJ@^eh_#J&+U*F6BUOl95=$rZnJ*Jbhf)gy=b-v-pVXs=`QHY({zQ*b zn2+hP2lUwYAJ;$WpUnRucv41S9@gV}oG8Dm@1omx59*K(nSc164n3tqKh&Wg>w9`a zPwdeX==TKPo_Ih{Jg6s3R;a%xFc0&;4YuovALt26@<~1UJw16?Pd=q5pMG3V{#Z{^ z{)dGxJRRPn!w=~2gF5^j9e!Mg4?U^F{BzozHy)nm@Hri(8Znd>N!2FrytNWdd3voGkf&Rm-GzB59*mm^vrX5R^Qk64?b$Jd>U)hm|tV<8tc$lr^dQ8)~&G~jRiHp+@YHUbjVT}!IEUK}X#^M@FXe_C*QH^J6JWJyajXO1-t#Oyeb2Of-@jQ*^YrH_? zg&Hr?xLf1J8ZXg!xyCCrUa9dajaO^DM&q>_uhV$F#v3%=sPQI^H*35_xgQVPcwFNN zjVHw>{t2fhvNhq-M2;qMHIb)@LQNED!mWv7O_XS&R1;;IsL({ECaN^S8exqlYBf=( ziF!>mXrfUQO`2%dM2jX`HQ~{OR}(%>v}wYxi4IM4YNAUM-J0mpL{JmGn&{I+za~PO z7|_I^CWbT-7H=p_jA$aFiKuuJVIr=Hgt$r3tbS3fUzF+>t@?#Wzv$L4`t*x_O}aH% ztjQ8hmTIz0ljWMM&}5}1t29}y$r?@8YO+p~^_pzZWTPgVHQA!cR!w>|>D8o9lL1Y( zYqCR=oto^@WVa@JG#S)nuO|C6NfJYv3~Mr?$*3k{nv82Qp~<8sQ<@yrRHmk~H098g zQ&ZWRa%n0@Q@NVT(^S5u3N%%ysUl6eHC3#s5>1tAs!UVmnyS!LrKYMhRjsKSP1S0u zPE+-oYS2`(rdl-R)l@)J?V9S)RHvr8G}W!C9!&)`)vKvKP4#Okq^SW-4Qgsc)1{g& z({#C}D>YrE={ij}Xu46;Et>Xd+NWv1rUPRC|8%FOyENUc>7dvKG~KW1kfsMTJ*ep+ zO@}od(R5VPalMqKmmGS@sh9HgQh{D7)JsKr$*q@4^-`H$D%VR@dZ|e-wdkd&UP?SA z204sy7$9L(XeLK9d78=BOo3*KG*hgZ63vuqrc5*Cn!!V*W~ww(t(h9l)M}9||Ri*>wF$D8y@re4X?D^9(Vt5=Hjid(M~ z>y}?WIjECEIvLi?zJIfV5>Jq1OK)(@ig*p4x03OvyrXF z@EF6Z*L_B|EgSd^7+bqBI`}uT^P=&3mTiCCfw1ky*2TX$KY%W8c=U!>Z}{{^o8Iv2 zjR5~|v>T(x7(rw78Y5(k0gPFfF>;KNYm7W&oDnenDjeL`W+^U4wH0;@zSX` zy7)J-U>ILUaMlRUcbTX$mOTWQJ&-~dDcGKSmp$9*b(ws2ne@8sld~>67{Ww?Fac*> zb_j%t-ejrEWT|V}6}N8wO}K6oTepd=+r-vwymlL}-NtLT368HG6E}u&Gs3veW^XWt zU1a#T)xxlSE+@~?vQHDr*oef|5yL(-DWa{3)d=Hb*^Uh@dQH;3CX`(ab|KhB(5E;0 z`Dgyba8`EC>_*;IiG$$)HIGW7*w~e9*)>XtIW3 zhhA2Sqh&{w9kl(M40C}D8BZAYp^1CQXtOMg50k`@iFC+zU9KVQ%SXgEWMUgK!NdHU z7%(mh+ttJnHZg=L@2Clf#RRg&2D=KlYBz8$d$s_}Y#v$6zUc8xx$$TRveOJNha|aS z{!QkGO=1}4gc0VHNfX0_8#YM~+cw*WIZyK(F`-6`GsZ>x#UtB?@jGH{5&q4IDE}ro z4Eu07vm7rU*_p#`!Wlux=&~JKjDHh0X8dCOQQ+4c86U=P4BKobZ5N~1HfGC=D6x%Y zd#SOTX*h&!lxsGNf1<|-8l%@XNMknZqG96AA_WX=4*rc#$80vGi@+zwvXjZ1IkG*P zAP!@5^8ZG=F-XQ~d^$}sPA*jpLSXz+81E*a(*)#vHupgcpE3N#Af9X!55`4df^uXB zFp+25*Ptzk$Q+rdOo|w`=Zl8zc{#J@#%Etwreb8yyG%-^5g5bxG=ew~%ct!GZM`v; z{h4bY4~RGVmMtc(PX#7gj6Vt!(&UNOYYZYRuwyW> z6OfW5WK_KOI~&(*O0%OnovG7KozB+j9G%Y9={%h-(rLF&7wdGHPS@(RN2l9#I-t{? 
zIvv#MKAjHf^q@}1bULonDV@pGnJk@g=!{D|9eFmM$t6J^k7x3Arch^!#8Z+h{7i-3 zYO8vyErF8&7f1c{@!Qg8JY0LOhChd4Kw3AteG zoEzj^7#TJaKO^;jeR9wz2YqtLe-7#85MK^)kx4bp>r6&!{j-F-w1vq_>15_f-Vu`P8t#N6d|1m zbr>P72z3}Cy$E#}A-MgJ)JN6|lu{!#RgqJI?qqv#(+|0w!L z(Laj*QS^_Ze-!O&=pRG>82ZQ1KZgD>^pBx`4EjG<=?J!9w@N6$EV#?dp5o^kYyqh}mFGmf5d^o*lt96jUc8As1Jdd8{eIQ1N-p5xSWoO+H^&vEpRqkkOzMeETyOQBl|-BRe5qV7`Ym!j@c=$JxBIw*4~^h{B2DRfPt>nJ*oqT?t!j-uZv`i-LB zD7uZJ*C@J-qRS|{jFSIR@;^%cN6G&v`5z^(qvUmzypEF3QSv!TK1a#tDES;EpQGe+ zlst}-pHcENN`6Mk&nWpBB|oF&Wi+9;GfSZYDq(>Ci{gK?Egyv6&J1(b2;1i9)Y~}( z=P~~_kNLNAh$g2V8lYEi=enU7O7wP~1F|6(@}UR_mq)m~HV8mB{};voW?Me!eLIgx z^Ng@<`B{29pD6Q*GM^~(TfqxKz-N9R^g{@SfXwGdAgZ?uGJ)^~gfDPG9uP-C4-me9 z@CAe~B+fz)5O1Lmx_~?u;;#^Ym8eyXpPE9w9Uz`Sj^6HWdYM$h9)31A3mlLK`A`6b zPy}u$h7u@+GAM^i!+q!YAUKlE4u7=8iZ0sHg-w67_5ShfLVm?Xw}q;jKiV35F4|bM z{-dqo;#m`WHPq-_7P-kHCt2hoiyUN;gDlQv)j~ZqLNm027yQr;ozM><7=$4hh7pKD z4C3dqP&g|ANu6`#opVqI4pLyljoEK=&Oyu$%F02^4(~Z<**Rz8oRb1G2i@nKRTs~m za}J(!*5kT-20z`{e9&e%=PWcn9OTCw;Fq#@kW)v%aL&<*BR2Bw=rEk4OdNf7V$=-B zB{jm&e3JCqeK;BBSi|pVN&h3(=a{=K*Y}F408}X=QLh9(_^q5OtRR>l2J8i zIOp{L$s-3B-GXG_j?biHoXovKl!%06SfRvXTdQ;O8lz8p&pZZu4P?dt(eK>vdu)q9mUBV#mOCo`;*B+J9I!N zn50RUY+tNT4vf1ANqJWi;j)baqpFOv$xhF51`(!wXlctao8mA^Q!*w-Y&2IVjnYX| zw9_6!(uSO-Moj(CY@Bg0sn9&iDcy41%WzBJWxV%SFHI#Ll7dG50gG)`+3Ip>>-4F zC;-x8{65c^e4a7(Jmc$m#?$kRpXV7f&of@0XS_Vm7KU zQ*jtonl3{+vs-B%plKeHY1%-VmYL>KOJ{chewfxtyIkOgN@xQ=;D@Q3G*dThe}o{6 zD4m1<9OBKvKYg`yP8C!`4Uk3-;dAiM)J&Qwm~;+da`T}G8lf3lfpdApmFI+9AbegE zkX|17&5J9Yp97?mKLEr-$0}XW06rLmA*BlmTZnrhc_}3BLeeiJ?!qV#PZ7Em5f9s> z(?z6Fg#JYlApRoscV|I1;LlAw?lPbp-1u{oUpM)2_XFvETJ4q`XB^^FC`zP_$@`JQtF}v|e^mvbM-}l`ljmx5ucpkaiKm*hs!5|7|233r4e`|wM=fdAp;I04)=^#! zltBY!*g%?%)LSFRjpVrz9h(c4ZYc)*wUWnH^4&_=dx+0Nd3eZ|mw3F?t(UZXwLn-O zY5RJCIDGi`QCDrG-$vSPluH|VZX-OmgmfEuYUA2!YhMVWMjOTJM-@M+`^l!CqW9y) zPxS3X-%e5;WVeGLow#=fl9Gk{Gq5KS}qOf!f~kD?|6 z#Ple|I~vsjgNTL9MlEE8wcyBtN-fa)S)d=Vz%y`xiRcAaHW*n1#n1{J5Ys{q$BgP0 zxFam&^ud4@X#ERJwl3rjYk}6kkVja`U?HyodSM7SmtP1K(4>WeDhO$TR=!YJ0=O5F zm%;?3v`|EzijrDz6OS9eZqj$hwNUH?@>ATYg%a{v;sWwgicY0oEtH{O8RyDIfOyL5 zf%GcyTS+NOcBBfMVz+!CtDdmrV4M ziC!|s^36h@8wk@!JhZxne(e22THx8g5Ng-L0E!NbYGIH98pQ7)#WzUW%&{*F5oQP_ zhwwi{{=?)wOtpo{XP9)u6c%&t3nQe%^3TEu?jz)dx$y<&vKN@sU0^VHb65(wU|`^-Ovvskkld% z)%Db=*ov)E#@?8k;#k2+)^!asV{P=FXnYX7Z6uo7=|GZqgrGZ zVlf|o`NWlvUuGhBGeC>XG%V5-7MWRCWJY1JkYk#_ViEE1U|Mv?v{;-CgfAxT;zIDl zfEG*0KM$Y9Qrx-p7t8QdM!vZG7t2CgEGNI^oGY)@Vg))@aIPYt#Y)0d;#S!LZ9u*& z(Y=y5D+hrxs3hN&lv5S?ts=cD&R3Imb-5O44vV$uQriT?QRe{CtV7>A%C4UBsYmAq z(&VAUyARL~xYH088?%7C^MG1xLf7 zA9-u*0?LL3Pjme*@}ybxqpP2M@wpY=u>jKWlXpLP36N&M4a6BJ0rC=v0AbsSm#5NV zJ85-LMxErT6Sq$Mb&}ss(&-|67iHQ_KD!CqP2JIM7JKp2hyOmxw2$=riH|4FVu<{Q z$mam%HGtb7a*%utQWk^gNjq4i@3}Zc*dg*fgw8|Mf0(iiQwCxD4HM5WVMnOf5%R&h z(;|J-Mb>H->1!^s_Oi&_?;`WNi_Ft5GIzR|Bwx&#E;2W|$Xw_mbDWDj5*Aa`3v+r) z%sDM(x}XTkVNgr-!Izl5S<32zen`QnmK^y|2tgRo66=;r*__L*1s`-k7jQn8cybe3 z$|HUz@|KvuTgt;9?P4jfMN7=MEivP^M5k|wS+*sv{iOopE+~U02mx^vB(+rNhk%xf za4#aAqG2t$$*a2pI4&l=V*HhmKRRejbkLSc8lfFXgO1r!Dd)>LUsj`~3ev8?t%CR~ z$YW)-ma5vcRO1H9gwE7bE%Db9Z!LP);a*n?#92q2T-!@^-CE-2zEqFB!KtHq2KVNr zVajosa^&8;G(vfg;LbYK65W|4)_Rs$&sbs&V~O>MCDtL9=&md=r@q8o`V#ZtORUW= zG538w(+3fq=l*-1*~0T#1CY>pM+uZcrOq?Ka-La$^NefHJCi!k8urfNL+A_j5Re=hb#J2oGx zRfWtQ>*dcMo&}Hp{7?TrKX!cn^2c}H{qwg!?sVI;^VjFXx4%9Y{dX?^?_ZxQn}2<7 ze*E>h^?fA690WB(m~-TOQ~@%ZoeYv1Sb zpT9o+@96X7yT`wN;?MiUzxU+de|`Fa{nPL7pXdHFeo+5B_s2PZ^!RhT@X!0{`_un! 
zKCk?Bxw`hu{`$QB>hag``v3m=y!F@2&j6qI{+Rdv^WKMl&MrOv7&CvKmK&e*J&%9< zAm!)_ottJy#4h4^7CK*Sl&~9{>z`E_lBR| z34VGf`04lLPw(44{XG;u{b>8?9owgOXrF$M{`BMR=YRaUdN=mze=DEfOMQ9|_353{ zr}sCX-rszB7xU>I%%^uS;=4*cua-J`qub#g>-#p(vKRiD@zdXM^ z6VKGM;92x6d6qpZo>kBP`(v$pHawf2Ezh=R$Fu9%^Xz*LJcphm&#}j!(|@0O&OGOy z3(uwJ%Hz-Vzi&LZo;%OIXXcrE9y~h!`@cO;o@dXC=hgG(dG~1aDQ5il|9bxA`M2jk zo_{?5=lS#3@PD82r2k3(ll~|DPx}4;`gqds|KG=x{wMuU`k(Ya>Gw0*c+&r*|4ILo z{wMuU`u+MaoG%;x?>?UNKk0W8Z9M6J(*LCYN&l06zt@c?{XSuhC;dKU4WF{clYTz~ zjwk(3`k(Ya>3`Dyq~E8o@udGrzfWW1Nx$DS$CG}4o0svV|4F}Jr^b{1C;fgu9sU+L z<4OOM{wMv<`k(bb>wnh&tp8d6v;JrOeg+-S`k(bb>wnholizsO|E&L6|FeF-PmgE) z&-$PBKkI+i|E&L6|FiyQ{m=USx;dWpKkI+i@7K`rtp8d6v;JrO&-$PBKkN7F>v-1h z*Vyr_|5^XD{%8Hq`k(bb>wnh&tp8d6v;JrO&-$PBKkN6oV?67B*6(ME;b-gN6YTKo z_VDZWc+vl&|3&|ce!q?nzmAU={bp6etZMjKeE3;>`2K6W=zr1wqTkOa!>{+_MgNO_ zKdX!v{V)1o^uOqT(f^|VMgNQb7yU2#U-ZA|_p{A-(eKxa@uJ_)Im6F6<3<0A{uli( z`hD*VMU5Gc;cHzv_S0|Em90|EvC2{jd68_4|2m zyy}0||Ek~jrNif;@v8q-|EvC2{jd68^}p(W)&HvBXQuJ0|4qNoPQ%ZQ!?zgYP5+zz zH~nw=-}JxfH>Vrsbi3`GzrvFX9pG(J^ z{x|(^`rq`w>Gv*kyy^Ed>+o~Jc+>x;-|rRUO~21|!{@r;+m-RA|4qMdS%x#m!_O4s zP5+zzH~nw=-}Jxf_p`?EIdHt`f75S=HGDHO-t@oef7Ac2|6TvP{&)TF`h9L3@A}{M zzw3Y3|E~XC|GWNo{qOqU^}p+X*Z;2nUH`lOcm2M%9`E|!^}p+X*Z;2nUH`lOcm41B z{fs`|^}p+X*Z;2nUH`lOcm41B-}S%if7k!6|6TvP{&)TF`rq~YwrTjeYWTf%yz770 z|E~XC|GR$QM2&a-e(oCY`h6}K@A^OVf9U_v|Dpdw|A+n${XRF05B(qdKlJ-~ZG7ne z(Ep+TL;r{V5B(qdKlFd-|Iq)T|3m+W{tx{=gAdE*yN4>(QlJC{OmLS zN54(pu*n-XdBY}e*yIhHyy1Ppu*n;~9UL}!!zORo`@NMI;$s67|44b^+v*57F8#Z~vCU5v$ zIBfEUP2RA{8#Z~vCU4l}4V%32zxr+RhE3kE$s0C#!zORo`u*nt!zORo`u*n-XdBY}e*yIhHyz#&KZSsaq-mu9V zHhIJMH^U}x*yIhHyy2VEVUss}Z94o+Ic)NVP2RA{8#Z~vSE<7$Z}>WO*yIhHyy550 z;X9yVlQ+C?88&&tCU1D}GQ4*gHhJS;`fc)tP2RA{8{WkXo4jF@H*E5TP2RA{8#Z~v zCU1C8Gi>sPP2TV`?XbxkHhJS;`v0Zh_f6wp`v0Zh&%47ronfmtZ1u*!^n1TE{0uyN z-8*dchOOSP)f@lP?=#ZyE^OH94ex-4t={mJ@UYbzwtB-?!^2i@*y;^m5f59v;l0_g z)f>L!8h)-GwtB->Z`kS$TfN~O(y-MVwtBJ3}H;j8Ikt2cbFHf;5Vt=_QJ z8@77GR&UtqjeqO6)f?Vr4O_iot2b=*#=rI3>J3}HVXHTM#XW5GhOOSP)f={Y!&YzD z>J3}H;p^{Vt2b=*hOOSP)f+xLjDPF@xBh?Y_c>zt95Mc_--d5^X*T|&--d7a+I;wY zF>LsT4d1Zg8~@R7!#8}LK5Y1g4d3wjWBB|rZ1{#3Zo`Ie_$)GfXE<#5hR-D9Kl*L? 
zhArQ)-}sMyo4(=m@UZC{ zHhsgUZ`kw=o4#SwH*ETbP2aHT8#aBzrf>LOXV~-&pP`3M->~T$J|7L6zG2fheCIQ4 z`i4#4u<08%eZyy`VbeEk`o=%{|Iz=Ce%rp`Gu5!|8@7GJwr~8S{~!HcD-7Qq4WGA$ z*9*hOZ`k;afArh~@` zHh;tBZ+KNPZ2pGV6~pFl*!&IO$BuvW+x(4x^!x5=c$G11|HeQ1?f=F<`fUJ*@3e;R zw8lUBZ2`wW`v1}YkA5#nhh5;X3mkTV!!B_6?rV5)I_v_6UEr__9KH)1UXu*3Nrumr z!{^Fj7dY$!hh5;X3mpHa-!5?Y&TQBP4zE*&UEr__9KJ&vzQY(kiw?WM;k%6CGwJw$ zpGoZkhh5;X3mkTV!!B^x1rEEwVHY^Ojv01=!!B^x1rD!fh8McSE^zohWY`4`yTD-= zIP3z4??;C3M}}?Sunioxfx|X%cmX`@1BdVO#{cQ}GI;#a|3|;g;PAR<*bI(8`hC|u z>;{M3;P|88c5wXBZ$CJ^5*mN>|Iu$tIBW@rE#dg1|BwDZ`t1sb*G9vxaM%?NyTb8D z{~!H-^#9TSN58G%ur(aEhU1TZd&6OGIP49Fz2UGo9QKC8-f;Nteb^k1Kl=aZ|D*qp z{y+Nd5XT?=c8J5Ps$q*bY!Qbo;_%vP`0jH2(QiL6{^<7|{`m9ful$#5j=s~3m*V-a zzdheP-#tG(KRv%ZzdaMr)U)7O^elOnJu9A7&zfi5v*FqFYR}6=iGDQx%6Clu01!NThE>6-ZS&eJrABo&wqQKJkOpN&#ULn^X~cZ{Ez2< zJ^%9j+w&jKKc4^d=>MYsi~cYAzvy?xCSUaXPB&llf6@O%zwg*$Y8Pw1e9>>wmoNIQ z`tn7;WnaGNx9-aq{T6=tqTk9dU-Vo0<%@o6zkJbe@s}_9eQ%sE`Yr$RMgJH57J&Jp z-wH5a^!sivU-Wy&k}vxG?RfG_Q5{#8#yqb)qV7!aT7yZ_PF?Pro z{k|WJ_cQSwb$myiull{l%vb$i^?%j>RsUE0U-f_0?|ban6~=ej`Kte`{;&Fd&zP_J zeb<<;`oHS;onyZ0w@JxY{a^Ka7Zh8>eAWL||5yEXiTSGktNyS0zv}<0-`p^Eh54%g ztNyS04F+Q{7~{Zv)&EuhSN&i0o7m;6e&4UhZZ5`gF^-Ga!n7qa0E#_^p zzlwQV%-iy}extVht^aTRMr|=_%isFF){Xa9F=LDUS`63nxBkEN`(8DF>o-!1U0N); zVttms^;?z2Tr7X<_gXmKf5pHgR$VdOiosTVCmZvsm`=rO<5)(;h9{O$v4V;fR18vL z`xG0e*gnOeDF#h3bC>s=`&p`-yN>`KF(%3Re}cv%^*8n|`h;TvfQLa8==|!d1n0%HgW=O+Q-| zwkqHBf7Aa>zgvA_t>Sy<@K)ii!dvB=e&#CP4d$DE<|^Oxf7Aa>KYx{P`oHNn%n64T z4l5j1IIP$^<(vL*`gyE;)BjEXH~nl@zUlv_|C|1A`oHP_rvICM?_px-6GNYT*Z*Dr zcl}=7$9u|r*UxpuYy9}GJm2+y*YA~nd~Y7F_48f-cm3b>f7kzAzp+rh>;JBw2MZ4t z9xUJWf7kzA|9Ac0^?%oI8W2t_oLD%q7#hXUD4bY~jbb(sW25+;JC*yZ-O`4VUsmzwuIj=;zDAm*t24 zANo17{LueH{}25?^cy+Fj3Gbtn=-^eJw{J4P!Ee1d^n1S$!}S=h$9v!W z(9f&ohyEY>xwZVz|3kl7M1JW1q5p^eANqgj=h|XG6$7fUZTX?!d?Jin7`K>FgmH`a z7%{NQ5B)|~`Jw-Z{vY~(=>MVrhyEY>f9U_I|EK<+`hV*GssE>b??S@Kg_Db+R(|UL zso(pN7~aRb>v&HR@2}%sNq*|*=<-wlPyIjjGj;Ki?i?<#r+)7k^Hcv%{Xg}ao5VZE*bT<$EZ#llm;PV+4bWnW5}+Wz^#9WDy<~pr z|E2$ze(&xB8U!@RFa5vt|I+_UzwuhU!^|)Jzw~>TnP2)1+VV@k_nP^o|Cjz>`hV$% z74l0zun;@O*aXD*Ep`F13&=11zx12M?H@2p}w5Zi(L((nCM+y;&D zTx<#QOaCwZzx3M`;J9aG$+6H|JMIo|8M=j_1i4uw|={Y{MK*2li&J(>;J9)w|;wu{MP?l|8M=j_5arY zTmNtUzxDst|64zS5$}WJeQHG$|3v>p|3v>pzjx4?=%47H=(oqn zME^wpME^wpM8A<@Ci=av&P4x2|3v>p|3tq5V7W6OZU(mmxe?h;kLKgHd=wHykpnpOC zf_{6REa+d*zo36X|APJn{R{dR^e^bQcg})-n};muH)_p-{ssNUty$1-7MBJ6hOSxE zZ|s^y{fqjIUSsr{Mg5EV7xf#!#{2Fp>R;5qsDDxaqJHzb7{|uGBK8$Al8w1t7WFUc zU(|1Z9mCly>NlQ^yTn=4zo>sv|Dygy{k9od)W4{IQU9WTv%MJGW>Np5{zd(Z`WN*t z>Nmp8qW(qw#<*G3Z}*f%{fqh+_50hI#||pye_7OTteZvs2D@3(zog%AH>QEHmCBO- zCH*#2S<-Jel_mYQQ(4l#q<=~OlKv(AOZu1eFX>;>@8)ur^e^dO(!ZpCN&k}mCH+hK zm-H{`H$}{n{w4iO`j_0i=s`;sO7OZu1fFY8~{Z?qhf$e2WCS^u(rBj(u2#7-v5`fX*h ztbbYmvi@cL%len~FY8~{Z)=}r{mc57_1oQNS^u(rJDV))w>^tdbe8om>tEJyARSZ8 z7)fVYzdcTt^)KsR)^9kSW&O+gm-XA~WLdwxPL}l>Q)gNKvi@cL%len~+i7H3|FZsN z{mc57^)KsR(Z8a9MgNNa75yvv?c%bc-!MD&aaqyN4rE2Yp>|gEujpUVzoLId|BC(< z{Wc+4(Z8a9MgNNa75yvv%}it9ofZ8n`d9R?=(oSiivAV-EBaUTujpUVzoLId|BC(< z{VV!c^sne&(Z8a9MgNNa75yvvSM;ywHzbcCc~R;8rs()4gs{U2|tNK^R;8rs()3#tzuU7uj*gb zzp8&#zX@+v^{?t*)xWBLRsX7f)84G=U(>&)e@*|I{x$t}k6F{drhiSp4P@5zujx0| z&6@r-{cHNy^snh()4!&FP5+wyHT`S)Z6~v)-;6kG`q%WY={M%jn*KHYYx)iQv!;Jd z|C;_a{cHNy^snh()4!&FP5+wyHT`S)*YvOHU(>&)e@*|I{x$t;`q%U`I$6`t>10hm zK#(>4Yx>vquj^mezpmfRIqUk@^{?w+*T1gc{xj?P*Y&UKU)R5`e_j8&{&oH9`q%ZZ z>tENuu76$sy8d-ud-v#x(#|GNHl{ptEMzo0oO{_IX*?zpfvU$h!V@{pfhAAsee=drv6R+FiOnhv#EbmzYTCU^>6Cm)X!aIQ~##^P5lg3HuZ1n-_&m} zoK5|k`Zx7&>fhAAso&&2oBB8PZ|dLFzo~yy|EB&;{hRtX_1ooTQ~##^P5qntH}!Ao 
z-_*aU-)=8k`nU9N=?7@CrGHDmS%0?lZ|UFCZ{L?K{agCC^l$0k(!ZtOt~p!!xAbr6 z-_nl+WJ^C5kS+aN`nU9hIoZ;`rGHERmi{gMTl(>VZ0X<9zomam|CatO{agCC^xGI_ zOaGRB)F4~>xAcQP+0wtIANYwKVz%_72-(uVrGHERmi{gMTl%;4Z|etxvaNqxKemu< z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y{%!r+`nUCO>)+PDt$$npw*GDX z+xoZlZ|k=S46Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y{%!r+`nUCO z>)+PDt$$npw*GDX+xoZlZ|mRDzoUOg|Bn70{X6fhDBtAAHNoRVGrwyfFJZ_k=t{k!_@oCA_&SO2d5UH!ZIclGb; z-_^gXe^>vmew*m*>fhDBtAAJjuKr#9yZU$a@9N*xZ#x}8E%wvd)o(+cUH!ZIclGb; z-_^gXe^>vmeuyT!`dNbP>bJSguKr#9yZY^K!x&^&|E~T${d@ZN^zZ54)4!+RMmKx< zZML(ge^39O{yqJB`uFtj>EF{2>|{^>p8h@kd;0hE@9E#uzo&ms|DJxZCwuz$^zZ54 z)4!*GPye3&J^g$7_w?`S-_vh1o<040`uFtj>1QOer+-iXp8h@kd-|cF?CG~Hj%{&l zi(^}yJ^g$7_w?`SXI#TrWMBWj{(b%X`uFwk>j#gruYX_vzW#mv`}+6w@9W>!zpsB^ z|Gxfx{rmd&_3!K7*T1iSU;n=Tef|6T_x11V-`Bsde_#K;e!J(`J!fD4zW#mv%t!2? zv#)<&|Gxfx{rmb^knHP+qq47mU;n;-J|z44ZKt!ZADYU({(b%X`uFwk>)+SEuOFxi zPm%-u2l@~6ALu{Of1n?>3T%}F{RjFF^dIQA-_L>m1N{g35A+}CKhS@mAEFGPl>_|; z`VaKOS~<{vpr2m}zmfy}09Ow5+j-|e|AGDk{kFvaM1N{g35A+}C2Q+h_ z-#$DC`VaIU=s(bZp#MPsf&K&i2l@~6ALu{Of1v+R|Dpav{fGJw^&jd#)PJbo<~)b` z5A`4FKh%GyA2!RO{zLsX={eL7pXE^hp?(|n9O^&Rf2jXZ|Dpav{fGMP*K?@p#|itp8a5vHoNI$NCwC z9P2;Uf2{vlKeG_#CCpZi^&jg$)_<)3SpTv9WBteakM$qxKh}S&|5*RA{$u^e`niWd z$~o45tp8a5vHoNI$NG=;AL~EXf2{vl|A~H1A}9J!^q=TwCUTOa-bkLFbWss2;_ zr}|IzpXxu=4~pkh|Ec~{{R~P@^|MVm)qkp=amuOwQ~jsOa+gs{d5~ss2;_r}|IzpXxu=f2#jf|Ec~{{ipg*^`GhIUUR1ZO#hjF1~zB< z&-9<^KhuAv|4ctim^1y1Y|iwb=|9tdrvFU;nf^2VXZp|dpXoo-f2RLT|C#rWLbN%P~&-I_{KiAJr=3M`|{&W53`p@;B z>p$0juK!&Bx&Cwg=lVIdoa;Z=f3BZ5%DMh?{pb4GqnzvK*uo&?T>rWLbN%P~&-I_{ zKi7Y*|6KpM{&W53`kAFTE|7Em4h-a6|GEBi{pb1}90(tmbN!55xX&<7IoE%#|6Kot z{tNvM6XZhwh5ifujuhlV|Aqbw{TKQ#^k3+|(0`%-LO*wx3;h@RFZ46hao8Xi`Y-ff z=)chK&_OQrJ9dx@{TKQ#^k3+|(C+|3F7#jMztDf7-$8`%SmCk4tL8%gh5ifu7y2*s zU+BNkf1&?E|Aqbw{TKQ#^k3+|(9gQ&LO<`C3;h@RFZ5sP=LW-h=TiTr{!9Ir`Y-ig z>c7-~ssB>{rT$C(m-;XD^RjWFA(#3u^}E!NOZ^OOF7;pPztn%J|5E>@ezrE3`Y-ig z>c7c7-~ssB>{rT$C(jAXdpTA%v?&*e)0m41#cSNgB?U+KTnf2IFQ|CRnL{a5-q`&{Y2(to9&y~~yU zEB#mcuk^Ecxzf)c=Su&T{ww`g`mgj~>A%u{rTyuk~N+zt+z<=UV@@{%if$`mgn0>%Z2|nC4plwf<}U*ZQyZU+cft zf35#o|F!;W{nz@h^yuk~N+zt(@P|62dG{%if$`mgn0>*o@4 zt^Zp8wf<}U*ZQyZU+cftf35#o|F!;W{TySi^)s=#(eJiFZuH;iztMlA|3?3f{u})_ z`fv2#=)ci_qyI+#js6?`H~QU1$c_FR{Wtn=^xx>e(SM`=Mn4~(8~r!>Z}i{jztMlA z|3?3f{u})_`dv54js6?`H~Me%-{`;5?|xEl^xx>e(SM`=M*ofe8~r!>Z}i{jztMlA z|3<(2+quzyqyI)ftDal^xB74O-|D~Bf2;pi|E>O8{kQsW_225h)qku1R{yR3Tm85C zZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t^Qm6xB74O-|D~B@1jd?_225h)z6&f zR{yR3Tm85CZ}s2mztw-M|5iV5pIiO6`fv5$>c7>`;OAEVt^Qm6xB74O-|4^8f2aRW z|DFCj{dfBB^xx^f(|@P`PXC?$JNA%x|r~gj> zo&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$JNA%x|r~gj>o&G!hclz)2-|N5Ef3N>u|GoZu{rCFs_228i*MG17UjM!R zd;RzN@Acp7zt?}S|6c#S{(JrR`tSAM>%Z53um4{Ez5aXs_xkVk-|N5Ef3N>u|GoZu z{rCFs_228i*MG17UjM!Rd;RzN@Acp7zt?}S|6c#S{(JrR`tS8K?Qv@%_xkVk-|N5E z&$;Jb|4cvco|*oce)mi=(?8Qc)9<2CX8PU#&P=}>;F;-n2Rt+VGyOCD40LAtXZmOQ zXZmOQXZqbX%S``F|4jc(|4jc(zZ(#7J0&yyGyOCDEOlo3XZl?;%1r-E|4jc(|4jc( zKX09x{+WL6I&PC?rhle?rk`=nOh3z?nSM9R!)C|b^0<(cnf|%{x&FESx&FESx&FES zx&FESx&FC+C-~z8f9Cq<`se!R`seyt>df`K0h77@x&FC+Hv}@*&x2>Kf3AP7f3AP7 zf3AP7-`$wZ_0RRs^}E`Xx&FESx&FESx&FESx&FC+zC3gNbNzGubNzGubNzGu?$2bd z-wm3~_0RRs_0RRs_0RRYZ;%K55BeYUKj?QUDi8V}^grl-(C-F99`rxxf6)J+-|db( z=zq}vp#MR?zdb=7^grl#Z7L7?AM`)yf6)J+-;KLG=yw+<5BeYUKj?qZ|DgXt|AYPq z{SW#d^grl-(Ep(SLBD%CdC>o$|3UwQe)o0qp#MStgZ>Bo5BeYUySo#2ck-bBLH~pP z2mNl3VMSl(pDbzKk9eqCy)9c^*`!=)c>geQU9a5C0$jKm33A|M36e|NT96J^K0o@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K 
z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36i|I7cE|1bYv{=fWx`Tz3&<^Rk7 zm;W#SU;e-RfBFCN|Kh;FaKZuzx;ps|9;ia|Cj$S|6l&U{D1lX z^8e-k%m0`EFaKZuzx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7_iz3DfBFCN|KR;5qsDDxa zqW(qw{D1lXF6v*@zo>svKmT9;zx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#S zU;e-RfBFCN|KI^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= 
z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0qC9nJNo&G!hclz)2-|4^8 zf2aRW|DFCj{dfBB^xx^f(|@P`PQNn%bq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodM{*{(JrR`tS8S15jrG>I^`g0qDK{d;RzN@Acp7zt?}S|6c#S{(JrR`tSAM>%Z53 zum4{EO#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*( zO#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*(O#e*3GXQl4pfmk5{WJYD{muZ? z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1hw{d4_u{d4_u{d4_u z{d4_u{d4`!0Mr?P&h^jr&-KssI|EQ>0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gz39&-Kss&-Kss&-Kss&-Kss&-Kss&-Kss&-Kss&-Kss&-Kss&-Kss z|9@57%Whm*0fteY9YhbBa|VDA3_v*XEeUNd%?N~eAoPFIegu|*hGvkRD$6SSv+Sx< z`?B8jzv+L||EB*<|C|0d{crl;^uOtU(~kid24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_ zh5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~ zVE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g z7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh= z24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ z0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja5 z0EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV% zfMEcJ0T>2g7=U2_h5;A`U>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0T>2g7=U2_h5;A` zU>Ja50EPh=24EO~VE~2!7zSV%fMEcJ0r2l1PFs%#FdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaM_GKMi0sfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(E#@T`$+>B4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR 
z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|n zMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU( z0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy z07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=F zfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfP zU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR z7!6=FfYAU(0~ifpG=R|nMgtfPU^IYe0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaN0CKMf!nKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<8tY`e^{s0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<8t+{WO4R0MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5DlQ8=%)ch1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0d(oV^k4cf{WO4R0MP)V0d(oV^k4cf{g?hr|E2%Zf9b#UU-~com;Out zrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#U zU-~com;OutrT@}@>%aBi`fvTW{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q${kQ&G z|E>Slf9t>X-}-O;xBgrIt^d|f1BeC?4ImmoG=OLT(Ey?WL<5Kh(5?U0PXmYs5Dg$2 zK)3!||E>Slf9t>X-}-O;xBgrIt^d}4>%aBi`fvR-fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0hF(@z741`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP*Y zxqcczG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
diff --git a/tools/sentencepiece_testing/create_mistral_test_proto.py b/tools/sentencepiece_testing/create_mistral_test_proto.py
new file mode 100644
index 0000000000..1a2a501b7a
--- /dev/null
+++ b/tools/sentencepiece_testing/create_mistral_test_proto.py
@@ -0,0 +1,32 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from tools.sentencepiece_testing.utils import train_sentencepiece + + +def main(): + train_sentencepiece( + ["the quick brown fox", "the earth is round"], + "mistral_test_vocab.spm", + vocab_size=10, + model_type="WORD", + pad_id=-1, + unk_id=0, + bos_id=1, + eos_id=2, + ) + + +if __name__ == "__main__": + main() From 62eaf03d9eb5b4f60ee78e1350d2e016ccf34e5a Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Mon, 20 Nov 2023 11:56:07 -0800 Subject: [PATCH 53/87] Start landing code for Kaggle integration (#1320) * Demo preset directories * Address comments * Move packer and masker to build and debug tests * Address comments * Fix maskedLM preprocessor test * Fix remaining tests * Fix serialization tests on tf backend --------- Co-authored-by: Neel Kovelamudi --- .../modeling/token_and_position_embedding.py | 2 +- .../token_and_position_embedding_test.py | 3 +- .../preprocessing/preprocessing_layer.py | 7 +- keras_nlp/models/backbone.py | 63 ++++--- keras_nlp/models/bert/bert_classifier_test.py | 2 +- .../bert/bert_masked_lm_preprocessor.py | 53 +++--- keras_nlp/models/bert/bert_masked_lm_test.py | 2 +- keras_nlp/models/bert/bert_preprocessor.py | 26 ++- keras_nlp/models/bert/bert_tokenizer.py | 45 +++-- .../distil_bert_classifier_test.py | 2 +- .../distil_bert_masked_lm_preprocessor.py | 53 +++--- .../distil_bert/distil_bert_masked_lm_test.py | 2 +- .../distil_bert/distil_bert_preprocessor.py | 31 ++-- .../distil_bert/distil_bert_tokenizer.py | 43 +++-- keras_nlp/models/preprocessor.py | 58 +++--- keras_nlp/models/task.py | 86 +++++---- keras_nlp/tests/test_case.py | 17 +- keras_nlp/tokenizers/word_piece_tokenizer.py | 122 ++++++++----- .../tokenizers/word_piece_tokenizer_test.py | 1 + keras_nlp/utils/preset_utils.py | 169 ++++++++++++++++++ 20 files changed, 543 insertions(+), 244 deletions(-) create mode 100644 keras_nlp/utils/preset_utils.py diff --git a/keras_nlp/layers/modeling/token_and_position_embedding.py b/keras_nlp/layers/modeling/token_and_position_embedding.py index f3dffe345f..bb7107f96f 100644 --- a/keras_nlp/layers/modeling/token_and_position_embedding.py +++ b/keras_nlp/layers/modeling/token_and_position_embedding.py @@ -122,7 +122,7 @@ def get_config(self): ), "tie_weights": self.token_embedding.tie_weights, "mask_zero": self.token_embedding.mask_zero, - }, + } ) return config diff --git a/keras_nlp/layers/modeling/token_and_position_embedding_test.py b/keras_nlp/layers/modeling/token_and_position_embedding_test.py index 16269b5df0..122d74e13d 100644 --- a/keras_nlp/layers/modeling/token_and_position_embedding_test.py +++ b/keras_nlp/layers/modeling/token_and_position_embedding_test.py @@ -14,7 +14,6 @@ import numpy as np -from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.backend import random from keras_nlp.layers.modeling.token_and_position_embedding import ( @@ -31,7 +30,7 @@ def test_layer_behaviors(self): "vocabulary_size": 5, "sequence_length": 4, "embedding_dim": 3, - "embeddings_initializer": keras.initializers.Constant(1.0), + "embeddings_initializer": "ones", }, input_data=random.randint(minval=0, maxval=5, shape=(2, 4)), expected_output_shape=(2, 4, 3), diff --git a/keras_nlp/layers/preprocessing/preprocessing_layer.py b/keras_nlp/layers/preprocessing/preprocessing_layer.py index 63c40713e4..d6101da150 100644 --- a/keras_nlp/layers/preprocessing/preprocessing_layer.py +++ b/keras_nlp/layers/preprocessing/preprocessing_layer.py @@ -29,7 +29,12 @@ def __init__(self, 
**kwargs):
         super().__init__(**kwargs)
         self._convert_input_args = False
         self._allow_non_tensor_positional_args = True
-        self.built = True
+        # Most preprocessing has no build.
+        if not hasattr(self, "build"):
+            self.built = True
+
+    def get_build_config(self):
+        return None
 
     def __call__(self, *args, **kwargs):
         # Always place on CPU for preprocessing, to avoid expensive back and
diff --git a/keras_nlp/models/backbone.py b/keras_nlp/models/backbone.py
index a55f767394..7ddfeb36da 100644
--- a/keras_nlp/models/backbone.py
+++ b/keras_nlp/models/backbone.py
@@ -15,6 +15,8 @@
 import os
 
 from keras_nlp.backend import keras
+from keras_nlp.utils.preset_utils import check_preset_class
+from keras_nlp.utils.preset_utils import load_from_preset
 from keras_nlp.utils.python_utils import classproperty
 from keras_nlp.utils.python_utils import format_docstring
 
@@ -66,6 +68,31 @@ def from_config(cls, config):
     def presets(cls):
         return {}
 
+    @classmethod
+    def _legacy_from_preset(
+        cls,
+        preset,
+        load_weights=True,
+        **kwargs,
+    ):
+        metadata = cls.presets[preset]
+        config = metadata["config"]
+        model = cls.from_config({**config, **kwargs})
+
+        if not load_weights:
+            return model
+
+        filename = os.path.basename(metadata["weights_url"])
+        weights = keras.utils.get_file(
+            filename,
+            metadata["weights_url"],
+            cache_subdir=os.path.join("models", preset),
+            file_hash=metadata["weights_hash"],
+        )
+
+        model.load_weights(weights)
+        return model
+
     @classmethod
     def from_preset(
         cls,
@@ -94,35 +121,17 @@ def from_preset(
         )
         ```
         """
-
-        if not cls.presets:
-            raise NotImplementedError(
-                "No presets have been created for this class."
-            )
-
-        if preset not in cls.presets:
-            raise ValueError(
-                "`preset` must be one of "
-                f"""{", ".join(cls.presets)}. Received: {preset}."""
-            )
-        metadata = cls.presets[preset]
-        config = metadata["config"]
-        model = cls.from_config({**config, **kwargs})
-
-        if not load_weights:
-            return model
-
-        filename = os.path.basename(metadata["weights_url"])
-        weights = keras.utils.get_file(
-            filename,
-            metadata["weights_url"],
-            cache_subdir=os.path.join("models", preset),
-            file_hash=metadata["weights_hash"],
+        # TODO: delete me!
+        if preset in cls.presets:
+            return cls._legacy_from_preset(preset, **kwargs)
+
+        check_preset_class(preset, cls)
+        return load_from_preset(
+            preset,
+            load_weights=load_weights,
+            config_overrides=kwargs,
         )
 
-        model.load_weights(weights)
-        return model
-
     def __init_subclass__(cls, **kwargs):
         # Use __init_subclass__ to setup a correct docstring for from_preset.
super().__init_subclass__(**kwargs) diff --git a/keras_nlp/models/bert/bert_classifier_test.py b/keras_nlp/models/bert/bert_classifier_test.py index d5a767d3c7..53da9d4fab 100644 --- a/keras_nlp/models/bert/bert_classifier_test.py +++ b/keras_nlp/models/bert/bert_classifier_test.py @@ -36,7 +36,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/bert/bert_masked_lm_preprocessor.py b/keras_nlp/models/bert/bert_masked_lm_preprocessor.py index 685c65c70b..cdc61fbac3 100644 --- a/keras_nlp/models/bert/bert_masked_lm_preprocessor.py +++ b/keras_nlp/models/bert/bert_masked_lm_preprocessor.py @@ -134,33 +134,30 @@ def __init__( truncate=truncate, **kwargs, ) - + self.mask_selection_rate = mask_selection_rate + self.mask_selection_length = mask_selection_length + self.mask_token_rate = mask_token_rate + self.random_token_rate = random_token_rate + self.masker = None + + def build(self, input_shape): + super().build(input_shape) + # Defer masker creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.cls_token_id, - tokenizer.sep_token_id, - tokenizer.pad_token_id, + self.tokenizer.cls_token_id, + self.tokenizer.sep_token_id, + self.tokenizer.pad_token_id, ], ) - def get_config(self): - config = super().get_config() - config.update( - { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, - } - ) - return config - def call(self, x, y=None, sample_weight=None): if y is not None or sample_weight is not None: logging.warning( @@ -187,3 +184,15 @@ def call(self, x, y=None, sample_weight=None): y = masker_outputs["mask_ids"] sample_weight = masker_outputs["mask_weights"] return pack_x_y_sample_weight(x, y, sample_weight) + + def get_config(self): + config = super().get_config() + config.update( + { + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, + } + ) + return config diff --git a/keras_nlp/models/bert/bert_masked_lm_test.py b/keras_nlp/models/bert/bert_masked_lm_test.py index 0bad92a401..dd6f41b0b7 100644 --- a/keras_nlp/models/bert/bert_masked_lm_test.py +++ b/keras_nlp/models/bert/bert_masked_lm_test.py @@ -43,7 +43,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/bert/bert_preprocessor.py 
b/keras_nlp/models/bert/bert_preprocessor.py index 214193753d..c6cf1e77e8 100644 --- a/keras_nlp/models/bert/bert_preprocessor.py +++ b/keras_nlp/models/bert/bert_preprocessor.py @@ -139,24 +139,32 @@ def __init__( ): super().__init__(**kwargs) self.tokenizer = tokenizer - self.packer = MultiSegmentPacker( - start_value=self.tokenizer.cls_token_id, - end_value=self.tokenizer.sep_token_id, - pad_value=self.tokenizer.pad_token_id, - truncate=truncate, - sequence_length=sequence_length, - ) + self.sequence_length = sequence_length + self.truncate = truncate + self.packer = None def get_config(self): config = super().get_config() config.update( { - "sequence_length": self.packer.sequence_length, - "truncate": self.packer.truncate, + "sequence_length": self.sequence_length, + "truncate": self.truncate, } ) return config + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. + self.packer = MultiSegmentPacker( + start_value=self.tokenizer.cls_token_id, + end_value=self.tokenizer.sep_token_id, + pad_value=self.tokenizer.pad_token_id, + truncate=self.truncate, + sequence_length=self.sequence_length, + ) + self.built = True + def call(self, x, y=None, sample_weight=None): x = convert_inputs_to_list_of_tensor_segments(x) x = [self.tokenizer(segment) for segment in x] diff --git a/keras_nlp/models/bert/bert_tokenizer.py b/keras_nlp/models/bert/bert_tokenizer.py index 5c01bf24e1..a833f423d9 100644 --- a/keras_nlp/models/bert/bert_tokenizer.py +++ b/keras_nlp/models/bert/bert_tokenizer.py @@ -74,7 +74,7 @@ class BertTokenizer(WordPieceTokenizer): def __init__( self, - vocabulary, + vocabulary=None, lowercase=False, **kwargs, ): @@ -84,23 +84,32 @@ def __init__( **kwargs, ) - # Check for necessary special tokens. - cls_token = "[CLS]" - sep_token = "[SEP]" - pad_token = "[PAD]" - mask_token = "[MASK]" - for token in [cls_token, pad_token, sep_token]: - if token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.cls_token_id = self.token_to_id(cls_token) - self.sep_token_id = self.token_to_id(sep_token) - self.pad_token_id = self.token_to_id(pad_token) - self.mask_token_id = self.token_to_id(mask_token) + def set_vocabulary(self, vocabulary): + super().set_vocabulary(vocabulary) + + if vocabulary is not None: + # Check for necessary special tokens. + cls_token = "[CLS]" + sep_token = "[SEP]" + pad_token = "[PAD]" + mask_token = "[MASK]" + for token in [cls_token, pad_token, sep_token]: + if token not in self.vocabulary: + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." 
+ ) + + self.cls_token_id = self.token_to_id(cls_token) + self.sep_token_id = self.token_to_id(sep_token) + self.pad_token_id = self.token_to_id(pad_token) + self.mask_token_id = self.token_to_id(mask_token) + else: + self.cls_token_id = None + self.sep_token_id = None + self.pad_token_id = None + self.mask_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py index 782cf76574..d25f176894 100644 --- a/keras_nlp/models/distil_bert/distil_bert_classifier_test.py +++ b/keras_nlp/models/distil_bert/distil_bert_classifier_test.py @@ -42,7 +42,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py index 3fcf9bced1..f1360f58b7 100644 --- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py +++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_preprocessor.py @@ -136,33 +136,30 @@ def __init__( truncate=truncate, **kwargs, ) - + self.mask_selection_rate = mask_selection_rate + self.mask_selection_length = mask_selection_length + self.mask_token_rate = mask_token_rate + self.random_token_rate = random_token_rate + self.masker = None + + def build(self, input_shape): + super().build(input_shape) + # Defer masker creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.cls_token_id, - tokenizer.sep_token_id, - tokenizer.pad_token_id, + self.tokenizer.cls_token_id, + self.tokenizer.sep_token_id, + self.tokenizer.pad_token_id, ], ) - def get_config(self): - config = super().get_config() - config.update( - { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, - } - ) - return config - def call(self, x, y=None, sample_weight=None): if y is not None or sample_weight is not None: logging.warning( @@ -183,3 +180,15 @@ def call(self, x, y=None, sample_weight=None): y = masker_outputs["mask_ids"] sample_weight = masker_outputs["mask_weights"] return pack_x_y_sample_weight(x, y, sample_weight) + + def get_config(self): + config = super().get_config() + config.update( + { + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, + } + ) + return config diff --git a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py 
b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py
index 52e846f4fd..4aa8327ae7 100644
--- a/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py
+++ b/keras_nlp/models/distil_bert/distil_bert_masked_lm_test.py
@@ -47,7 +47,7 @@ def setUp(self):
             num_heads=2,
             hidden_dim=2,
             intermediate_dim=4,
-            max_sequence_length=self.preprocessor.packer.sequence_length,
+            max_sequence_length=self.preprocessor.sequence_length,
         )
         self.init_kwargs = {
             "preprocessor": self.preprocessor,
diff --git a/keras_nlp/models/distil_bert/distil_bert_preprocessor.py b/keras_nlp/models/distil_bert/distil_bert_preprocessor.py
index f2c4326234..107275f80a 100644
--- a/keras_nlp/models/distil_bert/distil_bert_preprocessor.py
+++ b/keras_nlp/models/distil_bert/distil_bert_preprocessor.py
@@ -127,24 +127,21 @@ def __init__(
     ):
         super().__init__(**kwargs)
         self.tokenizer = tokenizer
+        self.sequence_length = sequence_length
+        self.truncate = truncate
+
+    def build(self, input_shape):
+        super().build(input_shape)
+        # Defer packer creation to `build()` so that we can be sure tokenizer
+        # assets have loaded when restoring a saved model.
         self.packer = MultiSegmentPacker(
             start_value=self.tokenizer.cls_token_id,
             end_value=self.tokenizer.sep_token_id,
             pad_value=self.tokenizer.pad_token_id,
-            truncate=truncate,
-            sequence_length=sequence_length,
+            truncate=self.truncate,
+            sequence_length=self.sequence_length,
         )
 
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "sequence_length": self.packer.sequence_length,
-                "truncate": self.packer.truncate,
-            }
-        )
-        return config
-
     def call(self, x, y=None, sample_weight=None):
         x = convert_inputs_to_list_of_tensor_segments(x)
         x = [self.tokenizer(segment) for segment in x]
@@ -155,6 +152,16 @@ def call(self, x, y=None, sample_weight=None):
         }
         return pack_x_y_sample_weight(x, y, sample_weight)
 
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "sequence_length": self.sequence_length,
+                "truncate": self.truncate,
+            }
+        )
+        return config
+
     @classproperty
     def tokenizer_cls(cls):
         return DistilBertTokenizer
diff --git a/keras_nlp/models/distil_bert/distil_bert_tokenizer.py b/keras_nlp/models/distil_bert/distil_bert_tokenizer.py
index 9e344a378b..1b0ef069db 100644
--- a/keras_nlp/models/distil_bert/distil_bert_tokenizer.py
+++ b/keras_nlp/models/distil_bert/distil_bert_tokenizer.py
@@ -82,23 +82,32 @@ def __init__(
             **kwargs,
         )
 
-        # Check for necessary special tokens.
-        cls_token = "[CLS]"
-        sep_token = "[SEP]"
-        pad_token = "[PAD]"
-        mask_token = "[MASK]"
-        for token in [cls_token, pad_token, sep_token, mask_token]:
-            if token not in self.get_vocabulary():
-                raise ValueError(
-                    f"Cannot find token `'{token}'` in the provided "
-                    f"`vocabulary`. Please provide `'{token}'` in your "
-                    "`vocabulary` or use a pretrained `vocabulary` name."
-                )
-
-        self.cls_token_id = self.token_to_id(cls_token)
-        self.sep_token_id = self.token_to_id(sep_token)
-        self.pad_token_id = self.token_to_id(pad_token)
-        self.mask_token_id = self.token_to_id(mask_token)
+    def set_vocabulary(self, vocabulary):
+        super().set_vocabulary(vocabulary)
+
+        if vocabulary is not None:
+            # Check for necessary special tokens.
+            cls_token = "[CLS]"
+            sep_token = "[SEP]"
+            pad_token = "[PAD]"
+            mask_token = "[MASK]"
+            for token in [cls_token, pad_token, sep_token]:
+                if token not in self.get_vocabulary():
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. 
Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." + ) + + self.cls_token_id = self.token_to_id(cls_token) + self.sep_token_id = self.token_to_id(sep_token) + self.pad_token_id = self.token_to_id(pad_token) + self.mask_token_id = self.token_to_id(mask_token) + else: + self.cls_token_id = None + self.sep_token_id = None + self.pad_token_id = None + self.mask_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/preprocessor.py b/keras_nlp/models/preprocessor.py index b5ea48a09b..fb663f0e61 100644 --- a/keras_nlp/models/preprocessor.py +++ b/keras_nlp/models/preprocessor.py @@ -16,6 +16,8 @@ from keras_nlp.layers.preprocessing.preprocessing_layer import ( PreprocessingLayer, ) +from keras_nlp.utils.preset_utils import check_preset_class +from keras_nlp.utils.preset_utils import load_from_preset from keras_nlp.utils.python_utils import classproperty from keras_nlp.utils.python_utils import format_docstring @@ -63,34 +65,11 @@ def presets(cls): return {} @classmethod - def from_preset( + def _legacy_from_preset( cls, preset, **kwargs, ): - """Instantiate {{preprocessor_name}} from preset architecture. - - Args: - preset: string. Must be one of "{{preset_names}}". - - Examples: - ```python - # Load a preprocessor layer from a preset. - preprocessor = keras_nlp.models.{{preprocessor_name}}.from_preset( - "{{example_preset_name}}", - ) - ``` - """ - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class." - ) - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) - tokenizer = cls.tokenizer_cls.from_preset(preset) metadata = cls.presets[preset] @@ -120,6 +99,37 @@ def from_preset( **kwargs, ) + @classmethod + def from_preset( + cls, + preset, + **kwargs, + ): + """Instantiate {{preprocessor_name}} from preset architecture. + + Args: + preset: string. Must be one of "{{preset_names}}". + + Examples: + ```python + # Load a preprocessor layer from a preset. + preprocessor = keras_nlp.models.{{preprocessor_name}}.from_preset( + "{{example_preset_name}}", + ) + ``` + """ + # TODO: delete me! + if preset in cls.presets: + return cls._legacy_from_preset(preset, **kwargs) + + config_file = "tokenizer.json" + check_preset_class(preset, cls.tokenizer_cls, config_file=config_file) + tokenizer = load_from_preset( + preset, + config_file=config_file, + ) + return cls(tokenizer=tokenizer, **kwargs) + def __init_subclass__(cls, **kwargs): # Use __init_subclass__ to setup a correct docstring for from_preset. super().__init_subclass__(**kwargs) diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index f159fbfef8..88f74b9a0d 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -22,6 +22,8 @@ from keras_nlp.backend import keras from keras_nlp.utils.keras_utils import print_msg from keras_nlp.utils.pipeline_model import PipelineModel +from keras_nlp.utils.preset_utils import check_preset_class +from keras_nlp.utils.preset_utils import load_from_preset from keras_nlp.utils.python_utils import classproperty from keras_nlp.utils.python_utils import format_docstring @@ -149,42 +151,12 @@ def presets(cls): return {} @classmethod - def from_preset( + def _legacy_from_preset( cls, preset, load_weights=True, **kwargs, ): - """Instantiate {{model_task_name}} model from preset architecture and weights. - - Args: - preset: string. Must be one of "{{preset_names}}". 
-            load_weights: Whether to load pre-trained weights into model.
-                Defaults to `True`.
-
-        Examples:
-        ```python
-        # Load architecture and weights from preset
-        model = {{model_task_name}}.from_preset("{{example_preset_name}}")
-
-        # Load randomly initialized model from preset architecture
-        model = {{model_task_name}}.from_preset(
-            "{{example_preset_name}}",
-            load_weights=False
-        )
-        ```
-        """
-        if not cls.presets:
-            raise NotImplementedError(
-                "No presets have been created for this class."
-            )
-
-        if preset not in cls.presets:
-            raise ValueError(
-                "`preset` must be one of "
-                f"""{", ".join(cls.presets)}. Received: {preset}."""
-            )
-
         if "preprocessor" not in kwargs:
             kwargs["preprocessor"] = cls.preprocessor_cls.from_preset(preset)
 
@@ -211,6 +183,58 @@ def from_preset(
             model.load_weights(weights)
         return model
 
+    @classmethod
+    def from_preset(
+        cls,
+        preset,
+        load_weights=True,
+        **kwargs,
+    ):
+        """Instantiate {{model_task_name}} model from preset architecture and weights.
+
+        Args:
+            preset: string. Must be one of "{{preset_names}}".
+            load_weights: Whether to load pre-trained weights into model.
+                Defaults to `True`.
+
+        Examples:
+        ```python
+        # Load architecture and weights from preset
+        model = {{model_task_name}}.from_preset("{{example_preset_name}}")
+
+        # Load randomly initialized model from preset architecture
+        model = {{model_task_name}}.from_preset(
+            "{{example_preset_name}}",
+            load_weights=False
+        )
+        ```
+        """
+        # TODO: delete me!
+        if preset in cls.presets:
+            return cls._legacy_from_preset(preset, load_weights, **kwargs)
+
+        preset_cls = check_preset_class(preset, (cls, cls.backbone_cls))
+
+        # Backbone case.
+        if preset_cls == cls.backbone_cls:
+            backbone = load_from_preset(
+                preset,
+                load_weights=load_weights,
+            )
+            tokenizer = load_from_preset(
+                preset,
+                config_file="tokenizer.json",
+            )
+            preprocessor = cls.preprocessor_cls(tokenizer=tokenizer)
+            return cls(backbone=backbone, preprocessor=preprocessor, **kwargs)
+
+        # Task case.
+        return load_from_preset(
+            preset,
+            load_weights=load_weights,
+            config_overrides=kwargs,
+        )
+
     def __init_subclass__(cls, **kwargs):
         # Use __init_subclass__ to setup a correct docstring for from_preset.
         super().__init_subclass__(**kwargs)
diff --git a/keras_nlp/tests/test_case.py b/keras_nlp/tests/test_case.py
index 7797ca1cce..455a8569b7 100644
--- a/keras_nlp/tests/test_case.py
+++ b/keras_nlp/tests/test_case.py
@@ -244,6 +244,11 @@ def run_serialization_test(self, instance):
         """Check idempotency of serialize/deserialize.
 
         Note this is a much faster test than saving."""
+        run_dir_test = True
+        # Tokenizers will not initialize the tensorflow trackable system after
+        # clone, leading to some weird errors here.
+        if config.backend() == "tensorflow" and isinstance(instance, Tokenizer):
+            run_dir_test = False
         # get_config roundtrip
         cls = instance.__class__
         cfg = instance.get_config()
@@ -253,9 +258,8 @@ def run_serialization_test(self, instance):
         revived_cfg = revived_instance.get_config()
         revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4)
         self.assertEqual(cfg_json, revived_cfg_json)
-        # Dir tests only work with Keras 3.
-        if config.keras_3():
-            self.assertEqual(ref_dir, dir(revived_instance))
+        if run_dir_test:
+            self.assertEqual(set(ref_dir), set(dir(revived_instance)))
 
         # serialization roundtrip
         serialized = keras.saving.serialize_keras_object(instance)
@@ -266,13 +270,12 @@ def run_serialization_test(self, instance):
         revived_cfg = revived_instance.get_config()
         revived_cfg_json = json.dumps(revived_cfg, sort_keys=True, indent=4)
         self.assertEqual(cfg_json, revived_cfg_json)
-        # Dir tests only work with Keras 3.
-        if config.keras_3():
+        if run_dir_test:
             new_dir = dir(revived_instance)[:]
             for lst in [ref_dir, new_dir]:
                 if "__annotations__" in lst:
                     lst.remove("__annotations__")
-            self.assertEqual(ref_dir, new_dir)
+            self.assertEqual(set(ref_dir), set(new_dir))
 
     def run_model_saving_test(
         self,
@@ -394,7 +397,7 @@ def run_preset_test(
         """Run instantiation and a forward pass for a preset."""
         self.assertRegex(cls.from_preset.__doc__, preset)
 
-        with self.assertRaises(ValueError):
+        with self.assertRaises(Exception):
             cls.from_preset("clowntown", **init_kwargs)
 
         instance = cls.from_preset(preset, **init_kwargs)
diff --git a/keras_nlp/tokenizers/word_piece_tokenizer.py b/keras_nlp/tokenizers/word_piece_tokenizer.py
index fe37bebf78..ffd3b29fe7 100644
--- a/keras_nlp/tokenizers/word_piece_tokenizer.py
+++ b/keras_nlp/tokenizers/word_piece_tokenizer.py
@@ -21,6 +21,8 @@
 from keras_nlp.api_export import keras_nlp_export
 from keras_nlp.backend import keras
 from keras_nlp.tokenizers import tokenizer
+from keras_nlp.utils.preset_utils import check_preset_class
+from keras_nlp.utils.preset_utils import load_from_preset
 from keras_nlp.utils.python_utils import classproperty
 from keras_nlp.utils.python_utils import format_docstring
 from keras_nlp.utils.tensor_utils import assert_tf_text_installed
@@ -33,6 +35,8 @@
 except ImportError:
     tf_text = None
 
+FILENAME = "vocabulary.txt"
+
 # Matches whitespace and control characters.
 WHITESPACE_REGEX = r"|".join(
     [
@@ -312,19 +316,6 @@ def __init__(
         )
 
         super().__init__(dtype=dtype, **kwargs)
-
-        if isinstance(vocabulary, str):
-            self.vocabulary = [
-                line.rstrip() for line in tf.io.gfile.GFile(vocabulary)
-            ]
-        elif isinstance(vocabulary, Iterable):
-            # Make a copy.
-            self.vocabulary = list(vocabulary)
-        else:
-            raise ValueError(
-                "Vocabulary must be an file path or list of terms. "
-                f"Received: vocabulary={vocabulary}"
-            )
 
         if oov_token is None:
             raise ValueError("`oov_token` cannot be None.")
@@ -335,8 +326,36 @@ def __init__(
         self.split_on_cjk = split_on_cjk
         self.suffix_indicator = suffix_indicator
         self.oov_token = oov_token
+        self.set_vocabulary(vocabulary)
+
+    def save_assets(self, dir_path):
+        with tf.io.gfile.GFile(os.path.join(dir_path, FILENAME), "w") as file:
+            for token in self.vocabulary:
+                file.write(f"{token}\n")
+
+    def load_assets(self, dir_path):
+        self.set_vocabulary(os.path.join(dir_path, FILENAME))
+
+    def set_vocabulary(self, vocabulary):
+        """Set the tokenizer vocabulary to a file or list of strings."""
+        if vocabulary is None:
+            self.vocabulary = None
+            self._fast_word_piece = None
+            return
+
+        if isinstance(vocabulary, str):
+            with tf.io.gfile.GFile(vocabulary) as file:
+                self.vocabulary = [line.rstrip() for line in file]
+        elif isinstance(vocabulary, Iterable):
+            # Make a defensive copy.
+            self.vocabulary = list(vocabulary)
+        else:
+            raise ValueError(
+                "Vocabulary must be a file path or list of terms. 
" + f"Received: vocabulary={vocabulary}" + ) - if oov_token not in self.vocabulary: + if self.oov_token not in self.vocabulary: raise ValueError( f'Cannot find `oov_token="{self.oov_token}"` in the ' "vocabulary.\n" @@ -348,8 +367,8 @@ def __init__( self._fast_word_piece = tf_text.FastWordpieceTokenizer( vocab=self.vocabulary, token_out_type=self.compute_dtype, - suffix_indicator=suffix_indicator, - unknown_token=oov_token, + suffix_indicator=self.suffix_indicator, + unknown_token=self.oov_token, no_pretokenization=True, support_detokenization=True, ) @@ -382,10 +401,7 @@ def get_config(self): config = super().get_config() config.update( { - # Ideally a vocabulary would be saved as a plain text asset in - # the saved model. We have no good way to support this - # currently, so we save the vocabulary in the config. - "vocabulary": self.vocabulary, + "vocabulary": None, # Save vocabulary via an asset! "sequence_length": self.sequence_length, "lowercase": self.lowercase, "strip_accents": self.strip_accents, @@ -410,6 +426,11 @@ def tokenize(self, inputs): ) # Apply WordPiece and coerce shape for outputs. + if self._fast_word_piece is None: + raise ValueError( + "No vocabulary has been set for WordPieceTokenizer. Make sure " + "to pass a `vocabulary` argument when creating the layer." + ) tokens = self._fast_word_piece.tokenize(inputs) # By default tf.text tokenizes text with two ragged dimensions (one for # split words and one for split subwords). We will collapse to a single @@ -439,6 +460,30 @@ def detokenize(self, inputs): def presets(cls): return {} + @classmethod + def _legacy_from_preset( + cls, + preset, + **kwargs, + ): + metadata = cls.presets[preset] + + vocabulary = keras.utils.get_file( + "vocab.txt", + metadata["vocabulary_url"], + cache_subdir=os.path.join("models", preset), + file_hash=metadata["vocabulary_hash"], + ) + + config = metadata["preprocessor_config"] + config.update( + { + "vocabulary": vocabulary, + }, + ) + + return cls.from_config({**config, **kwargs}) + @classmethod def from_preset( cls, @@ -462,35 +507,18 @@ def from_preset( tokenizer.detokenize([5, 6, 7, 8, 9]) ``` """ - - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class" - ) - - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) - metadata = cls.presets[preset] - - vocabulary = keras.utils.get_file( - "vocab.txt", - metadata["vocabulary_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["vocabulary_hash"], + # TODO: delete me! + if preset in cls.presets: + return cls._legacy_from_preset(preset, **kwargs) + + config_file = "tokenizer.json" + check_preset_class(preset, cls, config_file=config_file) + return load_from_preset( + preset, + config_file=config_file, + config_overrides=kwargs, ) - config = metadata["preprocessor_config"] - config.update( - { - "vocabulary": vocabulary, - }, - ) - - return cls.from_config({**config, **kwargs}) - def __init_subclass__(cls, **kwargs): # Use __init_subclass__ to setup a correct docstring for from_preset. 
        super().__init_subclass__(**kwargs)
diff --git a/keras_nlp/tokenizers/word_piece_tokenizer_test.py b/keras_nlp/tokenizers/word_piece_tokenizer_test.py
index 7ba691c5c7..ead098c36c 100644
--- a/keras_nlp/tokenizers/word_piece_tokenizer_test.py
+++ b/keras_nlp/tokenizers/word_piece_tokenizer_test.py
@@ -190,6 +190,7 @@ def test_config(self):
         cloned_tokenizer = WordPieceTokenizer.from_config(
             original_tokenizer.get_config()
         )
+        cloned_tokenizer.set_vocabulary(original_tokenizer.get_vocabulary())
         self.assertAllEqual(
             original_tokenizer(input_data),
             cloned_tokenizer(input_data),
diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
new file mode 100644
index 0000000000..e137bcccc1
--- /dev/null
+++ b/keras_nlp/utils/preset_utils.py
@@ -0,0 +1,169 @@
+# Copyright 2023 The KerasNLP Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import datetime
+import json
+import os
+
+from keras_nlp.backend import keras
+
+try:
+    import kagglehub
+except ImportError:
+    kagglehub = None
+
+KAGGLE_PREFIX = "kaggle://"
+TOKENIZER_ASSET_DIR = "assets/tokenizer"
+
+
+def get_file(preset, path):
+    """Download a preset file if necessary and return the local path."""
+    if preset.startswith(KAGGLE_PREFIX):
+        kaggle_handle = preset.removeprefix(KAGGLE_PREFIX)
+        if kagglehub is None:
+            raise ImportError(
+                "`from_preset()` requires the `kagglehub` package. "
+                "Please install with `pip install kagglehub`."
+            )
+        if len(kaggle_handle.split("/")) not in (4, 5):
+            raise ValueError(
+                "Unexpected kaggle preset handle. Kaggle model handles should have "
+                "the form kaggle://{org}/{model}/keras/{variant}[/{version}]. For "
+                "example, kaggle://keras-nlp/bert/keras/bert_base_en_uncased."
+            )
+        return kagglehub.model_download(kaggle_handle, path)
+    return os.path.join(preset, path)
+
+
+def get_tokenizer(layer):
+    """Get the tokenizer from any KerasNLP model or layer."""
+    # Avoid circular import.
+    from keras_nlp.tokenizers.tokenizer import Tokenizer
+
+    if isinstance(layer, Tokenizer):
+        return layer
+    if hasattr(layer, "tokenizer"):
+        return layer.tokenizer
+    if hasattr(layer, "preprocessor"):
+        return getattr(layer.preprocessor, "tokenizer", None)
+    return None
+
+
+def recursive_pop(config, key):
+    """Remove a key from a nested config object."""
+    config.pop(key, None)
+    for value in config.values():
+        if isinstance(value, dict):
+            recursive_pop(value, key)
+
+
+def save_to_preset(
+    layer,
+    preset,
+    save_weights=True,
+    config_filename="config.json",
+    weights_filename="model.weights.h5",
+):
+    """Save a KerasNLP layer to a preset directory."""
+    os.makedirs(preset, exist_ok=True)
+
+    # Save tokenizer assets.
+    tokenizer = get_tokenizer(layer)
+    assets = []
+    if tokenizer:
+        asset_dir = os.path.join(preset, TOKENIZER_ASSET_DIR)
+        os.makedirs(asset_dir, exist_ok=True)
+        tokenizer.save_assets(asset_dir)
+        for asset_path in os.listdir(asset_dir):
+            assets.append(os.path.join(TOKENIZER_ASSET_DIR, asset_path))
+
+    # Optionally save weights. 
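+    # Layers with no weights of their own (e.g. tokenizers) are saved as
+    # config plus assets only; no weights file is written for them.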
+    save_weights = save_weights and hasattr(layer, "save_weights")
+    if save_weights:
+        weights_path = os.path.join(preset, weights_filename)
+        layer.save_weights(weights_path)
+
+    # Save a serialized Keras object.
+    config_path = os.path.join(preset, config_filename)
+    config = keras.saving.serialize_keras_object(layer)
+    # Include references to weights and assets.
+    config["assets"] = assets
+    config["weights"] = weights_filename if save_weights else None
+    recursive_pop(config, "compile_config")
+    recursive_pop(config, "build_config")
+    with open(config_path, "w") as config_file:
+        config_file.write(json.dumps(config, indent=4))
+
+    # Save any associated metadata.
+    metadata = {
+        # TODO: save keras version and keras-nlp version.
+        "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
+    }
+    metadata_path = os.path.join(preset, "metadata.json")
+    with open(metadata_path, "w") as metadata_file:
+        metadata_file.write(json.dumps(metadata, indent=4))
+
+
+def load_from_preset(
+    preset,
+    load_weights=True,
+    config_file="config.json",
+    config_overrides={},
+):
+    """Load a KerasNLP layer from a preset directory."""
+    # Load a serialized Keras object.
+    config_path = get_file(preset, config_file)
+    with open(config_path) as config_file:
+        config = json.load(config_file)
+    config["config"] = {**config["config"], **config_overrides}
+    layer = keras.saving.deserialize_keras_object(config)
+
+    # Load any assets for our tokenizers.
+    tokenizer = get_tokenizer(layer)
+    if tokenizer and config["assets"]:
+        for asset in config["assets"]:
+            get_file(preset, asset)
+        config_dir = os.path.dirname(config_path)
+        asset_dir = os.path.join(config_dir, TOKENIZER_ASSET_DIR)
+        tokenizer.load_assets(asset_dir)
+
+    # Optionally load weights.
+    load_weights = load_weights and config["weights"]
+    if load_weights:
+        weights_path = get_file(preset, config["weights"])
+        layer.load_weights(weights_path)
+
+    return layer
+
+
+def check_preset_class(
+    preset,
+    classes,
+    config_file="config.json",
+):
+    """Validate a preset is being loaded on the correct class."""
+    config_path = get_file(preset, config_file)
+    with open(config_path) as config_file:
+        config = json.load(config_file)
+    cls = keras.saving.get_registered_object(config["registered_name"])
+    if not isinstance(classes, (tuple, list)):
+        classes = (classes,)
+    if cls not in classes:
+        raise ValueError(
+            f"Unexpected class in preset `'{preset}'`. "
+            "When calling `from_preset()` on a class object, the preset class "
+            f"must match allowed classes. Allowed classes are `{classes}`. "
+            f"Received: `{cls}`."
+        )
+    return cls

From 21fb04ce753f0e05b1fb424beb7f3b19a404a5b3 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Tue, 21 Nov 2023 13:38:25 -0800
Subject: [PATCH 54/87] Switch byte pair tokenizer to save_assets/load_assets
 (#1322)

As part of this work, we need to also switch all downstream preprocessing
layers to create packers on build (instead of on call).
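Roughly, every preprocessor moves to the shape sketched below. This is a
minimal illustration only; `MyPreprocessor` and its argument defaults are
hypothetical, not one of the real classes touched here. The point is that
by the time Keras calls `build()`, `load_assets()` has already restored the
tokenizer vocabulary, so the special token ids the packer reads are
populated:

    import keras
    import keras_nlp


    class MyPreprocessor(keras.layers.Layer):
        # A hypothetical preprocessor showing the deferred-packer pattern.

        def __init__(self, tokenizer, sequence_length=512, **kwargs):
            super().__init__(**kwargs)
            self.tokenizer = tokenizer
            self.sequence_length = sequence_length
            # Don't create the packer here: if the tokenizer vocabulary
            # hasn't been set yet, its special token ids are still `None`.
            self.packer = None

        def build(self, input_shape):
            # By `build()` time, tokenizer assets have been restored, so
            # the start/end/pad token ids below are valid.
            self.packer = keras_nlp.layers.StartEndPacker(
                sequence_length=self.sequence_length,
                start_value=self.tokenizer.start_token_id,
                end_value=self.tokenizer.end_token_id,
                pad_value=self.tokenizer.pad_token_id,
                return_padding_mask=True,
            )
            self.built = True

        def call(self, x):
            token_ids, padding_mask = self.packer(self.tokenizer(x))
            return {"token_ids": token_ids, "padding_mask": padding_mask}

The `generate_preprocess()`/`generate_postprocess()` methods gain an
explicit `if not self.built: self.build(None)` guard for the same reason,
since they can be called directly, outside a standard layer invocation.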
--- keras_nlp/models/bart/bart_preprocessor.py | 39 ++-- .../bart/bart_seq_2_seq_lm_preprocessor.py | 45 ++--- .../bart_seq_2_seq_lm_preprocessor_test.py | 2 +- keras_nlp/models/bart/bart_tokenizer.py | 49 +++-- keras_nlp/models/bert/bert_preprocessor.py | 20 +- keras_nlp/models/bert/bert_tokenizer.py | 18 +- .../distil_bert/distil_bert_tokenizer.py | 20 +- .../gpt2/gpt2_causal_lm_preprocessor.py | 6 + keras_nlp/models/gpt2/gpt2_causal_lm_test.py | 2 +- keras_nlp/models/gpt2/gpt2_preprocessor.py | 35 ++-- keras_nlp/models/gpt2/gpt2_tokenizer.py | 41 +++-- .../gpt_neo_x_causal_lm_preprocessor.py | 6 + .../gpt_neo_x/gpt_neo_x_causal_lm_test.py | 2 +- .../gpt_neo_x/gpt_neo_x_preprocessor.py | 36 ++-- .../models/gpt_neo_x/gpt_neo_x_tokenizer.py | 39 ++-- .../models/opt/opt_causal_lm_preprocessor.py | 6 + keras_nlp/models/opt/opt_causal_lm_test.py | 2 +- keras_nlp/models/opt/opt_preprocessor.py | 14 +- keras_nlp/models/opt/opt_tokenizer.py | 50 +++-- .../models/roberta/roberta_classifier_test.py | 2 +- .../roberta/roberta_masked_lm_preprocessor.py | 54 +++--- .../models/roberta/roberta_masked_lm_test.py | 2 +- .../models/roberta/roberta_preprocessor.py | 32 ++-- keras_nlp/models/roberta/roberta_tokenizer.py | 60 +++--- .../models/whisper/whisper_preprocessor.py | 104 ++++++----- keras_nlp/models/whisper/whisper_tokenizer.py | 98 +++++----- keras_nlp/tokenizers/byte_pair_tokenizer.py | 174 +++++++++++++----- keras_nlp/tokenizers/word_piece_tokenizer.py | 28 ++- 28 files changed, 599 insertions(+), 387 deletions(-) diff --git a/keras_nlp/models/bart/bart_preprocessor.py b/keras_nlp/models/bart/bart_preprocessor.py index ebe0310b69..eaf85d883c 100644 --- a/keras_nlp/models/bart/bart_preprocessor.py +++ b/keras_nlp/models/bart/bart_preprocessor.py @@ -140,15 +140,23 @@ def __init__( ): super().__init__(**kwargs) self.tokenizer = tokenizer + self.encoder_sequence_length = encoder_sequence_length + self.decoder_sequence_length = decoder_sequence_length + self.encoder_packer = None + self.decoder_packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. # TODO: Use `MultiSegmentPacker` instead of `StartEndPacker` once we # want to move to multi-segment packing and have improved # `MultiSegmentPacker`'s performance. 
self.encoder_packer = StartEndPacker( - start_value=tokenizer.start_token_id, - end_value=tokenizer.end_token_id, - pad_value=tokenizer.pad_token_id, - sequence_length=encoder_sequence_length, + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + pad_value=self.tokenizer.pad_token_id, + sequence_length=self.encoder_sequence_length, return_padding_mask=True, ) @@ -161,19 +169,10 @@ def __init__( ], end_value=self.tokenizer.end_token_id, pad_value=self.tokenizer.pad_token_id, - sequence_length=decoder_sequence_length, + sequence_length=self.decoder_sequence_length, return_padding_mask=True, ) - - def get_config(self): - config = super().get_config() - config.update( - { - "encoder_sequence_length": self.encoder_packer.sequence_length, - "decoder_sequence_length": self.decoder_packer.sequence_length, - } - ) - return config + self.built = True def call(self, x, y=None, sample_weight=None): if not ( @@ -217,6 +216,16 @@ def call(self, x, y=None, sample_weight=None): return pack_x_y_sample_weight(x, y, sample_weight) + def get_config(self): + config = super().get_config() + config.update( + { + "encoder_sequence_length": self.encoder_sequence_length, + "decoder_sequence_length": self.decoder_sequence_length, + } + ) + return config + @classproperty def tokenizer_cls(cls): return BartTokenizer diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py index 8c15de8574..048c88e82e 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py @@ -46,16 +46,6 @@ class BartSeq2SeqLMPreprocessor(BartPreprocessor): tokenizer: A `keras_nlp.models.BartTokenizer` instance. encoder_sequence_length: The length of the packed encoder inputs. decoder_sequence_length: The length of the packed decoder inputs. - truncate: string. The algorithm to truncate a list of batched segments - to fit within `sequence_length`. The value can be either - `round_robin` or `waterfall`: - - `"round_robin"`: Available space is assigned one token at a - time in a round-robin fashion to the inputs that still need - some, until the limit is reached. - - `"waterfall"`: The allocation of the budget is done using a - "waterfall" algorithm that allocates quota in a - left-to-right manner and fills up the buckets until we run - out of budget. It supports an arbitrary number of segments. Call arguments: x: A dictionary with `encoder_text` and `decoder_text` as its keys. 
@@ -139,7 +129,6 @@ def __init__(
         tokenizer,
         encoder_sequence_length,
         decoder_sequence_length,
-        truncate="round_robin",
         **kwargs
     ):
         # Since we truncate the last token from `decoder_token_ids`, we need to
@@ -156,16 +145,6 @@ def __init__(
         self._encoder_sequence_length = encoder_sequence_length
         self._decoder_sequence_length = decoder_sequence_length
 
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "encoder_sequence_length": self._encoder_sequence_length,
-                "decoder_sequence_length": self._decoder_sequence_length,
-            }
-        )
-        return config
-
     def call(self, x, y=None, sample_weight=None):
         if y is not None or sample_weight is not None:
             logging.warning(
@@ -191,10 +170,6 @@ def call(self, x, y=None, sample_weight=None):
             sample_weight = decoder_padding_mask[..., 1:]
         return pack_x_y_sample_weight(x, y, sample_weight)
 
-    @classproperty
-    def presets(cls):
-        return copy.deepcopy(backbone_presets)
-
     def generate_preprocess(
         self,
         x,
@@ -212,6 +187,9 @@ def generate_preprocess(
         the decoder sequence (as generation is expected to continue at the
         end of the inputted decoder prompt).
         """
+        if not self.built:
+            self.build(None)
+
         # If `sequence_length` is not provided, we use the default value.
         if sequence_length is None:
             sequence_length = self._decoder_sequence_length
@@ -262,6 +240,9 @@ def generate_postprocess(
         padding and start/end tokens, and then converting the integer sequence
         back to a string.
         """
+        if not self.built:
+            self.build(None)
+
         decoder_token_ids, decoder_padding_mask = (
             x["decoder_token_ids"],
             x["decoder_padding_mask"],
@@ -279,3 +260,17 @@ def generate_postprocess(
             decoder_token_ids, decoder_padding_mask
         )
         return self.tokenizer.detokenize(decoder_token_ids)
+
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "encoder_sequence_length": self._encoder_sequence_length,
+                "decoder_sequence_length": self._decoder_sequence_length,
+            }
+        )
+        return config
+
+    @classproperty
+    def presets(cls):
+        return copy.deepcopy(backbone_presets)
diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py
index f67dab70a0..33fbd5fc3a 100644
--- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py
+++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor_test.py
@@ -21,7 +21,7 @@
 from keras_nlp.tests.test_case import TestCase
 
 
-class BartPreprocessorTest(TestCase):
+class BartSeq2SeqLMPreprocessorTest(TestCase):
     def setUp(self):
         self.vocab = ["<s>", "<pad>", "</s>", "air", "Ġair", "plane", "Ġat"]
         self.vocab += ["port", "<mask>"]
diff --git a/keras_nlp/models/bart/bart_tokenizer.py b/keras_nlp/models/bart/bart_tokenizer.py
index 0f8728d7dd..17fb237b88 100644
--- a/keras_nlp/models/bart/bart_tokenizer.py
+++ b/keras_nlp/models/bart/bart_tokenizer.py
@@ -78,34 +78,45 @@ class BartTokenizer(BytePairTokenizer):
 
     def __init__(
         self,
-        vocabulary,
-        merges,
+        vocabulary=None,
+        merges=None,
         **kwargs,
     ):
-        # Special tokens.
-        start_token = "<s>"
-        pad_token = "<pad>"
-        end_token = "</s>"
+        self.start_token = "<s>"
+        self.pad_token = "<pad>"
+        self.end_token = "</s>"
 
         super().__init__(
             vocabulary=vocabulary,
             merges=merges,
-            unsplittable_tokens=[start_token, pad_token, end_token],
+            unsplittable_tokens=[
+                self.start_token,
+                self.pad_token,
+                self.end_token,
+            ],
             **kwargs,
         )
 
-        # Check whether special tokens are present in the vocabulary. 
- for token in [start_token, pad_token, end_token]: - if token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.start_token_id = self.token_to_id(start_token) - self.pad_token_id = self.token_to_id(pad_token) - self.end_token_id = self.token_to_id(end_token) + def set_vocabulary_and_merges(self, vocabulary, merges): + super().set_vocabulary_and_merges(vocabulary, merges) + + if vocabulary is not None: + # Check for necessary special tokens. + for token in [self.start_token, self.pad_token, self.end_token]: + if token not in self.vocabulary: + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." + ) + + self.start_token_id = self.token_to_id(self.start_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.end_token_id = self.token_to_id(self.end_token) + else: + self.start_token_id = None + self.pad_token_id = None + self.end_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/bert/bert_preprocessor.py b/keras_nlp/models/bert/bert_preprocessor.py index c6cf1e77e8..bad38f22a5 100644 --- a/keras_nlp/models/bert/bert_preprocessor.py +++ b/keras_nlp/models/bert/bert_preprocessor.py @@ -143,16 +143,6 @@ def __init__( self.truncate = truncate self.packer = None - def get_config(self): - config = super().get_config() - config.update( - { - "sequence_length": self.sequence_length, - "truncate": self.truncate, - } - ) - return config - def build(self, input_shape): # Defer packer creation to `build()` so that we can be sure tokenizer # assets have loaded when restoring a saved model. @@ -176,6 +166,16 @@ def call(self, x, y=None, sample_weight=None): } return pack_x_y_sample_weight(x, y, sample_weight) + def get_config(self): + config = super().get_config() + config.update( + { + "sequence_length": self.sequence_length, + "truncate": self.truncate, + } + ) + return config + @classproperty def tokenizer_cls(cls): return BertTokenizer diff --git a/keras_nlp/models/bert/bert_tokenizer.py b/keras_nlp/models/bert/bert_tokenizer.py index a833f423d9..1b634fe9b3 100644 --- a/keras_nlp/models/bert/bert_tokenizer.py +++ b/keras_nlp/models/bert/bert_tokenizer.py @@ -78,6 +78,10 @@ def __init__( lowercase=False, **kwargs, ): + self.cls_token = "[CLS]" + self.sep_token = "[SEP]" + self.pad_token = "[PAD]" + self.mask_token = "[MASK]" super().__init__( vocabulary=vocabulary, lowercase=lowercase, @@ -89,11 +93,7 @@ def set_vocabulary(self, vocabulary): if vocabulary is not None: # Check for necessary special tokens. - cls_token = "[CLS]" - sep_token = "[SEP]" - pad_token = "[PAD]" - mask_token = "[MASK]" - for token in [cls_token, pad_token, sep_token]: + for token in [self.cls_token, self.pad_token, self.sep_token]: if token not in self.vocabulary: raise ValueError( f"Cannot find token `'{token}'` in the provided " @@ -101,10 +101,10 @@ def set_vocabulary(self, vocabulary): "`vocabulary` or use a pretrained `vocabulary` name." 
) - self.cls_token_id = self.token_to_id(cls_token) - self.sep_token_id = self.token_to_id(sep_token) - self.pad_token_id = self.token_to_id(pad_token) - self.mask_token_id = self.token_to_id(mask_token) + self.cls_token_id = self.token_to_id(self.cls_token) + self.sep_token_id = self.token_to_id(self.sep_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.mask_token_id = self.token_to_id(self.mask_token) else: self.cls_token_id = None self.sep_token_id = None diff --git a/keras_nlp/models/distil_bert/distil_bert_tokenizer.py b/keras_nlp/models/distil_bert/distil_bert_tokenizer.py index 1b0ef069db..4a18398a1e 100644 --- a/keras_nlp/models/distil_bert/distil_bert_tokenizer.py +++ b/keras_nlp/models/distil_bert/distil_bert_tokenizer.py @@ -76,6 +76,10 @@ def __init__( lowercase=False, **kwargs, ): + self.cls_token = "[CLS]" + self.sep_token = "[SEP]" + self.pad_token = "[PAD]" + self.mask_token = "[MASK]" super().__init__( vocabulary=vocabulary, lowercase=lowercase, @@ -87,22 +91,18 @@ def set_vocabulary(self, vocabulary): if vocabulary is not None: # Check for necessary special tokens. - cls_token = "[CLS]" - sep_token = "[SEP]" - pad_token = "[PAD]" - mask_token = "[MASK]" - for token in [cls_token, pad_token, sep_token]: - if token not in self.get_vocabulary(): + for token in [self.cls_token, self.pad_token, self.sep_token]: + if token not in self.vocabulary: raise ValueError( f"Cannot find token `'{token}'` in the provided " f"`vocabulary`. Please provide `'{token}'` in your " "`vocabulary` or use a pretrained `vocabulary` name." ) - self.cls_token_id = self.token_to_id(cls_token) - self.sep_token_id = self.token_to_id(sep_token) - self.pad_token_id = self.token_to_id(pad_token) - self.mask_token_id = self.token_to_id(mask_token) + self.cls_token_id = self.token_to_id(self.cls_token) + self.sep_token_id = self.token_to_id(self.sep_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.mask_token_id = self.token_to_id(self.mask_token) else: self.cls_token_id = None self.sep_token_id = None diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py index 41ea591df8..97d0b42d97 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_preprocessor.py @@ -142,6 +142,9 @@ def generate_preprocess( the sequence (as generation is expected to continue at the end of the inputted prompt). """ + if not self.built: + self.build(None) + x = convert_inputs_to_list_of_tensor_segments(x)[0] x = self.tokenizer(x) token_ids, padding_mask = self.packer( @@ -162,6 +165,9 @@ def generate_postprocess( padding and start/end tokens, and then converting the integer sequence back to a string. 
""" + if not self.built: + self.build(None) + token_ids, padding_mask = x["token_ids"], x["padding_mask"] token_ids = ops.convert_to_numpy(token_ids) padding_mask = ops.convert_to_numpy(padding_mask) diff --git a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py index 7ce931505c..f34b6baa47 100644 --- a/keras_nlp/models/gpt2/gpt2_causal_lm_test.py +++ b/keras_nlp/models/gpt2/gpt2_causal_lm_test.py @@ -44,7 +44,7 @@ def setUp(self): num_heads=2, hidden_dim=4, intermediate_dim=8, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/gpt2/gpt2_preprocessor.py b/keras_nlp/models/gpt2/gpt2_preprocessor.py index bb37364364..29182f77b6 100644 --- a/keras_nlp/models/gpt2/gpt2_preprocessor.py +++ b/keras_nlp/models/gpt2/gpt2_preprocessor.py @@ -123,24 +123,18 @@ def __init__( self.sequence_length = sequence_length self.add_start_token = add_start_token self.add_end_token = add_end_token + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.packer = StartEndPacker( - start_value=tokenizer.start_token_id, - end_value=tokenizer.end_token_id, - pad_value=tokenizer.pad_token_id, - sequence_length=sequence_length, + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + pad_value=self.tokenizer.pad_token_id, + sequence_length=self.sequence_length, return_padding_mask=True, ) - - def get_config(self): - config = super().get_config() - config.update( - { - "sequence_length": self.sequence_length, - "add_start_token": self.add_start_token, - "add_end_token": self.add_end_token, - } - ) - return config + self.built = True def call( self, @@ -170,6 +164,17 @@ def call( } return pack_x_y_sample_weight(x, y, sample_weight) + def get_config(self): + config = super().get_config() + config.update( + { + "sequence_length": self.sequence_length, + "add_start_token": self.add_start_token, + "add_end_token": self.add_end_token, + } + ) + return config + @classproperty def presets(cls): return copy.deepcopy(backbone_presets) diff --git a/keras_nlp/models/gpt2/gpt2_tokenizer.py b/keras_nlp/models/gpt2/gpt2_tokenizer.py index 9401b385d8..15b35bed87 100644 --- a/keras_nlp/models/gpt2/gpt2_tokenizer.py +++ b/keras_nlp/models/gpt2/gpt2_tokenizer.py @@ -70,32 +70,39 @@ class GPT2Tokenizer(BytePairTokenizer): def __init__( self, - vocabulary, - merges, + vocabulary=None, + merges=None, **kwargs, ): - # Special tokens. - end_token = "<|endoftext|>" + # GPT2 uses the same start as end token, i.e., "<|endoftext|>". + self.end_token = self.start_token = "<|endoftext|>" super().__init__( vocabulary=vocabulary, merges=merges, - unsplittable_tokens=[end_token], + unsplittable_tokens=[self.end_token], **kwargs, ) - # Check whether special tokens are present in the vocabulary. - if end_token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{end_token}'` in the provided " - f"`vocabulary`. Please provide `'{end_token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.end_token_id = self.token_to_id(end_token) - # GPT2 uses the same start as end token, i.e., "<|endoftext|>". 
- self.start_token_id = self.end_token_id - self.pad_token_id = 0 + def set_vocabulary_and_merges(self, vocabulary, merges): + super().set_vocabulary_and_merges(vocabulary, merges) + + if vocabulary is not None: + # Check for necessary special tokens. + if self.end_token not in self.get_vocabulary(): + raise ValueError( + f"Cannot find token `'{self.end_token}'` in the provided " + f"`vocabulary`. Please provide `'{self.end_token}'` in " + "your `vocabulary` or use a pretrained `vocabulary` name." + ) + + self.end_token_id = self.token_to_id(self.end_token) + self.start_token_id = self.end_token_id + self.pad_token_id = 0 + else: + self.end_token_id = None + self.start_token_id = None + self.pad_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py index 3ed16c3ff9..92ff9bbb03 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py @@ -110,6 +110,9 @@ def generate_preprocess( the sequence (as generation is expected to continue at the end of the inputted prompt). """ + if not self.built: + self.build(None) + x = convert_inputs_to_list_of_tensor_segments(x)[0] x = self.tokenizer(x) token_ids, padding_mask = self.packer( @@ -130,6 +133,9 @@ def generate_postprocess( padding and start/end tokens, and then converting the integer sequence back to a string. """ + if not self.built: + self.build(None) + token_ids, padding_mask = x["token_ids"], x["padding_mask"] if not isinstance(token_ids, tf.Tensor): token_ids = ops.convert_to_numpy(token_ids) diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py index 6857d5f40e..c8839c8be9 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_test.py @@ -44,7 +44,7 @@ def setUp(self): num_heads=2, hidden_dim=4, intermediate_dim=8, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py index 0dad9a053a..1db4fe4c9b 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py @@ -79,24 +79,19 @@ def __init__( self.sequence_length = sequence_length self.add_start_token = add_start_token self.add_end_token = add_end_token + self.packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. 
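+        # (Before the vocabulary loads, `start_token_id` and the other
+        # special token ids on the tokenizer are still `None`.)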
self.packer = StartEndPacker( - start_value=tokenizer.start_token_id, - end_value=tokenizer.end_token_id, - pad_value=tokenizer.pad_token_id, - sequence_length=sequence_length, + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + pad_value=self.tokenizer.pad_token_id, + sequence_length=self.sequence_length, return_padding_mask=True, ) - - def get_config(self): - config = super().get_config() - config.update( - { - "sequence_length": self.sequence_length, - "add_start_token": self.add_start_token, - "add_end_token": self.add_end_token, - } - ) - return config + self.built = True def call( self, @@ -126,6 +121,17 @@ def call( } return pack_x_y_sample_weight(x, y, sample_weight) + def get_config(self): + config = super().get_config() + config.update( + { + "sequence_length": self.sequence_length, + "add_start_token": self.add_start_token, + "add_end_token": self.add_end_token, + } + ) + return config + @classproperty def tokenizer_cls(cls): return GPTNeoXTokenizer diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py index 3935d85a65..d109c5849d 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py @@ -45,32 +45,39 @@ class GPTNeoXTokenizer(BytePairTokenizer): def __init__( self, - vocabulary, - merges, + vocabulary=None, + merges=None, **kwargs, ): - # Special tokens. - end_token = "<|endoftext|>" + # GPTNeoX uses the same start as end token, i.e., "<|endoftext|>". + self.end_token = self.start_token = "<|endoftext|>" super().__init__( vocabulary=vocabulary, merges=merges, - unsplittable_tokens=[end_token], + unsplittable_tokens=[self.end_token], **kwargs, ) - # Check whether special tokens are present in the vocabulary. - if end_token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{end_token}'` in the provided " - f"`vocabulary`. Please provide `'{end_token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) + def set_vocabulary_and_merges(self, vocabulary, merges): + super().set_vocabulary_and_merges(vocabulary, merges) - self.end_token_id = self.token_to_id(end_token) - # GPTNeoX uses the same start as end token, i.e., "<|endoftext|>". - self.start_token_id = self.end_token_id - self.pad_token_id = 0 + if vocabulary is not None: + # Check for necessary special tokens. + if self.end_token not in self.get_vocabulary(): + raise ValueError( + f"Cannot find token `'{self.end_token}'` in the provided " + f"`vocabulary`. Please provide `'{self.end_token}'` in " + "your `vocabulary` or use a pretrained `vocabulary` name." + ) + + self.end_token_id = self.token_to_id(self.end_token) + self.start_token_id = self.end_token_id + self.pad_token_id = 0 + else: + self.end_token_id = None + self.start_token_id = None + self.pad_token_id = None def get_config(self): config = super().get_config() diff --git a/keras_nlp/models/opt/opt_causal_lm_preprocessor.py b/keras_nlp/models/opt/opt_causal_lm_preprocessor.py index 9cc8c7f495..1895854e41 100644 --- a/keras_nlp/models/opt/opt_causal_lm_preprocessor.py +++ b/keras_nlp/models/opt/opt_causal_lm_preprocessor.py @@ -143,6 +143,9 @@ def generate_preprocess( the sequence (as generation is expected to continue at the end of the inputted prompt). 
""" + if not self.built: + self.build(None) + x = convert_inputs_to_list_of_tensor_segments(x)[0] x = self.tokenizer(x) token_ids, padding_mask = self.packer( @@ -163,6 +166,9 @@ def generate_postprocess( padding and start/end tokens, and then converting the integer sequence back to a string. """ + if not self.built: + self.build(None) + token_ids, padding_mask = x["token_ids"], x["padding_mask"] token_ids = ops.convert_to_numpy(token_ids) padding_mask = ops.convert_to_numpy(padding_mask) diff --git a/keras_nlp/models/opt/opt_causal_lm_test.py b/keras_nlp/models/opt/opt_causal_lm_test.py index e6e707e72a..3ba27178d1 100644 --- a/keras_nlp/models/opt/opt_causal_lm_test.py +++ b/keras_nlp/models/opt/opt_causal_lm_test.py @@ -43,7 +43,7 @@ def setUp(self): num_heads=2, hidden_dim=4, intermediate_dim=8, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/opt/opt_preprocessor.py b/keras_nlp/models/opt/opt_preprocessor.py index 6a6b5537bf..cdca904870 100644 --- a/keras_nlp/models/opt/opt_preprocessor.py +++ b/keras_nlp/models/opt/opt_preprocessor.py @@ -123,13 +123,19 @@ def __init__( self.sequence_length = sequence_length self.add_start_token = add_start_token self.add_end_token = add_end_token + self.packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.packer = StartEndPacker( - start_value=tokenizer.start_token_id, - end_value=tokenizer.end_token_id, - pad_value=tokenizer.pad_token_id, - sequence_length=sequence_length, + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + pad_value=self.tokenizer.pad_token_id, + sequence_length=self.sequence_length, return_padding_mask=True, ) + self.built = True def get_config(self): config = super().get_config() diff --git a/keras_nlp/models/opt/opt_tokenizer.py b/keras_nlp/models/opt/opt_tokenizer.py index b15aa94842..4fb62ee73a 100644 --- a/keras_nlp/models/opt/opt_tokenizer.py +++ b/keras_nlp/models/opt/opt_tokenizer.py @@ -70,35 +70,45 @@ class OPTTokenizer(BytePairTokenizer): def __init__( self, - vocabulary, - merges, + vocabulary=None, + merges=None, **kwargs, ): - # Special tokens. We use `""` as both a start and end token, as OPT - # was only pre-trained with `""` marking document boundaries. - start_token = "" - pad_token = "" - end_token = "" + self.start_token = "" + self.pad_token = "" + self.end_token = "" super().__init__( vocabulary=vocabulary, merges=merges, - unsplittable_tokens=[start_token, pad_token, end_token], + unsplittable_tokens=[ + self.start_token, + self.pad_token, + self.end_token, + ], **kwargs, ) - # Check whether special tokens are present in the vocabulary. - for token in [start_token, pad_token, end_token]: - if token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.start_token_id = self.token_to_id(start_token) - self.pad_token_id = self.token_to_id(pad_token) - self.end_token_id = self.token_to_id(end_token) + def set_vocabulary_and_merges(self, vocabulary, merges): + super().set_vocabulary_and_merges(vocabulary, merges) + + if vocabulary is not None: + # Check for necessary special tokens. 
+            for token in [self.start_token, self.pad_token, self.end_token]:
+                if token not in self.vocabulary:
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. Please provide `'{token}'` in your "
+                        "`vocabulary` or use a pretrained `vocabulary` name."
+                    )
+
+            self.start_token_id = self.token_to_id(self.start_token)
+            self.pad_token_id = self.token_to_id(self.pad_token)
+            self.end_token_id = self.token_to_id(self.end_token)
+        else:
+            self.start_token_id = None
+            self.pad_token_id = None
+            self.end_token_id = None
 
     @classproperty
     def presets(cls):
diff --git a/keras_nlp/models/roberta/roberta_classifier_test.py b/keras_nlp/models/roberta/roberta_classifier_test.py
index 04c054f4bc..e85d2a3703 100644
--- a/keras_nlp/models/roberta/roberta_classifier_test.py
+++ b/keras_nlp/models/roberta/roberta_classifier_test.py
@@ -40,7 +40,7 @@ def setUp(self):
             num_heads=2,
             hidden_dim=2,
             intermediate_dim=4,
-            max_sequence_length=self.preprocessor.packer.sequence_length,
+            max_sequence_length=self.preprocessor.sequence_length,
         )
         self.init_kwargs = {
             "preprocessor": self.preprocessor,
diff --git a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py
index 0d5a24e129..c69c300dc8 100644
--- a/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py
+++ b/keras_nlp/models/roberta/roberta_masked_lm_preprocessor.py
@@ -137,32 +137,30 @@ def __init__(
             truncate=truncate,
             **kwargs,
         )
-
+        self.mask_selection_rate = mask_selection_rate
+        self.mask_selection_length = mask_selection_length
+        self.mask_token_rate = mask_token_rate
+        self.random_token_rate = random_token_rate
+        self.masker = None
+
+    def build(self, input_shape):
+        super().build(input_shape)
+        # Defer masker creation to `build()` so that we can be sure tokenizer
+        # assets have loaded when restoring a saved model. 
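+        # (The masker needs the tokenizer's vocabulary size and mask token
+        # id, neither of which is known until the vocabulary is set.)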
self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.start_token_id, - tokenizer.end_token_id, - tokenizer.pad_token_id, + self.tokenizer.start_token_id, + self.tokenizer.end_token_id, + self.tokenizer.pad_token_id, ], ) - - def get_config(self): - config = super().get_config() - config.update( - { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, - } - ) - return config + self.built = True def call(self, x, y=None, sample_weight=None): if y is not None or sample_weight is not None: @@ -184,3 +182,15 @@ def call(self, x, y=None, sample_weight=None): y = masker_outputs["mask_ids"] sample_weight = masker_outputs["mask_weights"] return pack_x_y_sample_weight(x, y, sample_weight) + + def get_config(self): + config = super().get_config() + config.update( + { + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, + } + ) + return config diff --git a/keras_nlp/models/roberta/roberta_masked_lm_test.py b/keras_nlp/models/roberta/roberta_masked_lm_test.py index 663b5a5b0c..f4e410fa69 100644 --- a/keras_nlp/models/roberta/roberta_masked_lm_test.py +++ b/keras_nlp/models/roberta/roberta_masked_lm_test.py @@ -47,7 +47,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/roberta/roberta_preprocessor.py b/keras_nlp/models/roberta/roberta_preprocessor.py index 7e641a93a8..556561d17c 100644 --- a/keras_nlp/models/roberta/roberta_preprocessor.py +++ b/keras_nlp/models/roberta/roberta_preprocessor.py @@ -143,24 +143,22 @@ def __init__( super().__init__(**kwargs) self.tokenizer = tokenizer + self.truncate = truncate + self.sequence_length = sequence_length + self.packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. 
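+        # (RoBERTa separates segment pairs with a doubled end token, hence
+        # the `sep_value` of two end token ids below.)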
         self.packer = MultiSegmentPacker(
             start_value=self.tokenizer.start_token_id,
             end_value=self.tokenizer.end_token_id,
             sep_value=[self.tokenizer.end_token_id] * 2,
             pad_value=self.tokenizer.pad_token_id,
-            truncate=truncate,
-            sequence_length=sequence_length,
+            truncate=self.truncate,
+            sequence_length=self.sequence_length,
         )
-
-    def get_config(self):
-        config = super().get_config()
-        config.update(
-            {
-                "sequence_length": self.packer.sequence_length,
-                "truncate": self.packer.truncate,
-            }
-        )
-        return config
+        self.built = True
 
     def call(self, x, y=None, sample_weight=None):
         x = convert_inputs_to_list_of_tensor_segments(x)
@@ -172,6 +170,16 @@ def call(self, x, y=None, sample_weight=None):
         }
         return pack_x_y_sample_weight(x, y, sample_weight)
 
+    def get_config(self):
+        config = super().get_config()
+        config.update(
+            {
+                "sequence_length": self.sequence_length,
+                "truncate": self.truncate,
+            }
+        )
+        return config
+
     @classproperty
     def tokenizer_cls(cls):
         return RobertaTokenizer
diff --git a/keras_nlp/models/roberta/roberta_tokenizer.py b/keras_nlp/models/roberta/roberta_tokenizer.py
index 838f32a8ac..0cfabff754 100644
--- a/keras_nlp/models/roberta/roberta_tokenizer.py
+++ b/keras_nlp/models/roberta/roberta_tokenizer.py
@@ -77,36 +77,54 @@ class RobertaTokenizer(BytePairTokenizer):
 
     def __init__(
         self,
-        vocabulary,
-        merges,
+        vocabulary=None,
+        merges=None,
         **kwargs,
     ):
-        # Special tokens.
-        start_token = "<s>"
-        pad_token = "<pad>"
-        end_token = "</s>"
-        mask_token = "<mask>"
+        self.start_token = "<s>"
+        self.pad_token = "<pad>"
+        self.end_token = "</s>"
+        self.mask_token = "<mask>"
 
         super().__init__(
             vocabulary=vocabulary,
             merges=merges,
-            unsplittable_tokens=[start_token, pad_token, end_token, mask_token],
+            unsplittable_tokens=[
+                self.start_token,
+                self.pad_token,
+                self.end_token,
+                self.mask_token,
+            ],
             **kwargs,
         )
 
-        # Check whether special tokens are present in the vocabulary.
-        for token in [start_token, pad_token, end_token, mask_token]:
-            if token not in self.get_vocabulary():
-                raise ValueError(
-                    f"Cannot find token `'{token}'` in the provided "
-                    f"`vocabulary`. Please provide `'{token}'` in your "
-                    "`vocabulary` or use a pretrained `vocabulary` name."
-                )
-
-        self.start_token_id = self.token_to_id(start_token)
-        self.pad_token_id = self.token_to_id(pad_token)
-        self.end_token_id = self.token_to_id(end_token)
-        self.mask_token_id = self.token_to_id(mask_token)
+    def set_vocabulary_and_merges(self, vocabulary, merges):
+        super().set_vocabulary_and_merges(vocabulary, merges)
+
+        if vocabulary is not None:
+            # Check for necessary special tokens.
+            for token in [
+                self.start_token,
+                self.pad_token,
+                self.end_token,
+                self.mask_token,
+            ]:
+                if token not in self.vocabulary:
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. Please provide `'{token}'` in your "
+                        "`vocabulary` or use a pretrained `vocabulary` name." 
+ ) + + self.start_token_id = self.token_to_id(self.start_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.end_token_id = self.token_to_id(self.end_token) + self.mask_token_id = self.token_to_id(self.mask_token) + else: + self.start_token_id = None + self.pad_token_id = None + self.end_token_id = None + self.mask_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/whisper/whisper_preprocessor.py b/keras_nlp/models/whisper/whisper_preprocessor.py index 8545890cc2..2f8673c52f 100644 --- a/keras_nlp/models/whisper/whisper_preprocessor.py +++ b/keras_nlp/models/whisper/whisper_preprocessor.py @@ -164,48 +164,61 @@ def __init__( super().__init__(**kwargs) self.audio_feature_extractor = audio_feature_extractor self.tokenizer = tokenizer + self.decoder_sequence_length = decoder_sequence_length + self.language = language + self.task = task + self.no_timestamps = no_timestamps + self.decoder_packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. # Create list of tokens to be prepended to decoder inputs. bos_tokens = [self.tokenizer.bos_token_id] if self.tokenizer.language_tokens is not None: if ( - language is None - or language not in self.tokenizer.language_tokens + self.language is None + or self.language not in self.tokenizer.language_tokens ): raise ValueError( "You must pass a non-None value for `language` when using " "a multilingual tokenizer. The value must be one of " f'{",".join(self.tokenizer.language_tokens.keys())}. ' - f"Received: language={language}." + f"Received: language={self.language}." ) - if task is None or task not in ["transcribe", "translate"]: + if self.task is None or self.task not in [ + "transcribe", + "translate", + ]: raise ValueError( "You must pass a non-None value for `task` when using " "a multilingual tokenizer. The value must be one of " - f'`"transcribe"`, `"translate"`. Received: task={task}.' + '`"transcribe"`, `"translate"`. ' + f"Received: task={self.task}." ) - bos_tokens += [self.tokenizer.language_tokens[language]] + bos_tokens += [self.tokenizer.language_tokens[self.language]] - if task == "transcribe": + if self.task == "transcribe": bos_tokens += [self.tokenizer.special_tokens["<|transcribe|>"]] - elif task == "translate": + elif self.task == "translate": bos_tokens += [self.tokenizer.special_tokens["<|translate|>"]] else: - if language is not None: + if self.language is not None: logging.info( "`tokenizer` is monolingual, and `language` has a " "non-`None` value. Setting `language` to `None`." ) - language = None - if task is not None: + self.language = None + if self.task is not None: logging.info( "`tokenizer` is monolingual, and `task` has a " "non-`None` value. Setting `task` to `None`." 
) - task = None + self.task = None - if no_timestamps: + if self.no_timestamps: bos_tokens += [self.tokenizer.no_timestamps_token_id] # TODO: Use `MultiSegmentPacker` instead of `StartEndPacker` once we @@ -215,44 +228,10 @@ def __init__( start_value=bos_tokens, end_value=self.tokenizer.eos_token_id, pad_value=self.tokenizer.pad_token_id, - sequence_length=decoder_sequence_length, + sequence_length=self.decoder_sequence_length, return_padding_mask=True, ) - self.decoder_sequence_length = decoder_sequence_length - self.language = language - self.task = task - self.no_timestamps = no_timestamps - - def get_config(self): - config = super().get_config() - config.update( - { - "audio_feature_extractor": keras.layers.serialize( - self.audio_feature_extractor - ), - "decoder_sequence_length": self.decoder_sequence_length, - "language": self.language, - "task": self.task, - "no_timestamps": self.no_timestamps, - } - ) - return config - - @classmethod - def from_config(cls, config): - if "tokenizer" in config and isinstance(config["tokenizer"], dict): - config["tokenizer"] = keras.layers.deserialize(config["tokenizer"]) - - if "audio_feature_extractor" in config and isinstance( - config["audio_feature_extractor"], dict - ): - config["audio_feature_extractor"] = keras.layers.deserialize( - config["audio_feature_extractor"] - ) - - return cls(**config) - def call(self, x, y=None, sample_weight=None, decoder_sequence_length=None): if not ( isinstance(x, dict) @@ -294,6 +273,35 @@ def call(self, x, y=None, sample_weight=None, decoder_sequence_length=None): return pack_x_y_sample_weight(x, y, sample_weight) + def get_config(self): + config = super().get_config() + config.update( + { + "audio_feature_extractor": keras.layers.serialize( + self.audio_feature_extractor + ), + "decoder_sequence_length": self.decoder_sequence_length, + "language": self.language, + "task": self.task, + "no_timestamps": self.no_timestamps, + } + ) + return config + + @classmethod + def from_config(cls, config): + if "tokenizer" in config and isinstance(config["tokenizer"], dict): + config["tokenizer"] = keras.layers.deserialize(config["tokenizer"]) + + if "audio_feature_extractor" in config and isinstance( + config["audio_feature_extractor"], dict + ): + config["audio_feature_extractor"] = keras.layers.deserialize( + config["audio_feature_extractor"] + ) + + return cls(**config) + @classproperty def audio_feature_extractor_cls(cls): return WhisperAudioFeatureExtractor diff --git a/keras_nlp/models/whisper/whisper_tokenizer.py b/keras_nlp/models/whisper/whisper_tokenizer.py index cd4da7d15f..9a0d55786f 100644 --- a/keras_nlp/models/whisper/whisper_tokenizer.py +++ b/keras_nlp/models/whisper/whisper_tokenizer.py @@ -52,49 +52,36 @@ class WhisperTokenizer(BytePairTokenizer): def __init__( self, - vocabulary, - merges, - special_tokens, + vocabulary=None, + merges=None, + special_tokens=None, language_tokens=None, **kwargs, ): - vocabulary = _load_dict(vocabulary) - - # Necessary special tokens. - bos_token = "<|startoftranscript|>" - eos_token = "<|endoftext|>" - + special_tokens = _load_dict(special_tokens) if language_tokens is not None: - # Multilingual tokenizer. - # TODO: The pad token for the multilingual tokenizer is actually - # "", but it errors out (OOM). After BPE is fixed, we can update - # this to "". For now, we will use `"<|endoftext|>"`. - pad_token = "<|endoftext|>" language_tokens = _load_dict(language_tokens) - # Add language tokens to the vocabulary. This makes detokenization - # easier for us. 
- vocabulary = { - **vocabulary, - **language_tokens, - } - else: - # English tokenizer. - pad_token = "<|endoftext|>" - - no_timestamps_token = "<|notimestamps|>" + # Necessary special tokens. + self.bos_token = "<|startoftranscript|>" + self.eos_token = "<|endoftext|>" + # TODO: The pad token for the multilingual tokenizer is actually + # "", but it errors out (OOM). After BPE is fixed, we can update + # this to "". For now, we will use `"<|endoftext|>"`. + self.pad_token = "<|endoftext|>" + + self.no_timestamps_token = "<|notimestamps|>" # Task special tokens. - translate_token = "<|translate|>" - transcribe_token = "<|transcribe|>" + self.translate_token = "<|translate|>" + self.transcribe_token = "<|transcribe|>" - special_tokens = _load_dict(special_tokens) for token in [ - bos_token, - eos_token, - pad_token, - no_timestamps_token, - translate_token, - transcribe_token, + self.bos_token, + self.eos_token, + self.pad_token, + self.no_timestamps_token, + self.translate_token, + self.transcribe_token, ]: if token not in special_tokens: raise ValueError( @@ -102,15 +89,16 @@ def __init__( f"`special_tokens`. Please provide `'{token}'` in your " "`special_tokens`." ) - # Add special tokens to `vocabulary` for easy detokenization. - vocabulary[token] = special_tokens[token] - self.bos_token_id = special_tokens[bos_token] - self.eos_token_id = special_tokens[eos_token] - self.pad_token_id = special_tokens[pad_token] - self.no_timestamps_token_id = special_tokens[no_timestamps_token] - self.translate_token_id = special_tokens[translate_token] - self.transcribe_token_id = special_tokens[transcribe_token] + self.bos_token_id = special_tokens[self.bos_token] + self.eos_token_id = special_tokens[self.eos_token] + self.pad_token_id = special_tokens[self.pad_token] + self.no_timestamps_token_id = special_tokens[self.no_timestamps_token] + self.translate_token_id = special_tokens[self.translate_token] + self.transcribe_token_id = special_tokens[self.transcribe_token] + + self.special_tokens = special_tokens + self.language_tokens = language_tokens # TODO: Add language tokens to `unsplittable_tokens` once we figure # out the performance issue with a large list. @@ -123,8 +111,30 @@ def __init__( **kwargs, ) - self.special_tokens = special_tokens - self.language_tokens = language_tokens + def set_vocabulary_and_merges(self, vocabulary, merges): + if vocabulary is not None: + vocabulary = _load_dict(vocabulary) + + if self.language_tokens is not None: + # Multilingual tokenizer. + # Add language tokens to the vocabulary. This makes + # detokenization easier for us. 
+ vocabulary = { + **vocabulary, + **self.language_tokens, + } + + for token in [ + self.bos_token, + self.eos_token, + self.pad_token, + self.no_timestamps_token, + self.translate_token, + self.transcribe_token, + ]: + vocabulary[token] = self.special_tokens[token] + + super().set_vocabulary_and_merges(vocabulary, merges) def get_config(self): config = super().get_config() diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index b799874d2a..fe78e40e53 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -30,6 +30,8 @@ from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.tokenizers import tokenizer +from keras_nlp.utils.preset_utils import check_preset_class +from keras_nlp.utils.preset_utils import load_from_preset from keras_nlp.utils.python_utils import classproperty from keras_nlp.utils.python_utils import format_docstring from keras_nlp.utils.tensor_utils import assert_tf_text_installed @@ -42,6 +44,10 @@ except ImportError: tf_text = None +VOCAB_FILENAME = "vocabulary.json" +MERGES_FILENAME = "merges.txt" + + # As python and TF handles special spaces differently, we need to # manually handle special spaces during string split. SPECIAL_WHITESPACES = r"\x{a0}\x{2009}\x{202f}\x{3000}" @@ -273,8 +279,8 @@ class BytePairTokenizer(tokenizer.Tokenizer): def __init__( self, - vocabulary, - merges, + vocabulary=None, + merges=None, sequence_length=None, add_prefix_space=False, unsplittable_tokens=None, @@ -325,11 +331,63 @@ def __init__( unicode_list, byte_list, default="" ) + self.set_vocabulary_and_merges(vocabulary, merges) + + def save_assets(self, dir_path): + vocab_path = os.path.join(dir_path, VOCAB_FILENAME) + merges_path = os.path.join(dir_path, MERGES_FILENAME) + with open(vocab_path, "w") as file: + file.write(json.dumps(self.vocabulary)) + with open(merges_path, "w") as file: + for merge in self.merges: + file.write(f"{merge}\n") + + def load_assets(self, dir_path): + vocab_path = os.path.join(dir_path, VOCAB_FILENAME) + merges_path = os.path.join(dir_path, MERGES_FILENAME) + self.set_vocabulary_and_merges(vocab_path, merges_path) + + def set_vocabulary_and_merges(self, vocabulary, merges): + """Set the vocabulary and merge rules from data or files.""" + if vocabulary is None or merges is None: + # Clear vocab related state. + self.vocabulary = None + self.merges = None + self.cache = None + self.id_to_token_map = None + self.token_to_id_map = None + self.merge_ranks_lookup_default = None + self.merge_ranks = None + return + + if isinstance(vocabulary, str): + with open(vocabulary, "r") as f: + self.vocabulary = json.load(f) + elif isinstance(vocabulary, dict): + self.vocabulary = vocabulary.copy() + else: + raise ValueError( + "Vocabulary must be an file path or dictionary mapping string " + "token to int ids. Received: " + f"`type(vocabulary)={type(vocabulary)}`." + ) + if isinstance(merges, str): + self.merges = [bp.rstrip() for bp in open(merges)] + elif isinstance(merges, Iterable): + self.merges = list(merges) + else: + raise ValueError( + "Merges must be a file path or a list of merge rules. " + f"Received: `type(merges)={type(merges)}`" + ) + self.cache = BytePairTokenizerCache() - if unsplittable_tokens: + if self.unsplittable_tokens: # Put special tokens into cache, so it won't be further split and # merged. 
- self.cache.insert(unsplittable_tokens, unsplittable_tokens) + self.cache.insert( + self.unsplittable_tokens, self.unsplittable_tokens + ) # Create mapping between string tokens to int ids, and vice versa. byte_pairs = [x[0] for x in self.vocabulary.items()] @@ -356,10 +414,12 @@ def __init__( def get_vocabulary(self) -> List[str]: """Get the tokenizer vocabulary as a list of strings tokens.""" + self._check_vocabulary() return self.vocabulary.keys() def vocabulary_size(self) -> int: """Get the size of the tokenizer vocabulary.""" + self._check_vocabulary() return len(self.vocabulary) def id_to_token(self, id: int) -> str: @@ -367,6 +427,7 @@ def id_to_token(self, id: int) -> str: # This will be slow, but keep memory usage down compared to building a # dict. Assuming the main use case is looking up a few special tokens # early in the vocab, this should be fine. + self._check_vocabulary() keys = self.get_vocabulary() for token in keys: @@ -376,24 +437,9 @@ def id_to_token(self, id: int) -> str: def token_to_id(self, token: str) -> int: """Convert a string token to an integer id.""" + self._check_vocabulary() return self.vocabulary[token] - def get_config(self): - config = super().get_config() - config.update( - { - # Ideally vocabulary and merge list would be saved as plain text - # assets in the saved model. We have no good way to support - # this currently, so we save the vocabulary in the config. - "vocabulary": self.vocabulary, - "merges": self.merges, - "sequence_length": self.sequence_length, - "add_prefix_space": self.add_prefix_space, - "unsplittable_tokens": self.unsplittable_tokens, - } - ) - return config - @tf.function def _bpe_merge_one_step(self, words, mask): """Perform one step of byte-pair merge.""" @@ -499,7 +545,16 @@ def loop_condition(_, mask): ) return merged_words + def _check_vocabulary(self): + if self.vocabulary is None: + raise ValueError( + "No vocabulary has been set for BytePairTokenizer. Make sure " + "to pass `vocabulary` and `merges` arguments when creating the " + "layer." + ) + def tokenize(self, inputs): + self._check_vocabulary() if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)): inputs = tf.convert_to_tensor(inputs) @@ -560,6 +615,7 @@ def process_unseen_tokens(): return tokens def detokenize(self, inputs): + self._check_vocabulary() inputs, unbatched, _ = convert_to_ragged_batch(inputs) inputs = tf.cast(inputs, self.dtype) unicode_text = tf.strings.reduce_join( @@ -592,48 +648,31 @@ def _bpe_merge_and_update_cache(self, tokens): ) self.cache.insert(tokens, tokenized_words) + def get_config(self): + config = super().get_config() + config.update( + { + "sequence_length": self.sequence_length, + "add_prefix_space": self.add_prefix_space, + "unsplittable_tokens": self.unsplittable_tokens, + } + ) + return config + @classproperty def presets(cls): return {} @classmethod - def from_preset( + def _legacy_from_preset( cls, preset, **kwargs, ): - """Instantiate {{model_name}} tokenizer from preset vocabulary. - - Args: - preset: string. Must be one of "{{preset_names}}". - - Examples: - ```python - # Load a preset tokenizer. - tokenizer = {{model_name}}.from_preset("{{example_preset_name}}") - - # Tokenize some input. - tokenizer("The quick brown fox tripped.") - - # Detokenize some input. 
- tokenizer.detokenize([5, 6, 7, 8, 9]) - ``` - """ - - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class" - ) - - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) metadata = cls.presets[preset] vocabulary = keras.utils.get_file( - "vocab.json", + "vocab.txt", metadata["vocabulary_url"], cache_subdir=os.path.join("models", preset), file_hash=metadata["vocabulary_hash"], @@ -655,6 +694,41 @@ def from_preset( return cls.from_config({**config, **kwargs}) + @classmethod + def from_preset( + cls, + preset, + **kwargs, + ): + """Instantiate {{model_name}} tokenizer from preset vocabulary. + + Args: + preset: string. Must be one of "{{preset_names}}". + + Examples: + ```python + # Load a preset tokenizer. + tokenizer = {{model_name}}.from_preset("{{example_preset_name}}") + + # Tokenize some input. + tokenizer("The quick brown fox tripped.") + + # Detokenize some input. + tokenizer.detokenize([5, 6, 7, 8, 9]) + ``` + """ + # TODO: delete me! + if preset in cls.presets: + return cls._legacy_from_preset(preset, **kwargs) + + config_file = "tokenizer.json" + check_preset_class(preset, cls, config_file=config_file) + return load_from_preset( + preset, + config_file=config_file, + config_overrides=kwargs, + ) + def __init_subclass__(cls, **kwargs): # Use __init_subclass__ to setup a correct docstring for from_preset. super().__init_subclass__(**kwargs) diff --git a/keras_nlp/tokenizers/word_piece_tokenizer.py b/keras_nlp/tokenizers/word_piece_tokenizer.py index ffd3b29fe7..6d1fa8e7f1 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer.py @@ -35,7 +35,7 @@ except ImportError: tf_text = None -FILENAME = "vocabulary.txt" +VOCAB_FILENAME = "vocabulary.txt" # Matches whitespace and control characters. WHITESPACE_REGEX = r"|".join( @@ -329,12 +329,14 @@ def __init__( self.set_vocabulary(vocabulary) def save_assets(self, dir_path): - with tf.io.gfile.GFile(os.path.join(dir_path, FILENAME), "w") as file: + path = os.path.join(dir_path, VOCAB_FILENAME) + with open(path, "w") as file: for token in self.vocabulary: file.write(f"{token}\n") def load_assets(self, dir_path): - self.set_vocabulary(os.path.join(dir_path, FILENAME)) + path = os.path.join(dir_path, VOCAB_FILENAME) + self.set_vocabulary(path) def set_vocabulary(self, vocabulary): """Set the tokenizer vocabulary to a file or list of strings.""" @@ -344,7 +346,7 @@ def set_vocabulary(self, vocabulary): return if isinstance(vocabulary, str): - with tf.io.gfile.GFile(vocabulary) as file: + with open(vocabulary) as file: self.vocabulary = [line.rstrip() for line in file] elif isinstance(vocabulary, Iterable): # Make a defensive copy. @@ -375,14 +377,17 @@ def set_vocabulary(self, vocabulary): def get_vocabulary(self) -> List[str]: """Get the tokenizer vocabulary as a list of strings tokens.""" + self._check_vocabulary() return self.vocabulary def vocabulary_size(self) -> int: """Get the size of the tokenizer vocabulary.""" + self._check_vocabulary() return len(self.vocabulary) def id_to_token(self, id: int) -> str: """Convert an integer id to a string token.""" + self._check_vocabulary() if id >= self.vocabulary_size() or id < 0: raise ValueError( f"`id` must be in range [0, {self.vocabulary_size() - 1}]. " @@ -395,6 +400,7 @@ def token_to_id(self, token: str) -> int: # This will be slow, but keep memory usage down compared to building a # . 
Assuming the main use case is looking up a few special tokens # early in the vocab, this should be fine. + self._check_vocabulary() return self.vocabulary.index(token) def get_config(self): @@ -412,7 +418,15 @@ def get_config(self): ) return config + def _check_vocabulary(self): + if self.vocabulary is None: + raise ValueError( + "No vocabulary has been set for WordPieceTokenizer. Make sure " + "to pass a `vocabulary` argument when creating the layer." + ) + def tokenize(self, inputs): + self._check_vocabulary() if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)): inputs = tf.convert_to_tensor(inputs) @@ -426,11 +440,6 @@ def tokenize(self, inputs): ) # Apply WordPiece and coerce shape for outputs. - if self._fast_word_piece is None: - raise ValueError( - "No vocabulary has been set for WordPieceTokenizer. Make sure " - "to pass a `vocabulary` argument when creating the layer." - ) tokens = self._fast_word_piece.tokenize(inputs) # By default tf.text tokenizes text with two ragged dimensions (one for # split words and one for split subwords). We will collapse to a single @@ -450,6 +459,7 @@ def tokenize(self, inputs): return tokens def detokenize(self, inputs): + self._check_vocabulary() inputs, unbatched, _ = convert_to_ragged_batch(inputs) outputs = self._fast_word_piece.detokenize(inputs) if unbatched: From 0e3c6740380e710eac7fafdf0d5364ee68c20312 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Tue, 21 Nov 2023 12:02:40 -1000 Subject: [PATCH 55/87] Convert SentencePieceTokenizer and associated models to new assets paradigm (#1323) * Convert SentencePiece tokenizer to save_assets/load_assets * Convert albert to new assets paradigm * Convert DebertaV3 to new assets paradigm * Fix formatting issues * Convert FNet to new assets paradigm * Convert XLMRoberta to new assets paradigm * Convert T5 Tokenizer to new assets paradigm * Fix sentencepiece tokenizer config test * Change set_vocabulary to set_proto * Change proto to raw proto * Change to proto_bytes --- .../models/albert/albert_classifier_test.py | 2 +- .../albert/albert_masked_lm_preprocessor.py | 37 +++--- .../models/albert/albert_masked_lm_test.py | 2 +- .../models/albert/albert_preprocessor.py | 16 ++- keras_nlp/models/albert/albert_tokenizer.py | 47 +++++--- .../deberta_v3/deberta_v3_classifier_test.py | 2 +- .../deberta_v3_masked_lm_preprocessor.py | 36 ++++-- .../deberta_v3/deberta_v3_masked_lm_test.py | 2 +- .../deberta_v3/deberta_v3_preprocessor.py | 16 ++- .../models/deberta_v3/deberta_v3_tokenizer.py | 53 ++++---- .../models/f_net/f_net_classifier_test.py | 2 +- .../f_net/f_net_masked_lm_preprocessor.py | 37 +++--- .../models/f_net/f_net_masked_lm_test.py | 2 +- keras_nlp/models/f_net/f_net_preprocessor.py | 16 ++- keras_nlp/models/f_net/f_net_tokenizer.py | 46 ++++--- keras_nlp/models/t5/t5_tokenizer.py | 36 +++--- .../xlm_roberta_classifier_test.py | 2 +- .../xlm_roberta_masked_lm_preprocessor.py | 37 +++--- .../xlm_roberta/xlm_roberta_masked_lm_test.py | 2 +- .../xlm_roberta/xlm_roberta_preprocessor.py | 15 ++- .../xlm_roberta/xlm_roberta_tokenizer.py | 17 ++- .../tokenizers/sentence_piece_tokenizer.py | 113 ++++++++++++------ .../sentence_piece_tokenizer_test.py | 1 + 23 files changed, 350 insertions(+), 189 deletions(-) diff --git a/keras_nlp/models/albert/albert_classifier_test.py b/keras_nlp/models/albert/albert_classifier_test.py index ebf8a630eb..5a60ff998b 100644 --- a/keras_nlp/models/albert/albert_classifier_test.py +++ 
b/keras_nlp/models/albert/albert_classifier_test.py @@ -42,7 +42,7 @@ def setUp(self): hidden_dim=2, embedding_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/albert/albert_masked_lm_preprocessor.py b/keras_nlp/models/albert/albert_masked_lm_preprocessor.py index 9a52d28a2c..89cf134465 100644 --- a/keras_nlp/models/albert/albert_masked_lm_preprocessor.py +++ b/keras_nlp/models/albert/albert_masked_lm_preprocessor.py @@ -131,18 +131,27 @@ def __init__( truncate=truncate, **kwargs, ) - + self.mask_selection_rate = mask_selection_rate + self.mask_selection_length = mask_selection_length + self.mask_token_rate = mask_token_rate + self.random_token_rate = random_token_rate + self.masker = None + + def build(self, input_shape): + super().build(input_shape) + # Defer masker creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.cls_token_id, - tokenizer.sep_token_id, - tokenizer.pad_token_id, + self.tokenizer.cls_token_id, + self.tokenizer.sep_token_id, + self.tokenizer.pad_token_id, ], ) @@ -150,10 +159,10 @@ def get_config(self): config = super().get_config() config.update( { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, } ) return config diff --git a/keras_nlp/models/albert/albert_masked_lm_test.py b/keras_nlp/models/albert/albert_masked_lm_test.py index f992ed2b37..49e6a595cd 100644 --- a/keras_nlp/models/albert/albert_masked_lm_test.py +++ b/keras_nlp/models/albert/albert_masked_lm_test.py @@ -50,7 +50,7 @@ def setUp(self): hidden_dim=2, embedding_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/albert/albert_preprocessor.py b/keras_nlp/models/albert/albert_preprocessor.py index 4849aab392..5d5628a729 100644 --- a/keras_nlp/models/albert/albert_preprocessor.py +++ b/keras_nlp/models/albert/albert_preprocessor.py @@ -158,20 +158,28 @@ def __init__( ): super().__init__(**kwargs) self.tokenizer = tokenizer + self.truncate = truncate + self.sequence_length = sequence_length + self.packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. 
         self.packer = MultiSegmentPacker(
             start_value=self.tokenizer.cls_token_id,
             end_value=self.tokenizer.sep_token_id,
             pad_value=self.tokenizer.pad_token_id,
-            truncate=truncate,
-            sequence_length=sequence_length,
+            truncate=self.truncate,
+            sequence_length=self.sequence_length,
         )
+        self.built = True
 
     def get_config(self):
         config = super().get_config()
         config.update(
             {
-                "sequence_length": self.packer.sequence_length,
-                "truncate": self.packer.truncate,
+                "sequence_length": self.sequence_length,
+                "truncate": self.truncate,
             }
         )
         return config
diff --git a/keras_nlp/models/albert/albert_tokenizer.py b/keras_nlp/models/albert/albert_tokenizer.py
index 1b85be3a99..44aed44cf5 100644
--- a/keras_nlp/models/albert/albert_tokenizer.py
+++ b/keras_nlp/models/albert/albert_tokenizer.py
@@ -87,25 +87,38 @@ class AlbertTokenizer(SentencePieceTokenizer):
     """
 
     def __init__(self, proto, **kwargs):
+        self.cls_token = "[CLS]"
+        self.sep_token = "[SEP]"
+        self.pad_token = "<pad>"
+        self.mask_token = "[MASK]"
+
         super().__init__(proto=proto, **kwargs)
 
-        # Check for necessary special tokens.
-        cls_token = "[CLS]"
-        sep_token = "[SEP]"
-        pad_token = "<pad>"
-        mask_token = "[MASK]"
-        for token in [cls_token, sep_token, pad_token, mask_token]:
-            if token not in self.get_vocabulary():
-                raise ValueError(
-                    f"Cannot find token `'{token}'` in the provided "
-                    f"`vocabulary`. Please provide `'{token}'` in your "
-                    "`vocabulary` or use a pretrained `vocabulary` name."
-                )
-
-        self.cls_token_id = self.token_to_id(cls_token)
-        self.sep_token_id = self.token_to_id(sep_token)
-        self.pad_token_id = self.token_to_id(pad_token)
-        self.mask_token_id = self.token_to_id(mask_token)
+    def set_proto(self, proto):
+        super().set_proto(proto)
+        if proto is not None:
+            for token in [
+                self.cls_token,
+                self.sep_token,
+                self.pad_token,
+                self.mask_token,
+            ]:
+                if token not in self.get_vocabulary():
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. Please provide `'{token}'` in your "
+                        "`vocabulary` or use a pretrained `vocabulary` name."
+ ) + + self.cls_token_id = self.token_to_id(self.cls_token) + self.sep_token_id = self.token_to_id(self.sep_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.mask_token_id = self.token_to_id(self.mask_token) + else: + self.cls_token_id = None + self.sep_token_id = None + self.pad_token_id = None + self.mask_token_id = None @classproperty def presets(cls): diff --git a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py index 0e0ab7642d..7d4f61e045 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_classifier_test.py @@ -45,7 +45,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py index 1644c13823..519b0b4fca 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_preprocessor.py @@ -133,17 +133,27 @@ def __init__( **kwargs, ) + self.mask_selection_rate = mask_selection_rate + self.mask_selection_length = mask_selection_length + self.mask_token_rate = mask_token_rate + self.random_token_rate = random_token_rate + self.masker = None + + def build(self, input_shape): + super().build(input_shape) + # Defer masker creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.cls_token_id, - tokenizer.sep_token_id, - tokenizer.pad_token_id, + self.tokenizer.cls_token_id, + self.tokenizer.sep_token_id, + self.tokenizer.pad_token_id, ], ) @@ -151,10 +161,10 @@ def get_config(self): config = super().get_config() config.update( { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, } ) return config diff --git a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py index 32bf71de13..b103f390f6 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_masked_lm_test.py @@ -48,7 +48,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, 
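Every file in this patch follows the same refactor: `__init__` now only records raw constructor arguments, and any sub-layer whose construction needs vocabulary lookups (packers, maskers) moves into `build()`, which Keras only calls after `load_assets()` has restored the tokenizer. Below is a minimal runnable sketch of the pattern under the TensorFlow backend; `ToyPreprocessor` and its vocabulary are illustrative and not part of KerasNLP:

import keras
import keras_nlp

class ToyPreprocessor(keras.layers.Layer):
    def __init__(self, tokenizer, sequence_length=8, **kwargs):
        super().__init__(**kwargs)
        # Store raw arguments only. When a saved model is being restored,
        # the tokenizer vocabulary may not be loaded yet at this point.
        self.tokenizer = tokenizer
        self.sequence_length = sequence_length
        self.packer = None

    def build(self, input_shape):
        # Token id lookups are safe here; `build()` runs after assets load.
        self.packer = keras_nlp.layers.StartEndPacker(
            sequence_length=self.sequence_length,
            start_value=self.tokenizer.token_to_id("[CLS]"),
            end_value=self.tokenizer.token_to_id("[SEP]"),
            pad_value=self.tokenizer.token_to_id("[PAD]"),
        )
        self.built = True

    def call(self, x):
        return self.packer(self.tokenizer(x))

vocab = ["[PAD]", "[CLS]", "[SEP]", "the", "quick", "brown", "fox"]
tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(vocabulary=vocab)
print(ToyPreprocessor(tokenizer)(["the quick brown fox"]))
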
diff --git a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py index dee91dcffa..93f4fbbd22 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_preprocessor.py @@ -156,20 +156,28 @@ def __init__( ): super().__init__(**kwargs) self.tokenizer = tokenizer + self.truncate = truncate + self.sequence_length = sequence_length + self.packer = None + + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.packer = MultiSegmentPacker( start_value=self.tokenizer.cls_token_id, end_value=self.tokenizer.sep_token_id, pad_value=self.tokenizer.pad_token_id, - truncate=truncate, - sequence_length=sequence_length, + truncate=self.truncate, + sequence_length=self.sequence_length, ) + self.built = True def get_config(self): config = super().get_config() config.update( { - "sequence_length": self.packer.sequence_length, - "truncate": self.packer.truncate, + "sequence_length": self.sequence_length, + "truncate": self.truncate, } ) return config diff --git a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py index 03c9cd5821..e66c373e65 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_tokenizer.py @@ -93,33 +93,38 @@ class DebertaV3Tokenizer(SentencePieceTokenizer): """ def __init__(self, proto, **kwargs): + self.cls_token = "[CLS]" + self.sep_token = "[SEP]" + self.pad_token = "[PAD]" + self.mask_token = "[MASK]" + super().__init__(proto=proto, **kwargs) - # Check for necessary special tokens. - cls_token = "[CLS]" - sep_token = "[SEP]" - pad_token = "[PAD]" - mask_token = "[MASK]" - - # We do not throw an error if `mask_token` is not present in the - # vocabulary. - for token in [cls_token, pad_token, sep_token]: - if token not in super().get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.cls_token_id = self.token_to_id(cls_token) - self.sep_token_id = self.token_to_id(sep_token) - self.pad_token_id = self.token_to_id(pad_token) - # If the mask token is not in the vocabulary, add it to the end of the - # vocabulary. - if mask_token in super().get_vocabulary(): - self.mask_token_id = super().token_to_id(mask_token) + def set_proto(self, proto): + super().set_proto(proto) + if proto is not None: + for token in [self.cls_token, self.pad_token, self.sep_token]: + if token not in super().get_vocabulary(): + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." + ) + + self.cls_token_id = self.token_to_id(self.cls_token) + self.sep_token_id = self.token_to_id(self.sep_token) + self.pad_token_id = self.token_to_id(self.pad_token) + # If the mask token is not in the vocabulary, add it to the end of the + # vocabulary. 
+ if self.mask_token in super().get_vocabulary(): + self.mask_token_id = super().token_to_id(self.mask_token) + else: + self.mask_token_id = super().vocabulary_size() else: - self.mask_token_id = super().vocabulary_size() + self.cls_token_id = None + self.sep_token_id = None + self.pad_token_id = None + self.mask_token_id = None def vocabulary_size(self): sentence_piece_size = super().vocabulary_size() diff --git a/keras_nlp/models/f_net/f_net_classifier_test.py b/keras_nlp/models/f_net/f_net_classifier_test.py index c871fbcc7b..4defce4a71 100644 --- a/keras_nlp/models/f_net/f_net_classifier_test.py +++ b/keras_nlp/models/f_net/f_net_classifier_test.py @@ -40,7 +40,7 @@ def setUp(self): num_layers=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py index 87fa1a316d..51b4a4d1e7 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_preprocessor.py @@ -136,18 +136,27 @@ def __init__( truncate=truncate, **kwargs, ) - + self.mask_selection_rate = mask_selection_rate + self.mask_selection_length = mask_selection_length + self.mask_token_rate = mask_token_rate + self.random_token_rate = random_token_rate + self.masker = None + + def build(self, input_shape): + super().build(input_shape) + # Defer masker creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.cls_token_id, - tokenizer.sep_token_id, - tokenizer.pad_token_id, + self.tokenizer.cls_token_id, + self.tokenizer.sep_token_id, + self.tokenizer.pad_token_id, ], ) @@ -155,10 +164,10 @@ def get_config(self): config = super().get_config() config.update( { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, } ) return config diff --git a/keras_nlp/models/f_net/f_net_masked_lm_test.py b/keras_nlp/models/f_net/f_net_masked_lm_test.py index b4931a76fc..1db6b361ed 100644 --- a/keras_nlp/models/f_net/f_net_masked_lm_test.py +++ b/keras_nlp/models/f_net/f_net_masked_lm_test.py @@ -47,7 +47,7 @@ def setUp(self): num_layers=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/f_net/f_net_preprocessor.py 
b/keras_nlp/models/f_net/f_net_preprocessor.py
index 5ebd5d1645..296493c930 100644
--- a/keras_nlp/models/f_net/f_net_preprocessor.py
+++ b/keras_nlp/models/f_net/f_net_preprocessor.py
@@ -129,20 +129,28 @@ def __init__(
     ):
         super().__init__(**kwargs)
         self.tokenizer = tokenizer
+        self.truncate = truncate
+        self.sequence_length = sequence_length
+        self.packer = None
+
+    def build(self, input_shape):
+        # Defer packer creation to `build()` so that we can be sure tokenizer
+        # assets have loaded when restoring a saved model.
         self.packer = MultiSegmentPacker(
             start_value=self.tokenizer.cls_token_id,
             end_value=self.tokenizer.sep_token_id,
             pad_value=self.tokenizer.pad_token_id,
-            truncate=truncate,
-            sequence_length=sequence_length,
+            truncate=self.truncate,
+            sequence_length=self.sequence_length,
         )
+        self.built = True
 
     def get_config(self):
         config = super().get_config()
         config.update(
             {
-                "sequence_length": self.packer.sequence_length,
-                "truncate": self.packer.truncate,
+                "sequence_length": self.sequence_length,
+                "truncate": self.truncate,
             }
         )
         return config
diff --git a/keras_nlp/models/f_net/f_net_tokenizer.py b/keras_nlp/models/f_net/f_net_tokenizer.py
index 294e02e4db..ae3f569b1d 100644
--- a/keras_nlp/models/f_net/f_net_tokenizer.py
+++ b/keras_nlp/models/f_net/f_net_tokenizer.py
@@ -63,25 +63,37 @@ class FNetTokenizer(SentencePieceTokenizer):
     """
 
     def __init__(self, proto, **kwargs):
+        self.cls_token = "[CLS]"
+        self.sep_token = "[SEP]"
+        self.pad_token = "<pad>"
+        self.mask_token = "[MASK]"
         super().__init__(proto=proto, **kwargs)
 
-        # Check for necessary special tokens.
-        cls_token = "[CLS]"
-        sep_token = "[SEP]"
-        pad_token = "<pad>"
-        mask_token = "[MASK]"
-        for token in [cls_token, sep_token, pad_token, mask_token]:
-            if token not in self.get_vocabulary():
-                raise ValueError(
-                    f"Cannot find token `'{token}'` in the provided "
-                    f"`vocabulary`. Please provide `'{token}'` in your "
-                    "`vocabulary` or use a pretrained `vocabulary` name."
-                )
-
-        self.cls_token_id = self.token_to_id(cls_token)
-        self.sep_token_id = self.token_to_id(sep_token)
-        self.pad_token_id = self.token_to_id(pad_token)
-        self.mask_token_id = self.token_to_id(mask_token)
+    def set_proto(self, proto):
+        super().set_proto(proto)
+        if proto is not None:
+            for token in [
+                self.cls_token,
+                self.sep_token,
+                self.pad_token,
+                self.mask_token,
+            ]:
+                if token not in self.get_vocabulary():
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. Please provide `'{token}'` in your "
+                        "`vocabulary` or use a pretrained `vocabulary` name."
+                    )
+
+            self.cls_token_id = self.token_to_id(self.cls_token)
+            self.sep_token_id = self.token_to_id(self.sep_token)
+            self.pad_token_id = self.token_to_id(self.pad_token)
+            self.mask_token_id = self.token_to_id(self.mask_token)
+        else:
+            self.cls_token_id = None
+            self.sep_token_id = None
+            self.pad_token_id = None
+            self.mask_token_id = None
 
     @classproperty
     def presets(cls):
diff --git a/keras_nlp/models/t5/t5_tokenizer.py b/keras_nlp/models/t5/t5_tokenizer.py
index ae9facb318..ec5f0bf324 100644
--- a/keras_nlp/models/t5/t5_tokenizer.py
+++ b/keras_nlp/models/t5/t5_tokenizer.py
@@ -73,20 +73,26 @@ class T5Tokenizer(SentencePieceTokenizer):
     """
 
     def __init__(self, proto, **kwargs):
-        super().__init__(proto=proto, **kwargs)
+        self.end_token = "</s>"
+        self.pad_token = "<pad>"
 
-        # Check for necessary special tokens.
-        end_token = "</s>"
-        pad_token = "<pad>"
-        for token in [pad_token]:
-            if token not in self.get_vocabulary():
-                raise ValueError(
-                    f"Cannot find token `'{token}'` in the provided "
-                    f"`vocabulary`. Please provide `'{token}'` in your "
-                    "`vocabulary` or use a pretrained `vocabulary` name."
-                )
+        super().__init__(proto=proto, **kwargs)
 
-        self.pad_token_id = self.token_to_id(pad_token)
-        self.end_token_id = self.token_to_id(end_token)
-        # T5 uses the same start token as end token, i.e., "</s>".
-        self.start_token_id = self.end_token_id
+    def set_proto(self, proto):
+        super().set_proto(proto)
+        if proto is not None:
+            for token in [self.end_token, self.pad_token]:
+                if token not in self.get_vocabulary():
+                    raise ValueError(
+                        f"Cannot find token `'{token}'` in the provided "
+                        f"`vocabulary`. Please provide `'{token}'` in your "
+                        "`vocabulary` or use a pretrained `vocabulary` name."
+                    )
+            self.end_token_id = self.token_to_id(self.end_token)
+            self.pad_token_id = self.token_to_id(self.pad_token)
+            # T5 uses the same start token as end token, i.e., "</s>".
+            self.start_token_id = self.end_token_id
+        else:
+            self.end_token_id = None
+            self.pad_token_id = None
+            self.start_token_id = None
diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py
index 8255a40cf5..bfa6500247 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_classifier_test.py
@@ -47,7 +47,7 @@ def setUp(self):
             num_heads=2,
             hidden_dim=2,
             intermediate_dim=4,
-            max_sequence_length=self.preprocessor.packer.sequence_length,
+            max_sequence_length=self.preprocessor.sequence_length,
         )
         self.init_kwargs = {
             "preprocessor": self.preprocessor,
diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py
index 2a8750c583..a26905e9e3 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_preprocessor.py
@@ -137,18 +137,27 @@ def __init__(
             truncate=truncate,
             **kwargs,
         )
-
+        self.mask_selection_rate = mask_selection_rate
+        self.mask_selection_length = mask_selection_length
+        self.mask_token_rate = mask_token_rate
+        self.random_token_rate = random_token_rate
+        self.masker = None
+
+    def build(self, input_shape):
+        super().build(input_shape)
+        # Defer masker creation to `build()` so that we can be sure tokenizer
+        # assets have loaded when restoring a saved model.
self.masker = MaskedLMMaskGenerator( - mask_selection_rate=mask_selection_rate, - mask_selection_length=mask_selection_length, - mask_token_rate=mask_token_rate, - random_token_rate=random_token_rate, - vocabulary_size=tokenizer.vocabulary_size(), - mask_token_id=tokenizer.mask_token_id, + mask_selection_rate=self.mask_selection_rate, + mask_selection_length=self.mask_selection_length, + mask_token_rate=self.mask_token_rate, + random_token_rate=self.random_token_rate, + vocabulary_size=self.tokenizer.vocabulary_size(), + mask_token_id=self.tokenizer.mask_token_id, unselectable_token_ids=[ - tokenizer.start_token_id, - tokenizer.end_token_id, - tokenizer.pad_token_id, + self.tokenizer.start_token_id, + self.tokenizer.end_token_id, + self.tokenizer.pad_token_id, ], ) @@ -156,10 +165,10 @@ def get_config(self): config = super().get_config() config.update( { - "mask_selection_rate": self.masker.mask_selection_rate, - "mask_selection_length": self.masker.mask_selection_length, - "mask_token_rate": self.masker.mask_token_rate, - "random_token_rate": self.masker.random_token_rate, + "mask_selection_rate": self.mask_selection_rate, + "mask_selection_length": self.mask_selection_length, + "mask_token_rate": self.mask_token_rate, + "random_token_rate": self.random_token_rate, } ) return config diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py index bcbafe4ad9..d9a1ce68f1 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_masked_lm_test.py @@ -52,7 +52,7 @@ def setUp(self): num_heads=2, hidden_dim=2, intermediate_dim=4, - max_sequence_length=self.preprocessor.packer.sequence_length, + max_sequence_length=self.preprocessor.sequence_length, ) self.init_kwargs = { "preprocessor": self.preprocessor, diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py index e557bd0635..23b48073f7 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_preprocessor.py @@ -156,22 +156,29 @@ def __init__( super().__init__(**kwargs) self.tokenizer = tokenizer + self.truncate = truncate + self.sequence_length = sequence_length + self.packer = None + def build(self, input_shape): + # Defer packer creation to `build()` so that we can be sure tokenizer + # assets have loaded when restoring a saved model. self.packer = MultiSegmentPacker( start_value=self.tokenizer.start_token_id, end_value=self.tokenizer.end_token_id, sep_value=[self.tokenizer.end_token_id] * 2, pad_value=self.tokenizer.pad_token_id, - truncate=truncate, - sequence_length=sequence_length, + truncate=self.truncate, + sequence_length=self.sequence_length, ) + self.built = True def get_config(self): config = super().get_config() config.update( { - "sequence_length": self.packer.sequence_length, - "truncate": self.packer.truncate, + "sequence_length": self.sequence_length, + "truncate": self.truncate, } ) return config diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py index eacdcc7337..576f30bca1 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_tokenizer.py @@ -88,8 +88,6 @@ def train_sentencepiece(ds, vocab_size): """ def __init__(self, proto, **kwargs): - super().__init__(proto=proto, **kwargs) - # List of special tokens. 
         self._vocabulary_prefix = ["<s>", "<pad>", "</s>", "<unk>"]
 
@@ -98,7 +96,15 @@ def __init__(self, proto, **kwargs):
         self.pad_token_id = 1  # <pad>
         self.end_token_id = 2  # </s>
         self.unk_token_id = 3  # <unk>
-        self.mask_token_id = self.vocabulary_size() - 1  # <mask>
+
+        super().__init__(proto=proto, **kwargs)
+
+    def set_proto(self, proto):
+        super().set_proto(proto)
+        if proto is not None:
+            self.mask_token_id = self.vocabulary_size() - 1
+        else:
+            self.mask_token_id = None
 
     def vocabulary_size(self):
         """Get the size of the tokenizer vocabulary."""
@@ -106,6 +112,7 @@ def vocabulary_size(self):
 
     def get_vocabulary(self):
         """Get the size of the tokenizer vocabulary."""
+        self._check_vocabulary()
         vocabulary = tensor_to_list(
             self._sentence_piece.id_to_string(
                 tf.range(super().vocabulary_size())
@@ -115,6 +122,7 @@ def get_vocabulary(self):
 
     def id_to_token(self, id):
         """Convert an integer id to a string token."""
+        self._check_vocabulary()
         if id == self.mask_token_id:
             return "<mask>"
 
@@ -132,6 +140,7 @@ def id_to_token(self, id):
 
     def token_to_id(self, token):
         """Convert a string token to an integer id."""
+        self._check_vocabulary()
         if token in self._vocabulary_prefix:
             return self._vocabulary_prefix.index(token)
 
@@ -146,6 +155,7 @@ def token_to_id(self, token):
         return int(spm_token_id.numpy()) + 1
 
     def tokenize(self, inputs):
+        self._check_vocabulary()
         tokens = super().tokenize(inputs)
 
         # Correct `unk_token_id` (0 -> 3). Note that we do not correct
@@ -157,6 +167,7 @@ def tokenize(self, inputs):
         return tf.add(tokens, 1)
 
     def detokenize(self, inputs):
+        self._check_vocabulary()
         tokens = tf.ragged.boolean_mask(
             inputs, tf.not_equal(inputs, self.mask_token_id)
         )
diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer.py b/keras_nlp/tokenizers/sentence_piece_tokenizer.py
index 3a53c12ad3..0f95930abe 100644
--- a/keras_nlp/tokenizers/sentence_piece_tokenizer.py
+++ b/keras_nlp/tokenizers/sentence_piece_tokenizer.py
@@ -22,6 +22,8 @@
 from keras_nlp.api_export import keras_nlp_export
 from keras_nlp.backend import keras
 from keras_nlp.tokenizers import tokenizer
+from keras_nlp.utils.preset_utils import check_preset_class
+from keras_nlp.utils.preset_utils import load_from_preset
 from keras_nlp.utils.python_utils import classproperty
 from keras_nlp.utils.python_utils import format_docstring
 from keras_nlp.utils.tensor_utils import assert_tf_text_installed
@@ -36,6 +38,9 @@
     tf_text = None
 
 
+VOCAB_FILENAME = "vocabulary.txt"
+
+
 @keras_nlp_export("keras_nlp.tokenizers.SentencePieceTokenizer")
 class SentencePieceTokenizer(tokenizer.Tokenizer):
     """A SentencePiece tokenizer layer.
@@ -106,7 +111,7 @@ def train_sentence_piece_file(ds, path, size):
 
     def __init__(
         self,
-        proto,
+        proto=None,
         sequence_length: int = None,
         dtype="int32",
         **kwargs,
@@ -121,6 +126,25 @@ def __init__(
 
         super().__init__(dtype=dtype, **kwargs)
 
+        self.proto = None
+        self.sequence_length = sequence_length
+        self.set_proto(proto)
+
+    def save_assets(self, dir_path):
+        path = os.path.join(dir_path, VOCAB_FILENAME)
+        with open(path, "w") as file:
+            file.write(self.proto)
+
+    def load_assets(self, dir_path):
+        path = os.path.join(dir_path, VOCAB_FILENAME)
+        self.set_proto(path)
+
+    def set_proto(self, proto):
+        if proto is None:
+            self.proto = None
+            self._sentence_piece = None
+            return
+
         if isinstance(proto, str):
            # A string could be either a filepath, or a base64 encoded byte
            # array (which we need for serialization). 
We will heuristically @@ -134,7 +158,7 @@ def __init__( except binascii.Error: pass if not is_base64: - proto_bytes = tf.io.gfile.GFile(proto, "rb").read() + proto_bytes = open(proto, "rb").read() elif isinstance(proto, bytes): proto_bytes = proto else: @@ -148,18 +172,18 @@ def __init__( model=proto_bytes, out_type=self.compute_dtype, ) - # Keras cannot serialize a bytestring, so we base64 encode the model # byte array as a string for saving. - self.proto = base64.b64encode(proto_bytes).decode("ascii") - self.sequence_length = sequence_length + self.proto = proto_bytes def vocabulary_size(self) -> int: """Get the size of the tokenizer vocabulary.""" + self._check_vocabulary() return int(self._sentence_piece.vocab_size().numpy()) def get_vocabulary(self) -> List[str]: """Get the tokenizer vocabulary.""" + self._check_vocabulary() return tensor_to_list( self._sentence_piece.id_to_string( tf.range(int(self._sentence_piece.vocab_size().numpy())) @@ -168,6 +192,7 @@ def get_vocabulary(self) -> List[str]: def id_to_token(self, id: int) -> str: """Convert an integer id to a string token.""" + self._check_vocabulary() if id >= self.vocabulary_size() or id < 0: raise ValueError( f"`id` must be in range [0, {self.vocabulary_size() - 1}]. " @@ -177,28 +202,40 @@ def id_to_token(self, id: int) -> str: def token_to_id(self, token: str) -> int: """Convert a string token to an integer id.""" + self._check_vocabulary() return int(self._sentence_piece.string_to_id(token).numpy()) def get_config(self): config = super().get_config() config.update( { - # Ideally the model would be saved as a file asset in - # the saved model. We have no good way to support this - # currently, so we save the model string in the config. - "proto": self.proto, + "proto": None, # Save vocabulary via an asset! "sequence_length": self.sequence_length, } ) return config + def _check_vocabulary(self): + if self.proto is None: + raise ValueError( + "No vocabulary has been set for SentencePieceTokenizer. Make " + "sure to pass a `proto` argument when creating the layer." + ) + def tokenize(self, inputs): + self._check_vocabulary() if not isinstance(inputs, (tf.Tensor, tf.RaggedTensor)): inputs = tf.convert_to_tensor(inputs) scalar_input = inputs.shape.rank == 0 if scalar_input: inputs = tf.expand_dims(inputs, 0) + if self._sentence_piece is None: + raise ValueError( + "No vocabulary has been set for SentencePieceTokenizer. Make " + "sure to pass a `vocabulary` argument when creating the layer." + ) + tokens = self._sentence_piece.tokenize(inputs) # Convert to a dense output if `sequence_length` is set. 
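Taken together, the hunks above give the tokenizer a vocabulary-less state: `proto=None` at construction, with `set_proto()` called later by `load_assets()`. A sketch of the intended round trip follows; the tiny training corpus and directory name are illustrative, and it assumes the binary write mode ("wb") that patch 56 later in this series adds to `save_assets()`:

import io
import os

import sentencepiece
import keras_nlp

# Train a tiny throwaway SentencePiece model to get a proto.
bytes_io = io.BytesIO()
sentencepiece.SentencePieceTrainer.train(
    sentence_iterator=iter(["The quick brown fox jumped."]),
    model_writer=bytes_io,
    vocab_size=8,
)
tokenizer = keras_nlp.tokenizers.SentencePieceTokenizer(
    proto=bytes_io.getvalue(),
)

# Round trip the vocabulary through the new asset hooks.
asset_dir = "./tokenizer_assets"
os.makedirs(asset_dir, exist_ok=True)
tokenizer.save_assets(asset_dir)

restored = keras_nlp.tokenizers.SentencePieceTokenizer()  # proto=None
restored.load_assets(asset_dir)  # calls `set_proto()` with the file path
assert restored.vocabulary_size() == tokenizer.vocabulary_size()
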
@@ -215,6 +252,7 @@ def tokenize(self, inputs): return tokens def detokenize(self, inputs): + self._check_vocabulary() inputs, unbatched, _ = convert_to_ragged_batch(inputs) outputs = self._sentence_piece.detokenize(inputs) if unbatched: @@ -225,6 +263,30 @@ def detokenize(self, inputs): def presets(cls): return {} + @classmethod + def _legacy_from_preset( + cls, + preset, + **kwargs, + ): + metadata = cls.presets[preset] + + spm_proto = keras.utils.get_file( + "vocab.spm", + metadata["spm_proto_url"], + cache_subdir=os.path.join("models", preset), + file_hash=metadata["spm_proto_hash"], + ) + + config = metadata["preprocessor_config"] + config.update( + { + "proto": spm_proto, + }, + ) + + return cls.from_config({**config, **kwargs}) + @classmethod def from_preset( cls, @@ -249,34 +311,17 @@ def from_preset( ``` """ - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class" - ) - - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) - metadata = cls.presets[preset] - - spm_proto = keras.utils.get_file( - "vocab.spm", - metadata["spm_proto_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["spm_proto_hash"], - ) + if preset in cls.presets: + return cls._legacy_from_preset(preset, **kwargs) - config = metadata["preprocessor_config"] - config.update( - { - "proto": spm_proto, - }, + config_file = "tokenizer.json" + check_preset_class(preset, cls, config_file=config_file) + return load_from_preset( + preset, + config_file=config_file, + config_overrides=kwargs, ) - return cls.from_config({**config, **kwargs}) - def __init_subclass__(cls, **kwargs): # Use __init_subclass__ to setup a correct docstring for from_preset. 
super().__init_subclass__(**kwargs) diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py index f3b39711bd..74477cdf03 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer_test.py @@ -161,6 +161,7 @@ def test_config(self): cloned_tokenizer = SentencePieceTokenizer.from_config( original_tokenizer.get_config() ) + cloned_tokenizer.set_proto(original_tokenizer.proto) self.assertAllEqual( original_tokenizer(input_data), cloned_tokenizer(input_data), From 3619a6a4c5746b1a6717e8341da6e59c90b61adf Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Wed, 22 Nov 2023 14:44:31 -1000 Subject: [PATCH 56/87] Add tests for Presets workflow, Add Metadata (#1326) * Add metadata and Albert preset utils test * Add Bart bytepiece preset workflow test * Add BERT WordPiece preset workflow test * Parameterize tests, switch to classifier, address comments * Address comments and nits * Fix formatting * Add large test marker --- .../tokenizers/sentence_piece_tokenizer.py | 4 +- keras_nlp/utils/preset_utils.py | 23 +++-- keras_nlp/utils/preset_utils_test.py | 89 +++++++++++++++++++ 3 files changed, 106 insertions(+), 10 deletions(-) create mode 100644 keras_nlp/utils/preset_utils_test.py diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer.py b/keras_nlp/tokenizers/sentence_piece_tokenizer.py index 0f95930abe..eb6abb8140 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer.py @@ -38,7 +38,7 @@ tf_text = None -VOCAB_FILENAME = "vocabulary.txt" +VOCAB_FILENAME = "vocabulary.spm" @keras_nlp_export("keras_nlp.tokenizers.SentencePieceTokenizer") @@ -132,7 +132,7 @@ def __init__( def save_assets(self, dir_path): path = os.path.join(dir_path, VOCAB_FILENAME) - with open(path, "w") as file: + with open(path, "wb") as file: file.write(self.proto) def load_assets(self, dir_path): diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index e137bcccc1..1657d8e746 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -100,19 +100,26 @@ def save_to_preset( # Include references to weights and assets. config["assets"] = assets config["weights"] = weights_filename if save_weights else None - recursive_pop(config, "config_config") + recursive_pop(config, "compile_config") recursive_pop(config, "build_config") with open(config_path, "w") as config_file: config_file.write(json.dumps(config, indent=4)) + from keras_nlp import __version__ as keras_nlp_version + + keras_version = keras.version() if hasattr(keras, "version") else None + # Save any associated metadata. - metadata = { - # TODO: save keras version and keras-nlp version. 
- "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"), - } - metadata_path = os.path.join(preset, "metadata.json") - with open(metadata_path, "w") as metadata_file: - metadata_file.write(json.dumps(metadata, indent=4)) + if config_filename == "config.json": + metadata = { + "keras_version": keras_version, + "keras_nlp_version": keras_nlp_version, + "parameter_count": layer.count_params(), + "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"), + } + metadata_path = os.path.join(preset, "metadata.json") + with open(metadata_path, "w") as metadata_file: + metadata_file.write(json.dumps(metadata, indent=4)) def load_from_preset( diff --git a/keras_nlp/utils/preset_utils_test.py b/keras_nlp/utils/preset_utils_test.py new file mode 100644 index 0000000000..40eb99fb01 --- /dev/null +++ b/keras_nlp/utils/preset_utils_test.py @@ -0,0 +1,89 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os + +import pytest +from absl.testing import parameterized + +from keras_nlp.models import AlbertClassifier +from keras_nlp.models import BertClassifier +from keras_nlp.models import RobertaClassifier +from keras_nlp.tests.test_case import TestCase +from keras_nlp.utils import preset_utils + + +class PresetUtilsTest(TestCase): + @parameterized.parameters( + (AlbertClassifier, "albert_base_en_uncased", "sentencepiece"), + (RobertaClassifier, "roberta_base_en", "bytepair"), + (BertClassifier, "bert_tiny_en_uncased", "wordpiece"), + ) + @pytest.mark.keras_3_only + @pytest.mark.large + def test_preset_saving(self, cls, preset_name, tokenizer_type): + save_dir = self.get_temp_dir() + model = cls.from_preset(preset_name, num_classes=2) + preset_utils.save_to_preset(model, save_dir) + + if tokenizer_type == "bytepair": + vocab_filename = "assets/tokenizer/vocabulary.json" + expected_assets = [ + "assets/tokenizer/vocabulary.json", + "assets/tokenizer/merges.txt", + ] + elif tokenizer_type == "sentencepiece": + vocab_filename = "assets/tokenizer/vocabulary.spm" + expected_assets = ["assets/tokenizer/vocabulary.spm"] + else: + vocab_filename = "assets/tokenizer/vocabulary.txt" + expected_assets = ["assets/tokenizer/vocabulary.txt"] + + # Check existence of files + self.assertTrue(os.path.exists(os.path.join(save_dir, vocab_filename))) + self.assertTrue(os.path.exists(os.path.join(save_dir, "config.json"))) + self.assertTrue( + os.path.exists(os.path.join(save_dir, "model.weights.h5")) + ) + self.assertTrue(os.path.exists(os.path.join(save_dir, "metadata.json"))) + + # Check the model config (`config.json`) + config_json = open(os.path.join(save_dir, "config.json"), "r").read() + self.assertTrue( + "build_config" not in config_json + ) # Test on raw json to include nested keys + self.assertTrue( + "compile_config" not in config_json + ) # Test on raw json to include nested keys + config = json.loads(config_json) + self.assertAllEqual(config["assets"], expected_assets) + self.assertEqual(config["weights"], "model.weights.h5") + + # 
Try loading the model from preset directory + restored_model = preset_utils.load_from_preset(save_dir) + + train_data = ( + ["the quick brown fox.", "the slow brown fox."], # Features. + ) + model_input_data = model.preprocessor(*train_data) + restored_model_input_data = restored_model.preprocessor(*train_data) + + # Check that saved vocab is equal to the original preset vocab + self.assertAllClose(model_input_data, restored_model_input_data) + + # Check model outputs + self.assertAllEqual( + model(model_input_data), restored_model(restored_model_input_data) + ) From 38806fd292c3dc6600efe1cb2762425189300599 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Tue, 28 Nov 2023 18:29:36 -0800 Subject: [PATCH 57/87] Automatically add the keras framework to kaggle handles (#1331) --- keras_nlp/utils/preset_utils.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index 1657d8e746..04ca3a39cd 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -30,17 +30,25 @@ def get_file(preset, path): """Download a preset file in necessary and return the local path.""" if preset.startswith(KAGGLE_PREFIX): - kaggle_handle = preset.removeprefix(KAGGLE_PREFIX) if kagglehub is None: raise ImportError( "`from_preset()` requires the `kagglehub` package. " "Please install with `pip install kagglehub`." ) - if len(kaggle_handle.split("/")) not in (4, 5): + segments = preset.removeprefix(KAGGLE_PREFIX).split("/") + # Insert the kaggle framework into the handle. + if len(segments) == 3: + org, model, variant = segments + kaggle_handle = f"{org}/{model}/keras/{variant}/1" + elif len(segments) == 4: + org, model, variant, version = segments + kaggle_handle = f"{org}/{model}/keras/{variant}/{version}" + else: raise ValueError( - "Unexpected kaggle preset handle. Kaggle model handles should have " - "the form kaggle://{org}/{model}/keras/{variant}[/{version}]. For " - "example, kaggle://keras-nlp/albert/keras/bert_base_en_uncased." + "Unexpected kaggle preset handle. Kaggle model handles should " + "have the form kaggle://{org}/{model}/{variant}[/{version}]. " + "For example, 'kaggle://keras/bert/bert_base_en'. " + f"Received: preset={preset}" ) return kagglehub.model_download(kaggle_handle, path) return os.path.join(preset, path) From e0d34dcbb1cd9b7b59ff371bea8a20bc85c99621 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:20:27 -0800 Subject: [PATCH 58/87] Fix a failing byte pair tokenizer test (#1336) Now that vocab and merges are not config (they are assets state), we need to remember to copy them over when cloning a tokenizer. 
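For example (toy vocabulary and merge rules, for illustration only):

import keras_nlp

vocab = {"butter": 1, "fly": 2}
merges = ["b u", "t t", "e r", "bu tt", "butt er", "f l", "l y", "fl y"]
original = keras_nlp.tokenizers.BytePairTokenizer(vocab, merges)

# The config no longer carries the vocabulary, so a clone built from
# config alone has no assets until they are copied over explicitly.
clone = keras_nlp.tokenizers.BytePairTokenizer.from_config(
    original.get_config()
)
clone.set_vocabulary_and_merges(original.vocabulary, original.merges)
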
This failure is only showing up on GPU testing because of
https://github.com/keras-team/keras-nlp/issues/409
---
 keras_nlp/tokenizers/byte_pair_tokenizer_test.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer_test.py b/keras_nlp/tokenizers/byte_pair_tokenizer_test.py
index d5f7b3762a..00f8f9b87f 100644
--- a/keras_nlp/tokenizers/byte_pair_tokenizer_test.py
+++ b/keras_nlp/tokenizers/byte_pair_tokenizer_test.py
@@ -164,6 +164,9 @@ def test_config(self):
         cloned_tokenizer = BytePairTokenizer.from_config(
             self.tokenizer.get_config()
         )
+        cloned_tokenizer.set_vocabulary_and_merges(
+            self.tokenizer.vocabulary, self.tokenizer.merges
+        )
         self.assertAllEqual(
             self.tokenizer(input_data),
             cloned_tokenizer(input_data),

From 0820d62ea5db0c8caf861f3eced736db63fc7334 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Wed, 29 Nov 2023 17:20:36 -0800
Subject: [PATCH 59/87] Use set comparison for assets (#1335)

We were previously relying on the order of `os.listdir`, which is not
guaranteed to be deterministic.

https://github.com/keras-team/keras-nlp/pull/1333/checks?check_run_id=19158555913
---
 keras_nlp/utils/preset_utils_test.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/utils/preset_utils_test.py b/keras_nlp/utils/preset_utils_test.py
index 40eb99fb01..3190e09f50 100644
--- a/keras_nlp/utils/preset_utils_test.py
+++ b/keras_nlp/utils/preset_utils_test.py
@@ -68,7 +68,7 @@ def test_preset_saving(self, cls, preset_name, tokenizer_type):
             "compile_config" not in config_json
         )  # Test on raw json to include nested keys
         config = json.loads(config_json)
-        self.assertAllEqual(config["assets"], expected_assets)
+        self.assertEqual(set(config["assets"]), set(expected_assets))
         self.assertEqual(config["weights"], "model.weights.h5")

         # Try loading the model from preset directory

From c4b0c3c83d9585859356575a74d3a5b52f9f7025 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Wed, 29 Nov 2023 17:24:02 -0800
Subject: [PATCH 60/87] Fix whisper tokenizer saving (#1334)

---
 .../models/whisper/whisper_preprocessor.py    | 109 ++----------------
 keras_nlp/models/whisper/whisper_presets.py   |  18 ---
 keras_nlp/models/whisper/whisper_tokenizer.py |  13 +++
 3 files changed, 23 insertions(+), 117 deletions(-)

diff --git a/keras_nlp/models/whisper/whisper_preprocessor.py b/keras_nlp/models/whisper/whisper_preprocessor.py
index 2f8673c52f..abcff0d770 100644
--- a/keras_nlp/models/whisper/whisper_preprocessor.py
+++ b/keras_nlp/models/whisper/whisper_preprocessor.py
@@ -30,7 +30,6 @@
 )
 from keras_nlp.utils.keras_utils import pack_x_y_sample_weight
 from keras_nlp.utils.python_utils import classproperty
-from keras_nlp.utils.python_utils import format_docstring


 @keras_nlp_export("keras_nlp.models.WhisperPreprocessor")
@@ -49,9 +48,11 @@ class WhisperPreprocessor(Preprocessor):
     directly to a Whisper model.

     Args:
-        audio_feature_extractor: A `keras_nlp.models.WhisperAudioFeatureExtractor`
-            instance.
         tokenizer: A `keras_nlp.models.WhisperTokenizer` instance.
+        audio_feature_extractor: A
+            `keras_nlp.models.WhisperAudioFeatureExtractor` instance or `None`.
+            If `None`, a feature extractor with default parameters will be
+            created.
         decoder_sequence_length: The length of the packed decoder inputs.
         language: string, language token. Should only be passed if your
             tokenizer is multilingual.
@@ -73,7 +74,9 @@ class WhisperPreprocessor(Preprocessor): Directly calling the layer on data. ```python - preprocessor = keras_nlp.models.WhisperPreprocessor.from_preset("whisper_tiny_en") + preprocessor = keras_nlp.models.WhisperPreprocessor.from_preset( + "whisper_tiny_en", + ) # Preprocess unbatched inputs. input_data = { @@ -153,8 +156,8 @@ class WhisperPreprocessor(Preprocessor): def __init__( self, - audio_feature_extractor, tokenizer, + audio_feature_extractor=None, decoder_sequence_length=448, language=None, task=None, @@ -162,6 +165,8 @@ def __init__( **kwargs, ): super().__init__(**kwargs) + if audio_feature_extractor is None: + audio_feature_extractor = WhisperAudioFeatureExtractor() self.audio_feature_extractor = audio_feature_extractor self.tokenizer = tokenizer self.decoder_sequence_length = decoder_sequence_length @@ -313,97 +318,3 @@ def tokenizer_cls(cls): @classproperty def presets(cls): return copy.deepcopy(backbone_presets) - - @classmethod - def from_preset( - cls, - preset, - language=None, - task=None, - no_timestamps=True, - **kwargs, - ): - """Instantiate `WhisperPreprocessor` from preset architecture. - - Args: - preset: string. Must be one of "{{preset_names}}". - language: string, language token (eg., `"<|en|>"`). Should only be - passed if your tokenizer is multilingual. - task: string, task name. One of `"transcribe"`, `"translate"`. - Should only be passed if your tokenizer is multilingual. - no_timestamps: bool. If True, `"<|no_timestamps|>"` will be added as - a special token to your input. - - Examples: - ```python - # Load a preprocessor layer from a preset. - preprocessor = keras_nlp.models.WhisperPreprocessor.from_preset( - "{{example_preset_name}}", - ) - ``` - """ - # Override base class's `from_preset` to handle audio feature extractor - # , `decoder_sequence_length` and special tokens. - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class." - ) - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) - - audio_feature_extractor = cls.audio_feature_extractor_cls.from_preset( - preset - ) - tokenizer = cls.tokenizer_cls.from_preset(preset) - - metadata = cls.presets[preset] - # For task model presets, the backbone config is nested. - if "backbone" in metadata["config"]: - backbone_config = metadata["config"]["backbone"]["config"] - else: - backbone_config = metadata["config"] - - # Use model's `max_decoder_sequence_length` if `decoder_sequence_length` - # is unspecified; otherwise check that `decoder_sequence_length` is not - # too long. - decoder_sequence_length = kwargs.pop("decoder_sequence_length", None) - max_decoder_sequence_length = backbone_config[ - "max_decoder_sequence_length" - ] - - def check_sequence_length(sequence_length, max_sequence_length, name): - if sequence_length is not None: - if sequence_length > max_sequence_length: - raise ValueError( - f"`{name}` cannot be longer than `{preset}` " - f"preset's `max_{name}` of {max_sequence_length}. " - f"Received: {sequence_length}." 
- ) - return sequence_length - else: - return max_sequence_length - - decoder_sequence_length = check_sequence_length( - decoder_sequence_length, - max_decoder_sequence_length, - "decoder_sequence_length", - ) - - return cls( - audio_feature_extractor=audio_feature_extractor, - tokenizer=tokenizer, - decoder_sequence_length=decoder_sequence_length, - language=language, - task=task, - no_timestamps=no_timestamps, - **kwargs, - ) - - -format_docstring( - example_preset_name=next(iter(backbone_presets), ""), - preset_names='", "'.join(backbone_presets), -)(WhisperPreprocessor.from_preset.__func__) diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index 3c385deac9..4917e9c5c2 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -27,14 +27,6 @@ "<|transcribe|>": 50357, } -AUDIO_FEATURE_EXTRACTOR_CONFIG = { - "num_mels": 80, - "num_fft_bins": 400, - "stride": 160, - "sampling_rate": 16000, - "max_audio_length": 30, -} - LANGUAGE_TOKENS = { "<|af|>": 50327, "<|am|>": 50334, @@ -161,7 +153,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": ENGLISH_SPECIAL_TOKENS, "language_tokens": None, @@ -195,7 +186,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": ENGLISH_SPECIAL_TOKENS, "language_tokens": None, @@ -229,7 +219,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": ENGLISH_SPECIAL_TOKENS, "language_tokens": None, @@ -263,7 +252,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": ENGLISH_SPECIAL_TOKENS, "language_tokens": None, @@ -297,7 +285,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, @@ -331,7 +318,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, @@ -365,7 +351,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, @@ -399,7 +384,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, @@ -433,7 +417,6 @@ "max_encoder_sequence_length": 3000, "max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, @@ -468,7 +451,6 @@ "max_encoder_sequence_length": 3000, 
"max_decoder_sequence_length": 448, }, - "audio_feature_extractor_config": AUDIO_FEATURE_EXTRACTOR_CONFIG, "preprocessor_config": { "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, "language_tokens": LANGUAGE_TOKENS, diff --git a/keras_nlp/models/whisper/whisper_tokenizer.py b/keras_nlp/models/whisper/whisper_tokenizer.py index 9a0d55786f..7b68dfd790 100644 --- a/keras_nlp/models/whisper/whisper_tokenizer.py +++ b/keras_nlp/models/whisper/whisper_tokenizer.py @@ -111,9 +111,20 @@ def __init__( **kwargs, ) + def save_assets(self, dir_path): + # TODO: whisper is currently mutating it's vocabulary before passing + # it to the super class, so we need to restore the unmutated vocabulary + # before saving our assets. We should find a more robust (and memory + # efficient) way to do this. + vocabulary = self.vocabulary + self.vocabulary = self._initial_vocabulary + super().save_assets(dir_path) + self.vocabulary = vocabulary + def set_vocabulary_and_merges(self, vocabulary, merges): if vocabulary is not None: vocabulary = _load_dict(vocabulary) + self._initial_vocabulary = dict(vocabulary) if self.language_tokens is not None: # Multilingual tokenizer. @@ -133,6 +144,8 @@ def set_vocabulary_and_merges(self, vocabulary, merges): self.transcribe_token, ]: vocabulary[token] = self.special_tokens[token] + else: + self._initial_vocabulary = None super().set_vocabulary_and_merges(vocabulary, merges) From e3f8d062ebe72f2d9c95812a1a4b578ec863764f Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 29 Nov 2023 17:25:59 -0800 Subject: [PATCH 61/87] Remove special case Bart from_preset (#1333) In doing this we need to remove an error for the case where a user would try to use a sequence_length longer than the supported max length of the backbone. preprocessor = BertPreprocessor.from_preset( "bert_base_uncased", sequence_length=1500, ) We would do this by reaching into the backbone config to read out the max length. Overall I think we shoud probably avoid cross cutting dependencies like this, a preprocessor should not reach into a backbone config. Also it is valid to want to use the vocab of a model to preprocess at a longer sequence length than the backbone would allow (maybe you are using a custom model). Instead we should probably try to make a friendly error message from the backbone (or position embedding), if a sequence length is too long. --- keras_nlp/models/bart/bart_preprocessor.py | 60 ---------------------- keras_nlp/models/preprocessor.py | 23 --------- 2 files changed, 83 deletions(-) diff --git a/keras_nlp/models/bart/bart_preprocessor.py b/keras_nlp/models/bart/bart_preprocessor.py index eaf85d883c..ffe2148839 100644 --- a/keras_nlp/models/bart/bart_preprocessor.py +++ b/keras_nlp/models/bart/bart_preprocessor.py @@ -233,63 +233,3 @@ def tokenizer_cls(cls): @classproperty def presets(cls): return copy.deepcopy(backbone_presets) - - @classmethod - def from_preset( - cls, - preset, - **kwargs, - ): - # Override base class's `from_preset` to handle `encoder_sequence_length` - # and `decoder_sequence_length`. - if not cls.presets: - raise NotImplementedError( - "No presets have been created for this class." - ) - if preset not in cls.presets: - raise ValueError( - "`preset` must be one of " - f"""{", ".join(cls.presets)}. Received: {preset}.""" - ) - - tokenizer = cls.tokenizer_cls.from_preset(preset) - - metadata = cls.presets[preset] - # For task model presets, the backbone config is nested. 
- if "backbone" in metadata["config"]: - backbone_config = metadata["config"]["backbone"]["config"] - else: - backbone_config = metadata["config"] - - # Use model's `max_sequence_length` if either `encoder_sequence_length` - # or `decoder_sequence_length` are unspecified; otherwise check that - # `encoder_sequence_length`/`decoder_sequence_length` are not too long. - encoder_sequence_length = kwargs.pop("encoder_sequence_length", None) - decoder_sequence_length = kwargs.pop("decoder_sequence_length", None) - max_sequence_length = backbone_config["max_sequence_length"] - - def check_sequence_length(sequence_length, name): - if sequence_length is not None: - if sequence_length > max_sequence_length: - raise ValueError( - f"`{name}` cannot be longer than `{preset}` " - f"preset's `max_sequence_length` of {max_sequence_length}. " - f"Received: {sequence_length}." - ) - return sequence_length - else: - return max_sequence_length - - encoder_sequence_length = check_sequence_length( - encoder_sequence_length, "encoder_sequence_length" - ) - decoder_sequence_length = check_sequence_length( - decoder_sequence_length, "decoder_sequence_length" - ) - - return cls( - tokenizer=tokenizer, - encoder_sequence_length=encoder_sequence_length, - decoder_sequence_length=decoder_sequence_length, - **kwargs, - ) diff --git a/keras_nlp/models/preprocessor.py b/keras_nlp/models/preprocessor.py index fb663f0e61..5e54b2d7e3 100644 --- a/keras_nlp/models/preprocessor.py +++ b/keras_nlp/models/preprocessor.py @@ -71,31 +71,8 @@ def _legacy_from_preset( **kwargs, ): tokenizer = cls.tokenizer_cls.from_preset(preset) - - metadata = cls.presets[preset] - # For task model presets, the backbone config is nested. - if "backbone" in metadata["config"]: - backbone_config = metadata["config"]["backbone"]["config"] - else: - backbone_config = metadata["config"] - - # Use model's `max_sequence_length` if `sequence_length` unspecified; - # otherwise check that `sequence_length` not too long. - sequence_length = kwargs.pop("sequence_length", None) - max_sequence_length = backbone_config["max_sequence_length"] - if sequence_length is not None: - if sequence_length > max_sequence_length: - raise ValueError( - f"`sequence_length` cannot be longer than `{preset}` " - f"preset's `max_sequence_length` of {max_sequence_length}. " - f"Received: {sequence_length}." 
- ) - else: - sequence_length = max_sequence_length - return cls( tokenizer=tokenizer, - sequence_length=sequence_length, **kwargs, ) From dbb64876268c1aeb8fe23a47d69eaf3a59388b56 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 30 Nov 2023 11:44:31 -0800 Subject: [PATCH 62/87] Fix t5 tokenizer presets (#1339) --- keras_nlp/models/t5/t5_backbone.py | 2 ++ keras_nlp/models/t5/t5_presets.py | 24 ++++++++++++------------ keras_nlp/models/t5/t5_tokenizer.py | 7 +++++++ 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py index 314a5d68df..6e76094d71 100644 --- a/keras_nlp/models/t5/t5_backbone.py +++ b/keras_nlp/models/t5/t5_backbone.py @@ -222,6 +222,7 @@ def __init__( self.activation = keras.activations.get(activation) self.key_value_dim = key_value_dim self.dropout = dropout + self.use_gated_activation = use_gated_activation self.layer_norm_epsilon = layer_norm_epsilon self.tie_embedding_weights = tie_embedding_weights self.token_embedding = token_embedding_layer @@ -238,6 +239,7 @@ def get_config(self): "activation": keras.activations.serialize(self.activation), "key_value_dim": self.key_value_dim, "dropout": self.dropout, + "use_gated_activation": self.use_gated_activation, "layer_norm_epsilon": self.layer_norm_epsilon, "tie_embedding_weights": self.tie_embedding_weights, } diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index dd2bea7a4e..699ea1ce76 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -41,8 +41,8 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/model.weights.h5", "weights_hash": "2e10b5f72405d464ee55026b07e60741", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "t5_base_multi": { "metadata": { @@ -70,8 +70,8 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/model.weights.h5", "weights_hash": "bed6ef276cfe83d1323467051211978d", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "t5_large_multi": { "metadata": { @@ -99,8 +99,8 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/model.weights.h5", "weights_hash": "7854a05c2e6812899bf6f0f104792cda", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_small_multi": { "metadata": { @@ -129,8 +129,8 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/model.weights.h5", "weights_hash": "aa0fbaddb1759ef313bbc4f9e4f1e197", - "vocabulary_url": 
"https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_base_multi": { "metadata": { @@ -158,8 +158,8 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/model.weights.h5", "weights_hash": "84a10bec83fd093931bb2a6264115d31", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, "flan_large_multi": { "metadata": { @@ -187,7 +187,7 @@ "preprocessor_config": {}, "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/model.weights.h5", "weights_hash": "513f530ce790efa7e261c0ef965f3697", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/vocab.spm", - "vocabulary_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/vocab.spm", + "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", }, } diff --git a/keras_nlp/models/t5/t5_tokenizer.py b/keras_nlp/models/t5/t5_tokenizer.py index ec5f0bf324..b5dee49b85 100644 --- a/keras_nlp/models/t5/t5_tokenizer.py +++ b/keras_nlp/models/t5/t5_tokenizer.py @@ -11,9 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import copy from keras_nlp.api_export import keras_nlp_export +from keras_nlp.models.t5.t5_presets import backbone_presets from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer +from keras_nlp.utils.python_utils import classproperty @keras_nlp_export("keras_nlp.models.T5Tokenizer") @@ -96,3 +99,7 @@ def set_proto(self, proto): self.end_token_id = None self.pad_token_id = None self.start_token_id = None + + @classproperty + def presets(cls): + return copy.deepcopy(backbone_presets) From 6130253e4ea12ee45fadca3b7ca4ebab11f24ef6 Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 30 Nov 2023 15:24:08 -0800 Subject: [PATCH 63/87] Script to convert presets (#1340) --- tools/convert_legacy_presets.py | 99 +++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 tools/convert_legacy_presets.py diff --git a/tools/convert_legacy_presets.py b/tools/convert_legacy_presets.py new file mode 100644 index 0000000000..5443beefc5 --- /dev/null +++ b/tools/convert_legacy_presets.py @@ -0,0 +1,99 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""
+This script was used to convert our legacy presets into the directory format
+used by Kaggle.
+
+This script is for reference only.
+"""
+
+import os
+import shutil
+
+os.environ["KERAS_HOME"] = os.getcwd()
+
+import keras_nlp  # noqa: E402
+from keras_nlp.src.utils.preset_utils import save_to_preset  # noqa: E402
+
+BUCKET = "keras-nlp-kaggle"
+
+backbone_models = [
+    (keras_nlp.models.AlbertBackbone, keras_nlp.models.AlbertTokenizer),
+    (keras_nlp.models.BartBackbone, keras_nlp.models.BartTokenizer),
+    (keras_nlp.models.BertBackbone, keras_nlp.models.BertTokenizer),
+    (keras_nlp.models.DebertaV3Backbone, keras_nlp.models.DebertaV3Tokenizer),
+    (keras_nlp.models.DistilBertBackbone, keras_nlp.models.DistilBertTokenizer),
+    (keras_nlp.models.FNetBackbone, keras_nlp.models.FNetTokenizer),
+    (keras_nlp.models.GPT2Backbone, keras_nlp.models.GPT2Tokenizer),
+    (keras_nlp.models.OPTBackbone, keras_nlp.models.OPTTokenizer),
+    (keras_nlp.models.RobertaBackbone, keras_nlp.models.RobertaTokenizer),
+    (keras_nlp.models.T5Backbone, keras_nlp.models.T5Tokenizer),
+    (keras_nlp.models.WhisperBackbone, keras_nlp.models.WhisperTokenizer),
+    (keras_nlp.models.XLMRobertaBackbone, keras_nlp.models.XLMRobertaTokenizer),
+]
+for backbone_cls, tokenizer_cls in backbone_models:
+    for preset in backbone_cls.presets:
+        backbone = backbone_cls.from_preset(preset)
+        tokenizer = tokenizer_cls.from_preset(preset)
+        save_to_preset(
+            backbone,
+            preset,
+            config_filename="config.json",
+        )
+        save_to_preset(
+            tokenizer,
+            preset,
+            config_filename="tokenizer.json",
+        )
+        # Delete first to clean up any existing version.
+        os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
+        os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
+        for root, _, files in os.walk(preset):
+            for file in files:
+                path = os.path.join(BUCKET, root, file)
+                os.system(
+                    f"gcloud storage objects update gs://{path} "
+                    "--add-acl-grant=entity=AllUsers,role=READER"
+                )
+        # Clean up local disk usage.
+        shutil.rmtree("models")
+        shutil.rmtree(preset)
+
+# Handle our single task model.
+preset = "bert_tiny_en_uncased_sst2"
+task = keras_nlp.models.BertClassifier.from_preset(preset)
+tokenizer = keras_nlp.models.BertTokenizer.from_preset(preset)
+save_to_preset(
+    task,
+    preset,
+    config_filename="config.json",
+)
+save_to_preset(
+    tokenizer,
+    preset,
+    config_filename="tokenizer.json",
+)
+# Delete first to clean up any existing version.
+os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
+os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
+for root, _, files in os.walk(preset):
+    for file in files:
+        path = os.path.join(BUCKET, root, file)
+        os.system(
+            f"gcloud storage objects update gs://{path} "
+            "--add-acl-grant=entity=AllUsers,role=READER"
+        )
+# Clean up local disk usage.
+shutil.rmtree("models")
+shutil.rmtree(preset)

From 814959b15756d1e18cfe4699d2e043c30f8ab139 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Fri, 1 Dec 2023 10:32:44 -0800
Subject: [PATCH 64/87] Switch all presets to the new Kaggle format (#1338)

These are not uploaded to Kaggle just yet, but will be shortly.
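Roughly, a short preset ID now just aliases a hosted directory handle,
and config, weights, and tokenizer assets are all read from that
directory by the shared loading path. A minimal sketch of the lookup
(the dict literal below is abbreviated to one entry; the handle value
and the `if preset in presets` mapping mirror this patch):

presets = {
    "bert_base_en": {"kaggle_handle": "gs://keras-nlp-kaggle/bert_base_en"},
}

def resolve(preset):
    # Official short IDs map to a hosted handle; anything else is
    # treated as a handle or local directory as-is.
    if preset in presets:
        return presets[preset]["kaggle_handle"]
    return preset

This keeps `from_preset("bert_base_en")` working unchanged while
dropping all the per-preset config dicts, URLs, and file hashes.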
--- keras_nlp/models/albert/albert_presets.py | 76 +--- keras_nlp/models/backbone.py | 32 +- keras_nlp/models/bart/bart_presets.py | 33 +- .../bart/bart_seq_2_seq_lm_preprocessor.py | 4 +- keras_nlp/models/bert/bert_presets.py | 187 +--------- .../models/deberta_v3/deberta_v3_presets.py | 80 +---- .../models/distil_bert/distil_bert_presets.py | 51 +-- keras_nlp/models/f_net/f_net_presets.py | 30 +- keras_nlp/models/gpt2/gpt2_presets.py | 85 +---- keras_nlp/models/opt/opt_presets.py | 68 +--- keras_nlp/models/preprocessor.py | 17 +- keras_nlp/models/roberta/roberta_presets.py | 34 +- keras_nlp/models/t5/t5_presets.py | 110 +----- keras_nlp/models/task.py | 40 +-- keras_nlp/models/whisper/whisper_presets.py | 337 +----------------- .../models/xlm_roberta/xlm_roberta_presets.py | 30 +- keras_nlp/tokenizers/byte_pair_tokenizer.py | 37 +- .../tokenizers/sentence_piece_tokenizer.py | 30 +- keras_nlp/tokenizers/word_piece_tokenizer.py | 30 +- keras_nlp/utils/preset_utils.py | 17 +- 20 files changed, 92 insertions(+), 1236 deletions(-) diff --git a/keras_nlp/models/albert/albert_presets.py b/keras_nlp/models/albert/albert_presets.py index 34126f52cd..eb163a64bf 100644 --- a/keras_nlp/models/albert/albert_presets.py +++ b/keras_nlp/models/albert/albert_presets.py @@ -26,24 +26,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30000, - "num_layers": 12, - "num_heads": 12, - "num_groups": 1, - "num_inner_repetitions": 1, - "embedding_dim": 128, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.0, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/model.h5", - "weights_hash": "b83ccf3418dd84adc569324183176813", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_base_en_uncased/v1/vocab.spm", - "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5", + "kaggle_handle": "gs://keras-nlp-kaggle/albert_base_en_uncased", }, "albert_large_en_uncased": { "metadata": { @@ -56,24 +39,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30000, - "num_layers": 24, - "num_heads": 16, - "num_groups": 1, - "num_inner_repetitions": 1, - "embedding_dim": 128, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_large_en_uncased/v1/model.h5", - "weights_hash": "c7754804efb245f06dd6e7ced32e082c", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_large_en_uncased/v1/vocab.spm", - "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5", + "kaggle_handle": "gs://keras-nlp-kaggle/albert_large_en_uncased", }, "albert_extra_large_en_uncased": { "metadata": { @@ -86,24 +52,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30000, - "num_layers": 24, - "num_heads": 16, - "num_groups": 1, - "num_inner_repetitions": 1, - "embedding_dim": 128, - "hidden_dim": 2048, - "intermediate_dim": 8192, - "dropout": 0, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": {}, - "weights_url": 
"https://storage.googleapis.com/keras-nlp/models/albert_extra_large_en_uncased/v1/model.h5", - "weights_hash": "713209be8aadfa614fd79f18c9aeb16d", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_large_en_uncased/v1/vocab.spm", - "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5", + "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_large_en_uncased", }, "albert_extra_extra_large_en_uncased": { "metadata": { @@ -116,23 +65,6 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30000, - "num_layers": 12, - "num_heads": 64, - "num_groups": 1, - "num_inner_repetitions": 1, - "embedding_dim": 128, - "hidden_dim": 4096, - "intermediate_dim": 16384, - "dropout": 0, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_extra_large_en_uncased/v1/model.h5", - "weights_hash": "a835177b692fb6a82139f94c66db2f22", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/albert_extra_extra_large_en_uncased/v1/vocab.spm", - "spm_proto_hash": "73e62ff8e90f951f24c8b907913039a5", + "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_extra_large_en_uncased", }, } diff --git a/keras_nlp/models/backbone.py b/keras_nlp/models/backbone.py index 7ddfeb36da..9b8f9a5a96 100644 --- a/keras_nlp/models/backbone.py +++ b/keras_nlp/models/backbone.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - from keras_nlp.backend import keras from keras_nlp.utils.preset_utils import check_preset_class from keras_nlp.utils.preset_utils import load_from_preset @@ -68,31 +66,6 @@ def from_config(cls, config): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - load_weights=True, - **kwargs, - ): - metadata = cls.presets[preset] - config = metadata["config"] - model = cls.from_config({**config, **kwargs}) - - if not load_weights: - return model - - filename = os.path.basename(metadata["weights_url"]) - weights = keras.utils.get_file( - filename, - metadata["weights_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["weights_hash"], - ) - - model.load_weights(weights) - return model - @classmethod def from_preset( cls, @@ -121,9 +94,10 @@ def from_preset( ) ``` """ - # TODO: delete me! + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. 
if preset in cls.presets: - return cls._legacy_from_preset(preset, **kwargs) + preset = cls.presets[preset]["kaggle_handle"] check_preset_class(preset, cls) return load_from_preset( diff --git a/keras_nlp/models/bart/bart_presets.py b/keras_nlp/models/bart/bart_presets.py index aa06254c10..d5547b37da 100644 --- a/keras_nlp/models/bart/bart_presets.py +++ b/keras_nlp/models/bart/bart_presets.py @@ -25,22 +25,7 @@ "path": "bart", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md", }, - "config": { - "vocabulary_size": 50265, - "num_layers": 6, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/model.h5", - "weights_hash": "5b59403f0cafafbd89680e0785791163", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/vocab.json", - "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_base_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/bart_base_en", }, "bart_large_en": { "metadata": { @@ -62,13 +47,7 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/model.h5", - "weights_hash": "6bfe7e591af8c5699ce6f9f18753af9a", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/vocab.json", - "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en", }, "bart_large_en_cnn": { "metadata": { @@ -90,12 +69,6 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/model.h5", - "weights_hash": "99782ecd9365956f016096fef9afd62c", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/vocab.json", - "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/bart_large_en_cnn/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en_cnn", }, } diff --git a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py index 048c88e82e..3d398d29d1 100644 --- a/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py +++ b/keras_nlp/models/bart/bart_seq_2_seq_lm_preprocessor.py @@ -127,8 +127,8 @@ class BartSeq2SeqLMPreprocessor(BartPreprocessor): def __init__( self, tokenizer, - encoder_sequence_length, - decoder_sequence_length, + encoder_sequence_length=1024, + decoder_sequence_length=1024, **kwargs ): # Since we truncate the last token from `decoder_token_ids`, we need to diff --git a/keras_nlp/models/bert/bert_presets.py b/keras_nlp/models/bert/bert_presets.py index 7a3bbdce63..6919d2b566 100644 --- a/keras_nlp/models/bert/bert_presets.py +++ b/keras_nlp/models/bert/bert_presets.py @@ -27,23 +27,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30522, - 
"num_layers": 2, - "num_heads": 2, - "hidden_dim": 128, - "intermediate_dim": 512, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_tiny_en_uncased/v1/model.h5", - "weights_hash": "c2b29fcbf8f814a0812e4ab89ef5c068", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_tiny_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_tiny_en_uncased", }, "bert_small_en_uncased": { "metadata": { @@ -56,23 +40,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30522, - "num_layers": 4, - "num_heads": 8, - "hidden_dim": 512, - "intermediate_dim": 2048, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_small_en_uncased/v1/model.h5", - "weights_hash": "08632c9479b034f342ba2c2b7afba5f7", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_small_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_small_en_uncased", }, "bert_medium_en_uncased": { "metadata": { @@ -85,23 +53,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30522, - "num_layers": 8, - "num_heads": 8, - "hidden_dim": 512, - "intermediate_dim": 2048, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_medium_en_uncased/v1/model.h5", - "weights_hash": "bb990e1184ec6b6185450c73833cd661", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_medium_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_medium_en_uncased", }, "bert_base_en_uncased": { "metadata": { @@ -114,23 +66,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30522, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_en_uncased/v1/model.h5", - "weights_hash": "9b2b2139f221988759ac9cdd17050b31", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_en_uncased", }, "bert_base_en": { "metadata": { @@ -143,23 +79,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 28996, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": 
"https://storage.googleapis.com/keras-nlp/models/bert_base_en/v1/model.h5", - "weights_hash": "f94a6cb012e18f4fb8ec92abb91864e9", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_en/v1/vocab.txt", - "vocabulary_hash": "bb6ca9b42e790e5cd986bbb16444d0e0", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_en", }, "bert_base_zh": { "metadata": { @@ -171,23 +91,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 21128, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_zh/v1/model.h5", - "weights_hash": "79afa421e386076e62ab42dad555ab0c", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_zh/v1/vocab.txt", - "vocabulary_hash": "3b5b76c4aef48ecf8cb3abaafe960f09", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_zh", }, "bert_base_multi": { "metadata": { @@ -199,23 +103,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 119547, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_multi/v1/model.h5", - "weights_hash": "b0631cec0a1f2513c6cfd75ba29c33aa", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_base_multi/v1/vocab.txt", - "vocabulary_hash": "d9d865138d17f1958502ed060ecfeeb6", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_multi", }, "bert_large_en_uncased": { "metadata": { @@ -228,23 +116,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 30522, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_large_en_uncased/v1/model.h5", - "weights_hash": "cc5cacc9565ef400ee4376105f40ddae", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_large_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_large_en_uncased", }, "bert_large_en": { "metadata": { @@ -257,23 +129,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "vocabulary_size": 28996, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 2, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_large_en/v1/model.h5", - "weights_hash": "8b8ab82290bbf4f8db87d4f100648890", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_large_en/v1/vocab.txt", - "vocabulary_hash": "bb6ca9b42e790e5cd986bbb16444d0e0", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_large_en", }, } @@ -288,29 +144,6 
@@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "config": { - "backbone": { - "class_name": "keras_nlp>BertBackbone", - "config": { - "vocabulary_size": 30522, - "hidden_dim": 128, - "intermediate_dim": 512, - "num_layers": 2, - "num_heads": 2, - "max_sequence_length": 512, - "num_segments": 2, - "dropout": 0.1, - }, - }, - "num_classes": 2, - "dropout": 0.1, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/bert_tiny_en_uncased_sst2/v1/model.h5", - "weights_hash": "1f9c2d59f9e229e08f3fbd44239cfb0b", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/bert_tiny_en_uncased_sst2/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/bert_tiny_en_uncased_sst2", } } diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets.py b/keras_nlp/models/deberta_v3/deberta_v3_presets.py index f5df6cb599..771d7ad9c5 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets.py @@ -25,21 +25,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-xsmall", }, - "config": { - "vocabulary_size": 128100, - "num_layers": 12, - "num_heads": 6, - "hidden_dim": 384, - "intermediate_dim": 1536, - "dropout": 0.1, - "max_sequence_length": 512, - "bucket_size": 256, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_extra_small_en/v1/model.h5", - "weights_hash": "d8e10327107e5c5e20b45548a5028619", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_extra_small_en/v1/vocab.spm", - "spm_proto_hash": "1613fcbf3b82999c187b09c9db79b568", + "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_extra_small_en", }, "deberta_v3_small_en": { "metadata": { @@ -52,21 +38,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-small", }, - "config": { - "vocabulary_size": 128100, - "num_layers": 6, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "bucket_size": 256, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_small_en/v1/model.h5", - "weights_hash": "84118eb7c5a735f2061ecccaf71bb888", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_small_en/v1/vocab.spm", - "spm_proto_hash": "1613fcbf3b82999c187b09c9db79b568", + "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_small_en", }, "deberta_v3_base_en": { "metadata": { @@ -79,21 +51,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-base", }, - "config": { - "vocabulary_size": 128100, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "bucket_size": 256, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_base_en/v1/model.h5", - "weights_hash": "cebce044aeed36aec9b94e3b8a255430", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_base_en/v1/vocab.spm", - "spm_proto_hash": "1613fcbf3b82999c187b09c9db79b568", + "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_base_en", }, "deberta_v3_large_en": { "metadata": { @@ -106,21 +64,7 @@ "path": "deberta_v3", "model_card": 
"https://huggingface.co/microsoft/deberta-v3-large", }, - "config": { - "vocabulary_size": 128100, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - "bucket_size": 256, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_large_en/v1/model.h5", - "weights_hash": "bce7690f358a9e39304f8c0ebc71a745", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_large_en/v1/vocab.spm", - "spm_proto_hash": "1613fcbf3b82999c187b09c9db79b568", + "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_large_en", }, "deberta_v3_base_multi": { "metadata": { @@ -133,20 +77,6 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/mdeberta-v3-base", }, - "config": { - "vocabulary_size": 251000, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "bucket_size": 256, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_base_multi/v1/model.h5", - "weights_hash": "26e5a824b26afd2ee336835bd337bbeb", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/deberta_v3_base_multi/v1/vocab.spm", - "spm_proto_hash": "b4ca07289eac48600b29529119d565e2", + "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_base_multi", }, } diff --git a/keras_nlp/models/distil_bert/distil_bert_presets.py b/keras_nlp/models/distil_bert/distil_bert_presets.py index 3f939fb6da..b2a99ef688 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets.py @@ -26,22 +26,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-uncased", }, - "config": { - "vocabulary_size": 30522, - "num_layers": 6, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": { - "lowercase": True, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_en_uncased/v1/model.h5", - "weights_hash": "6625a649572e74086d74c46b8d0b0da3", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_en_uncased/v1/vocab.txt", - "vocabulary_hash": "64800d5d8528ce344256daf115d4965e", + "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_en_uncased", }, "distil_bert_base_en": { "metadata": { @@ -55,22 +40,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-cased", }, - "config": { - "vocabulary_size": 28996, - "num_layers": 6, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_en/v1/model.h5", - "weights_hash": "fa36aa6865978efbf85a5c8264e5eb57", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_en/v1/vocab.txt", - "vocabulary_hash": "bb6ca9b42e790e5cd986bbb16444d0e0", + "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_en", }, "distil_bert_base_multi": { "metadata": { @@ -82,21 +52,6 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-multilingual-cased", }, - "config": { - "vocabulary_size": 119547, - "num_layers": 6, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - 
"max_sequence_length": 512, - }, - "preprocessor_config": { - "lowercase": False, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_multi/v1/model.h5", - "weights_hash": "c0f11095e2a6455bd3b1a6d14800a7fa", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/distil_bert_base_multi/v1/vocab.txt", - "vocabulary_hash": "d9d865138d17f1958502ed060ecfeeb6", + "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_multi", }, } diff --git a/keras_nlp/models/f_net/f_net_presets.py b/keras_nlp/models/f_net/f_net_presets.py index b3df5f8e2c..48cc9827b4 100644 --- a/keras_nlp/models/f_net/f_net_presets.py +++ b/keras_nlp/models/f_net/f_net_presets.py @@ -25,20 +25,7 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "config": { - "vocabulary_size": 32000, - "num_layers": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 4, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/f_net_base_en/v1/model.h5", - "weights_hash": "35db90842b85a985a0e54c86c00746fe", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/f_net_base_en/v1/vocab.spm", - "spm_proto_hash": "71c5f4610bef1daf116998a113a01f3d", + "kaggle_handle": "gs://keras-nlp-kaggle/f_net_base_en", }, "f_net_large_en": { "metadata": { @@ -51,19 +38,6 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "config": { - "vocabulary_size": 32000, - "num_layers": 24, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - "num_segments": 4, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/f_net_large_en/v1/model.h5", - "weights_hash": "7ae4a3faa67ff054f8cecffb5619f779", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/f_net_large_en/v1/vocab.spm", - "spm_proto_hash": "71c5f4610bef1daf116998a113a01f3d", + "kaggle_handle": "gs://keras-nlp-kaggle/f_net_large_en", }, } diff --git a/keras_nlp/models/gpt2/gpt2_presets.py b/keras_nlp/models/gpt2/gpt2_presets.py index 7101bdb104..e5e546a92a 100644 --- a/keras_nlp/models/gpt2/gpt2_presets.py +++ b/keras_nlp/models/gpt2/gpt2_presets.py @@ -26,22 +26,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "config": { - "vocabulary_size": 50257, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en/v1/model.h5", - "weights_hash": "f4ea6e1b214516dd7de452461ee6e16e", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en/v1/vocab.json", - "vocabulary_hash": "dffec25a898b1f5e569bec4dffd7e5c0", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_base_en", }, "gpt2_medium_en": { "metadata": { @@ -54,22 +39,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "config": { - "vocabulary_size": 50257, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - 
"dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_medium_en/v1/model.h5", - "weights_hash": "580ff9b79c04fc90e6d6f47e975c5afe", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_medium_en/v1/vocab.json", - "vocabulary_hash": "dffec25a898b1f5e569bec4dffd7e5c0", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_medium_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_medium_en", }, "gpt2_large_en": { "metadata": { @@ -82,22 +52,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "config": { - "vocabulary_size": 50257, - "num_layers": 36, - "num_heads": 20, - "hidden_dim": 1280, - "intermediate_dim": 5120, - "dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_large_en/v1/model.h5", - "weights_hash": "67957cb3dfc9e965960dabe068811e1a", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_large_en/v1/vocab.json", - "vocabulary_hash": "dffec25a898b1f5e569bec4dffd7e5c0", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_large_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_large_en", }, "gpt2_extra_large_en": { "metadata": { @@ -110,22 +65,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "config": { - "vocabulary_size": 50257, - "num_layers": 48, - "num_heads": 25, - "hidden_dim": 1600, - "intermediate_dim": 6400, - "dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_extra_large_en/v1/model.h5", - "weights_hash": "d093c1ee0d9705d845c0190909aa2917", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_extra_large_en/v1/vocab.json", - "vocabulary_hash": "dffec25a898b1f5e569bec4dffd7e5c0", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_extra_large_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_extra_large_en", }, "gpt2_base_en_cnn_dailymail": { "metadata": { @@ -137,21 +77,6 @@ "official_name": "GPT-2", "path": "gpt2", }, - "config": { - "vocabulary_size": 50257, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 1024, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en_news/v1/model.h5", - "weights_hash": "09d86ca6e1b4213886b720a1392f2a70", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en_news/v1/vocab.json", - "vocabulary_hash": "dffec25a898b1f5e569bec4dffd7e5c0", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/gpt2_base_en_news/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_base_en_cnn_dailymail", }, } diff --git a/keras_nlp/models/opt/opt_presets.py b/keras_nlp/models/opt/opt_presets.py index 7af2641138..3ca0fd7b32 100644 --- a/keras_nlp/models/opt/opt_presets.py +++ b/keras_nlp/models/opt/opt_presets.py @@ -26,22 +26,7 @@ "path": "opt", "model_card": 
"https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "config": { - "vocabulary_size": 50272, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 2048, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/opt_125m_en/v1/model.h5", - "weights_hash": "63e444998982e48da4a1a3970f4c6203", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/opt_125m_en/v1/vocab.json", - "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/opt_125m_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/opt_125m_en", }, # We skip the 350m checkpoint because it does not match the structure of # other checkpoints. @@ -56,22 +41,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "config": { - "vocabulary_size": 50272, - "num_layers": 24, - "num_heads": 32, - "hidden_dim": 2048, - "intermediate_dim": 8192, - "dropout": 0.1, - "max_sequence_length": 2048, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/opt_1.3b_en/v1/model.h5", - "weights_hash": "0365ac8483e99a912c9770521909ecce", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/opt_1.3b_en/v1/vocab.json", - "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/opt_1.3b_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/opt_1.3b_en", }, "opt_2.7b_en": { "metadata": { @@ -84,22 +54,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "config": { - "vocabulary_size": 50272, - "num_layers": 32, - "num_heads": 32, - "hidden_dim": 2560, - "intermediate_dim": 10240, - "dropout": 0.1, - "max_sequence_length": 2048, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/opt_2.7b_en/v1/model.h5", - "weights_hash": "af56da9206a95b9287356955c5bc14e7", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/opt_2.7b_en/v1/vocab.json", - "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/opt_2.7b_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/opt_2.7b_en", }, "opt_6.7b_en": { "metadata": { @@ -112,21 +67,6 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "config": { - "vocabulary_size": 50272, - "num_layers": 32, - "num_heads": 32, - "hidden_dim": 4096, - "intermediate_dim": 16384, - "dropout": 0.1, - "max_sequence_length": 2048, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/opt_6.7b_en/v1/model.h5", - "weights_hash": "543120fbe601b70e6ec04cc909781e21", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/opt_6.7b_en/v1/vocab.json", - "vocabulary_hash": "cf410ee085c5c69c957bb1f6d8456596", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/opt_6.7b_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": 
"gs://keras-nlp-kaggle/opt_6.7b_en", }, } diff --git a/keras_nlp/models/preprocessor.py b/keras_nlp/models/preprocessor.py index 5e54b2d7e3..16a65e57c2 100644 --- a/keras_nlp/models/preprocessor.py +++ b/keras_nlp/models/preprocessor.py @@ -64,18 +64,6 @@ def tokenizer_cls(cls): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - **kwargs, - ): - tokenizer = cls.tokenizer_cls.from_preset(preset) - return cls( - tokenizer=tokenizer, - **kwargs, - ) - @classmethod def from_preset( cls, @@ -95,9 +83,10 @@ def from_preset( ) ``` """ - # TODO: delete me! + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. if preset in cls.presets: - return cls._legacy_from_preset(preset, **kwargs) + preset = cls.presets[preset]["kaggle_handle"] config_file = "tokenizer.json" check_preset_class(preset, cls.tokenizer_cls, config_file=config_file) diff --git a/keras_nlp/models/roberta/roberta_presets.py b/keras_nlp/models/roberta/roberta_presets.py index f098bed5d7..a57f7cf479 100644 --- a/keras_nlp/models/roberta/roberta_presets.py +++ b/keras_nlp/models/roberta/roberta_presets.py @@ -25,22 +25,7 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "config": { - "vocabulary_size": 50265, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/roberta_base_en/v1/model.h5", - "weights_hash": "958eede1c7edaa9308e027be18fde7a8", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/roberta_base_en/v1/vocab.json", - "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/roberta_base_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/roberta_base_en", }, "roberta_large_en": { "metadata": { @@ -53,21 +38,6 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "config": { - "vocabulary_size": 50265, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/roberta_large_en/v1/model.h5", - "weights_hash": "1978b864c317a697fe62a894d3664f14", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/roberta_large_en/v1/vocab.json", - "vocabulary_hash": "be4d3c6f3f5495426b2c03b334334354", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/roberta_large_en/v1/merges.txt", - "merges_hash": "75a37753dd7a28a2c5df80c28bf06e4e", + "kaggle_handle": "gs://keras-nlp-kaggle/roberta_large_en", }, } diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index 699ea1ce76..d5c502c5ba 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -25,24 +25,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 6, - "num_heads": 8, - "hidden_dim": 512, - "intermediate_dim": 2048, - "key_value_dim": 64, - "dropout": 0.1, - "activation": "relu", - 
"use_gated_activation": False, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": True, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/model.weights.h5", - "weights_hash": "2e10b5f72405d464ee55026b07e60741", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_small_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/t5_small_multi", }, "t5_base_multi": { "metadata": { @@ -55,23 +38,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "activation": "relu", - "use_gated_activation": False, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": True, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/model.weights.h5", - "weights_hash": "bed6ef276cfe83d1323467051211978d", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_base_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/t5_base_multi", }, "t5_large_multi": { "metadata": { @@ -84,23 +51,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "activation": "relu", - "use_gated_activation": False, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": True, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/model.weights.h5", - "weights_hash": "7854a05c2e6812899bf6f0f104792cda", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/t5_large_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/t5_large_multi", }, "flan_small_multi": { "metadata": { @@ -113,24 +64,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 8, - "num_heads": 6, - "hidden_dim": 512, - "intermediate_dim": 1024, - "key_value_dim": 64, - "dropout": 0.1, - "activation": "keras_nlp>gelu_approximate", - "use_gated_activation": True, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": False, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/model.weights.h5", - "weights_hash": "aa0fbaddb1759ef313bbc4f9e4f1e197", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_small_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/flan_small_multi", }, "flan_base_multi": { "metadata": { @@ -143,23 +77,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 2048, - "dropout": 0.1, - "activation": "keras_nlp>gelu_approximate", - 
"use_gated_activation": True, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": False, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/model.weights.h5", - "weights_hash": "84a10bec83fd093931bb2a6264115d31", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_base_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/flan_base_multi", }, "flan_large_multi": { "metadata": { @@ -172,22 +90,6 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "config": { - "vocabulary_size": 32128, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 2816, - "dropout": 0.1, - "activation": "keras_nlp>gelu_approximate", - "use_gated_activation": True, - "layer_norm_epsilon": 1e-06, - "tie_embedding_weights": False, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/model.weights.h5", - "weights_hash": "513f530ce790efa7e261c0ef965f3697", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/flan_large_multi/v1/vocab.spm", - "spm_proto_hash": "9d15ef55d09d5a425ceb63fa31f7cae3", + "kaggle_handle": "gs://keras-nlp-kaggle/flan_large_multi", }, } diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index 88f74b9a0d..97f06d0b1d 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os - from rich import console as rich_console from rich import markup from rich import table as rich_table @@ -150,39 +148,6 @@ def preprocessor_cls(cls): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - load_weights=True, - **kwargs, - ): - if "preprocessor" not in kwargs: - kwargs["preprocessor"] = cls.preprocessor_cls.from_preset(preset) - - # Check if preset is backbone-only model - if preset in cls.backbone_cls.presets: - backbone = cls.backbone_cls.from_preset(preset, load_weights) - return cls(backbone, **kwargs) - - # Otherwise must be one of class presets - metadata = cls.presets[preset] - config = metadata["config"] - model = cls.from_config({**config, **kwargs}) - - if not load_weights: - return model - - weights = keras.utils.get_file( - "model.h5", - metadata["weights_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["weights_hash"], - ) - - model.load_weights(weights) - return model - @classmethod def from_preset( cls, @@ -209,9 +174,10 @@ def from_preset( ) ``` """ - # TODO: delete me! + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. if preset in cls.presets: - return cls._legacy_from_preset(preset, load_weights, **kwargs) + preset = cls.presets[preset]["kaggle_handle"] preset_cls = check_preset_class(preset, (cls, cls.backbone_cls)) diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index 4917e9c5c2..81c10ce870 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -11,123 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-MULTILINGUAL_SPECIAL_TOKENS = { - "<|startoftranscript|>": 50258, - "<|endoftext|>": 50257, - "<|notimestamps|>": 50363, - "<|translate|>": 50359, - "<|transcribe|>": 50358, -} - -ENGLISH_SPECIAL_TOKENS = { - "<|startoftranscript|>": 50257, - "<|endoftext|>": 50256, - "<|notimestamps|>": 50362, - "<|translate|>": 50358, - "<|transcribe|>": 50357, -} - -LANGUAGE_TOKENS = { - "<|af|>": 50327, - "<|am|>": 50334, - "<|ar|>": 50272, - "<|as|>": 50350, - "<|az|>": 50304, - "<|ba|>": 50355, - "<|be|>": 50330, - "<|bg|>": 50292, - "<|bn|>": 50302, - "<|bo|>": 50347, - "<|br|>": 50309, - "<|bs|>": 50315, - "<|ca|>": 50270, - "<|cs|>": 50283, - "<|cy|>": 50297, - "<|da|>": 50285, - "<|de|>": 50261, - "<|el|>": 50281, - "<|en|>": 50259, - "<|es|>": 50262, - "<|et|>": 50307, - "<|eu|>": 50310, - "<|fa|>": 50300, - "<|fi|>": 50277, - "<|fo|>": 50338, - "<|fr|>": 50265, - "<|gl|>": 50319, - "<|gu|>": 50333, - "<|haw|>": 50352, - "<|ha|>": 50354, - "<|he|>": 50279, - "<|hi|>": 50276, - "<|hr|>": 50291, - "<|ht|>": 50339, - "<|hu|>": 50286, - "<|hy|>": 50312, - "<|id|>": 50275, - "<|is|>": 50311, - "<|it|>": 50274, - "<|ja|>": 50266, - "<|jw|>": 50356, - "<|ka|>": 50329, - "<|kk|>": 50316, - "<|km|>": 50323, - "<|kn|>": 50306, - "<|ko|>": 50264, - "<|la|>": 50294, - "<|lb|>": 50345, - "<|ln|>": 50353, - "<|lo|>": 50336, - "<|lt|>": 50293, - "<|lv|>": 50301, - "<|mg|>": 50349, - "<|mi|>": 50295, - "<|mk|>": 50308, - "<|ml|>": 50296, - "<|mn|>": 50314, - "<|mr|>": 50320, - "<|ms|>": 50282, - "<|mt|>": 50343, - "<|my|>": 50346, - "<|ne|>": 50313, - "<|nl|>": 50271, - "<|nn|>": 50342, - "<|no|>": 50288, - "<|oc|>": 50328, - "<|pa|>": 50321, - "<|pl|>": 50269, - "<|ps|>": 50340, - "<|pt|>": 50267, - "<|ro|>": 50284, - "<|ru|>": 50263, - "<|sa|>": 50344, - "<|sd|>": 50332, - "<|si|>": 50322, - "<|sk|>": 50298, - "<|sl|>": 50305, - "<|sn|>": 50324, - "<|so|>": 50326, - "<|sq|>": 50317, - "<|sr|>": 50303, - "<|su|>": 50357, - "<|sv|>": 50273, - "<|sw|>": 50318, - "<|ta|>": 50287, - "<|te|>": 50299, - "<|tg|>": 50331, - "<|th|>": 50289, - "<|tk|>": 50341, - "<|tl|>": 50348, - "<|tr|>": 50268, - "<|tt|>": 50351, - "<|uk|>": 50280, - "<|ur|>": 50290, - "<|uz|>": 50337, - "<|vi|>": 50278, - "<|yi|>": 50335, - "<|yo|>": 50325, - "<|zh|>": 50260, -} # Metadata for loading pretrained model weights. 
backbone_presets = { @@ -142,27 +25,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51864, - "num_layers": 4, - "num_heads": 6, - "hidden_dim": 384, - "intermediate_dim": 1536, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": ENGLISH_SPECIAL_TOKENS, - "language_tokens": None, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_en/v1/model.h5", - "weights_hash": "3dc3768ac48ec90b1029fbf52ffbacc7", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_en/v1/vocab.json", - "vocabulary_hash": "22377f841debacb023848b3468ea3281", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_en/v1/merges.txt", - "merges_hash": "093ecf3f30371012f2e96fcfb10ea6ab", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_tiny_en", }, "whisper_base_en": { "metadata": { @@ -175,27 +38,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51864, - "num_layers": 6, - "num_heads": 8, - "hidden_dim": 512, - "intermediate_dim": 2048, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": ENGLISH_SPECIAL_TOKENS, - "language_tokens": None, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/model.h5", - "weights_hash": "799d3c143993d42f7446bafbc0f46d7d", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/vocab.json", - "vocabulary_hash": "22377f841debacb023848b3468ea3281", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_en/v1/merges.txt", - "merges_hash": "093ecf3f30371012f2e96fcfb10ea6ab", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_base_en", }, "whisper_small_en": { "metadata": { @@ -208,27 +51,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51864, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": ENGLISH_SPECIAL_TOKENS, - "language_tokens": None, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/model.h5", - "weights_hash": "b75a89225e20019d85ff5f1c362f8a49", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/vocab.json", - "vocabulary_hash": "22377f841debacb023848b3468ea3281", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_en/v1/merges.txt", - "merges_hash": "093ecf3f30371012f2e96fcfb10ea6ab", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_small_en", }, "whisper_medium_en": { "metadata": { @@ -241,27 +64,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51864, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - 
"special_tokens": ENGLISH_SPECIAL_TOKENS, - "language_tokens": None, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_en/v1/model.h5", - "weights_hash": "107184882d1cc65926815e4cc50dc5f3", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_en/v1/vocab.json", - "vocabulary_hash": "22377f841debacb023848b3468ea3281", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_en/v1/merges.txt", - "merges_hash": "093ecf3f30371012f2e96fcfb10ea6ab", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_medium_en", }, "whisper_tiny_multi": { "metadata": { @@ -274,27 +77,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 4, - "num_heads": 6, - "hidden_dim": 384, - "intermediate_dim": 1536, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_multi/v1/model.h5", - "weights_hash": "b1279a81001ad5eb35970d1aea706396", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_multi/v1/vocab.json", - "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_tiny_multi/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_tiny_multi", }, "whisper_base_multi": { "metadata": { @@ -307,27 +90,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 6, - "num_heads": 8, - "hidden_dim": 512, - "intermediate_dim": 2048, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/model.h5", - "weights_hash": "5208396e2d5efac43114a4a3d4f583ab", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/vocab.json", - "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_base_multi/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_base_multi", }, "whisper_small_multi": { "metadata": { @@ -340,27 +103,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/model.h5", - "weights_hash": "c90c6a895e522056b77b924b6e907ed8", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/vocab.json", - "vocabulary_hash": 
"1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_small_multi/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_small_multi", }, "whisper_medium_multi": { "metadata": { @@ -373,27 +116,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_multi/v1/model.h5", - "weights_hash": "6f993f732fe397e9c5e3a96a9505a3a9", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_multi/v1/vocab.json", - "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_medium_multi/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_medium_multi", }, "whisper_large_multi": { "metadata": { @@ -406,27 +129,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 32, - "num_heads": 20, - "hidden_dim": 1280, - "intermediate_dim": 5120, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi/v1/model.h5", - "weights_hash": "ccab1c93c5739007868ae73fe025806d", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi/v1/vocab.json", - "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_large_multi", }, "whisper_large_multi_v2": { "metadata": { @@ -440,26 +143,6 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "config": { - "vocabulary_size": 51865, - "num_layers": 32, - "num_heads": 20, - "hidden_dim": 1280, - "intermediate_dim": 5120, - "num_mels": 80, - "dropout": 0.0, - "max_encoder_sequence_length": 3000, - "max_decoder_sequence_length": 448, - }, - "preprocessor_config": { - "special_tokens": MULTILINGUAL_SPECIAL_TOKENS, - "language_tokens": LANGUAGE_TOKENS, - }, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/model.h5", - "weights_hash": "ccab1c93c5739007868ae73fe025806d", - "vocabulary_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/vocab.json", - "vocabulary_hash": "1b87ed3e3ecd9ccfdca74e64cbe81d68", - "merges_url": "https://storage.googleapis.com/keras-nlp/models/whisper_large_multi_v2/v1/merges.txt", - "merges_hash": "c7f01d4100f6211417988889bf35ccd8", + "kaggle_handle": "gs://keras-nlp-kaggle/whisper_large_multi_v2", }, } diff --git 
a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py index 350c069f1d..5b7a571e48 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py @@ -25,20 +25,7 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "config": { - "vocabulary_size": 250002, - "num_layers": 12, - "num_heads": 12, - "hidden_dim": 768, - "intermediate_dim": 3072, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/xlm_roberta_base_multi/v1/model.h5", - "weights_hash": "2eb6fcda5a42f0a88056213ba3d93906", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/xlm_roberta_base_multi/v1/vocab.spm", - "spm_proto_hash": "bf25eb5120ad92ef5c7d8596b5dc4046", + "kaggle_handle": "gs://keras-nlp-kaggle/xlm_roberta_base_multi", }, "xlm_roberta_large_multi": { "metadata": { @@ -51,19 +38,6 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "config": { - "vocabulary_size": 250002, - "num_layers": 24, - "num_heads": 16, - "hidden_dim": 1024, - "intermediate_dim": 4096, - "dropout": 0.1, - "max_sequence_length": 512, - }, - "preprocessor_config": {}, - "weights_url": "https://storage.googleapis.com/keras-nlp/models/xlm_roberta_large_multi/v1/model.h5", - "weights_hash": "276211827174b71751f2ce3a89da503a", - "spm_proto_url": "https://storage.googleapis.com/keras-nlp/models/xlm_roberta_large_multi/v1/vocab.spm", - "spm_proto_hash": "bf25eb5120ad92ef5c7d8596b5dc4046", + "kaggle_handle": "gs://keras-nlp-kaggle/xlm_roberta_large_multi", }, } diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index fe78e40e53..5a84a5ac95 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -28,7 +28,6 @@ import tensorflow as tf from keras_nlp.api_export import keras_nlp_export -from keras_nlp.backend import keras from keras_nlp.tokenizers import tokenizer from keras_nlp.utils.preset_utils import check_preset_class from keras_nlp.utils.preset_utils import load_from_preset @@ -663,37 +662,6 @@ def get_config(self): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - **kwargs, - ): - metadata = cls.presets[preset] - - vocabulary = keras.utils.get_file( - "vocab.txt", - metadata["vocabulary_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["vocabulary_hash"], - ) - merges = keras.utils.get_file( - "merges.txt", - metadata["merges_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["merges_hash"], - ) - - config = metadata["preprocessor_config"] - config.update( - { - "vocabulary": vocabulary, - "merges": merges, - }, - ) - - return cls.from_config({**config, **kwargs}) - @classmethod def from_preset( cls, @@ -717,9 +685,10 @@ def from_preset( tokenizer.detokenize([5, 6, 7, 8, 9]) ``` """ - # TODO: delete me! + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. 
if preset in cls.presets: - return cls._legacy_from_preset(preset, **kwargs) + preset = cls.presets[preset]["kaggle_handle"] config_file = "tokenizer.json" check_preset_class(preset, cls, config_file=config_file) diff --git a/keras_nlp/tokenizers/sentence_piece_tokenizer.py b/keras_nlp/tokenizers/sentence_piece_tokenizer.py index eb6abb8140..ae655aceb6 100644 --- a/keras_nlp/tokenizers/sentence_piece_tokenizer.py +++ b/keras_nlp/tokenizers/sentence_piece_tokenizer.py @@ -20,7 +20,6 @@ import tensorflow as tf from keras_nlp.api_export import keras_nlp_export -from keras_nlp.backend import keras from keras_nlp.tokenizers import tokenizer from keras_nlp.utils.preset_utils import check_preset_class from keras_nlp.utils.preset_utils import load_from_preset @@ -263,30 +262,6 @@ def detokenize(self, inputs): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - **kwargs, - ): - metadata = cls.presets[preset] - - spm_proto = keras.utils.get_file( - "vocab.spm", - metadata["spm_proto_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["spm_proto_hash"], - ) - - config = metadata["preprocessor_config"] - config.update( - { - "proto": spm_proto, - }, - ) - - return cls.from_config({**config, **kwargs}) - @classmethod def from_preset( cls, @@ -310,9 +285,10 @@ def from_preset( tokenizer.detokenize([5, 6, 7, 8, 9]) ``` """ - + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. if preset in cls.presets: - return cls._legacy_from_preset(preset, **kwargs) + preset = cls.presets[preset]["kaggle_handle"] config_file = "tokenizer.json" check_preset_class(preset, cls, config_file=config_file) diff --git a/keras_nlp/tokenizers/word_piece_tokenizer.py b/keras_nlp/tokenizers/word_piece_tokenizer.py index 6d1fa8e7f1..4e7b05b230 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer.py @@ -19,7 +19,6 @@ import tensorflow as tf from keras_nlp.api_export import keras_nlp_export -from keras_nlp.backend import keras from keras_nlp.tokenizers import tokenizer from keras_nlp.utils.preset_utils import check_preset_class from keras_nlp.utils.preset_utils import load_from_preset @@ -470,30 +469,6 @@ def detokenize(self, inputs): def presets(cls): return {} - @classmethod - def _legacy_from_preset( - cls, - preset, - **kwargs, - ): - metadata = cls.presets[preset] - - vocabulary = keras.utils.get_file( - "vocab.txt", - metadata["vocabulary_url"], - cache_subdir=os.path.join("models", preset), - file_hash=metadata["vocabulary_hash"], - ) - - config = metadata["preprocessor_config"] - config.update( - { - "vocabulary": vocabulary, - }, - ) - - return cls.from_config({**config, **kwargs}) - @classmethod def from_preset( cls, @@ -517,9 +492,10 @@ def from_preset( tokenizer.detokenize([5, 6, 7, 8, 9]) ``` """ - # TODO: delete me! + # We support short IDs for official presets, e.g. `"bert_base_en"`. + # Map these to a Kaggle Models handle. 
        if preset in cls.presets:
-            return cls._legacy_from_preset(preset, **kwargs)
+            preset = cls.presets[preset]["kaggle_handle"]
 
        config_file = "tokenizer.json"
        check_preset_class(preset, cls, config_file=config_file)
diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index 04ca3a39cd..f2234f615d 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -24,6 +24,7 @@
     kagglehub = None
 
 KAGGLE_PREFIX = "kaggle://"
+GS_PREFIX = "gs://"
 TOKENIZER_ASSET_DIR = "assets/tokenizer"
 
@@ -51,7 +52,21 @@ def get_file(preset, path):
             f"Received: preset={preset}"
         )
         return kagglehub.model_download(kaggle_handle, path)
-    return os.path.join(preset, path)
+    elif preset.startswith(GS_PREFIX):
+        url = os.path.join(preset, path)
+        url = url.replace(GS_PREFIX, "https://storage.googleapis.com/")
+        subdir = preset.replace(GS_PREFIX, "gs_")
+        subdir = subdir.replace("/", "_").replace("-", "_")
+        filename = os.path.basename(path)
+        subdir = os.path.join(subdir, os.path.dirname(path))
+        return keras.utils.get_file(
+            filename,
+            url,
+            cache_subdir=os.path.join("models", subdir),
+        )
+    else:
+        # Assume a local filepath.
+        return os.path.join(preset, path)
 
 
 def get_tokenizer(layer):

From 2aced24f7f49444e0836a4b5129e26e20915634d Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Mon, 4 Dec 2023 10:24:34 -0800
Subject: [PATCH 65/87] Let kagglehub select latest version (#1342)

This is a newly minted feature in kagglehub. We should probably add a
unit test for this, but only after our models are actually uploaded to
the hub.
---
 keras_nlp/utils/preset_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index f2234f615d..3b31eb244f 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -40,7 +40,7 @@ def get_file(preset, path):
         # Insert the kaggle framework into the handle.
         if len(segments) == 3:
             org, model, variant = segments
-            kaggle_handle = f"{org}/{model}/keras/{variant}/1"
+            kaggle_handle = f"{org}/{model}/keras/{variant}"
         elif len(segments) == 4:
             org, model, variant, version = segments
             kaggle_handle = f"{org}/{model}/keras/{variant}/{version}"

From 245b7e98dff163dc099388992863ef644a3b6404 Mon Sep 17 00:00:00 2001
From: Philippe Modard
Date: Tue, 5 Dec 2023 19:01:43 +0100
Subject: [PATCH 66/87] Use the proper title for example (#1346)

* Use the proper title for example

Otherwise it won't be rendered correctly by the
[`TFKerasDocumentationGenerator`](https://github.com/keras-team/keras-io/blob/fc340b9989cdf17fba44e66efa22758afad39b87/scripts/docstrings.py#L27-L28)

* more fixes
---
 keras_nlp/models/deberta_v3/deberta_v3_backbone.py | 2 +-
 keras_nlp/models/gpt2/gpt2_backbone.py             | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/keras_nlp/models/deberta_v3/deberta_v3_backbone.py b/keras_nlp/models/deberta_v3/deberta_v3_backbone.py
index 76e2cf9dd7..aa5077ec67 100644
--- a/keras_nlp/models/deberta_v3/deberta_v3_backbone.py
+++ b/keras_nlp/models/deberta_v3/deberta_v3_backbone.py
@@ -68,7 +68,7 @@ class DebertaV3Backbone(Backbone):
         bucket_size: int. The size of the relative position buckets. Generally
             equal to `max_sequence_length // 2`.
 
- Example usage: + Example: ```python input_data = { "token_ids": np.ones(shape=(1, 12), dtype="int32"), diff --git a/keras_nlp/models/gpt2/gpt2_backbone.py b/keras_nlp/models/gpt2/gpt2_backbone.py index 3f357d6408..89c23f71de 100644 --- a/keras_nlp/models/gpt2/gpt2_backbone.py +++ b/keras_nlp/models/gpt2/gpt2_backbone.py @@ -62,7 +62,7 @@ class GPT2Backbone(Backbone): sequence length. This determines the variable shape for positional embeddings. - Example usage: + Example: ```python input_data = { "token_ids": np.ones(shape=(1, 12), dtype="int32"), From 6ad8a300fb65a3f515442f77e33ca12110fbd39f Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Wed, 6 Dec 2023 15:56:41 -0800 Subject: [PATCH 67/87] Update conversion script (#1347) Fixes names, and avoids running on import. --- tools/convert_legacy_presets.py | 156 ++++++++++++++++++-------------- 1 file changed, 87 insertions(+), 69 deletions(-) diff --git a/tools/convert_legacy_presets.py b/tools/convert_legacy_presets.py index 5443beefc5..c1470cf64a 100644 --- a/tools/convert_legacy_presets.py +++ b/tools/convert_legacy_presets.py @@ -19,81 +19,99 @@ """ import os +import re import shutil os.environ["KERAS_HOME"] = os.getcwd() -import keras_nlp # noqa: E402 +from keras_nlp import models # noqa: E402 from keras_nlp.src.utils.preset_utils import save_to_preset # noqa: E402 BUCKET = "keras-nlp-kaggle" -backbone_models = [ - (keras_nlp.models.AlbertBackbone, keras_nlp.models.AlbertTokenizer), - (keras_nlp.models.BartBackbone, keras_nlp.models.BartTokenizer), - (keras_nlp.models.BertBackbone, keras_nlp.models.BertTokenizer), - (keras_nlp.models.DebertaV3Backbone, keras_nlp.models.DebertaV3Tokenizer), - (keras_nlp.models.DistilBertBackbone, keras_nlp.models.DistilBertTokenizer), - (keras_nlp.models.FNetBackbone, keras_nlp.models.FNetTokenizer), - (keras_nlp.models.GPT2Backbone, keras_nlp.models.GPT2Tokenizer), - (keras_nlp.models.OPTBackbone, keras_nlp.models.OPTTokenizer), - (keras_nlp.models.RobertaBackbone, keras_nlp.models.RobertaTokenizer), - (keras_nlp.models.T5Backbone, keras_nlp.models.T5Tokenizer), - (keras_nlp.models.WhisperBackbone, keras_nlp.models.WhisperTokenizer), - (keras_nlp.models.XLMRobertaBackbone, keras_nlp.models.XLMRobertaTokenizer), -] -for backbone_cls, tokenizer_cls in backbone_models: - for preset in backbone_cls.presets: - backbone = backbone_cls.from_preset(preset) - tokenizer = tokenizer_cls.from_preset(preset) - save_to_preset( - backbone, - preset, - config_filename="config.json", - ) - save_to_preset( - tokenizer, - preset, - config_filename="tokenizer.json", - ) - # Delete first to clean up any exising version. - os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}") - os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}") - for root, _, files in os.walk(preset): - for file in files: - path = os.path.join(BUCKET, root, file) - os.system( - f"gcloud storage objects update gs://{path} " - "--add-acl-grant=entity=AllUsers,role=READER" - ) - # Clean up local disk usage. - shutil.rmtree("models") - shutil.rmtree(preset) -# Handle our single task model. -preset = "bert_tiny_en_uncased_sst2" -task = keras_nlp.models.BertClassifier.from_preset(preset) -tokenizer = keras_nlp.models.BertTokenizer.from_preset(preset) -save_to_preset( - task, - preset, - config_filename="config.json", -) -save_to_preset( - tokenizer, - preset, - config_filename="tokenizer.json", -) -# Delete first to clean up any exising version. 
-os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
-os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
-for root, _, files in os.walk(preset):
-    for file in files:
-        path = os.path.join(BUCKET, root, file)
-        os.system(
-            f"gcloud storage objects update gs://{path} "
-            "--add-acl-grant=entity=AllUsers,role=READER"
-        )
-# Clean up local disk usage.
-shutil.rmtree("models")
-shutil.rmtree(preset)
+def to_snake_case(name):
+    name = re.sub(r"\W+", "", name)
+    name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
+    name = re.sub("([a-z])([A-Z])", r"\1_\2", name).lower()
+    return name
+
+
+if __name__ == "__main__":
+    backbone_models = [
+        (models.AlbertBackbone, models.AlbertTokenizer),
+        (models.BartBackbone, models.BartTokenizer),
+        (models.BertBackbone, models.BertTokenizer),
+        (models.DebertaV3Backbone, models.DebertaV3Tokenizer),
+        (models.DistilBertBackbone, models.DistilBertTokenizer),
+        (models.FNetBackbone, models.FNetTokenizer),
+        (models.GPT2Backbone, models.GPT2Tokenizer),
+        (models.OPTBackbone, models.OPTTokenizer),
+        (models.RobertaBackbone, models.RobertaTokenizer),
+        (models.T5Backbone, models.T5Tokenizer),
+        (models.WhisperBackbone, models.WhisperTokenizer),
+        (models.XLMRobertaBackbone, models.XLMRobertaTokenizer),
+    ]
+    for backbone_cls, tokenizer_cls in backbone_models:
+        for preset in backbone_cls.presets:
+            backbone = backbone_cls.from_preset(
+                preset, name=to_snake_case(backbone_cls.__name__)
+            )
+            tokenizer = tokenizer_cls.from_preset(
+                preset, name=to_snake_case(tokenizer_cls.__name__)
+            )
+            save_to_preset(
+                backbone,
+                preset,
+                config_filename="config.json",
+            )
+            save_to_preset(
+                tokenizer,
+                preset,
+                config_filename="tokenizer.json",
+            )
+            # Delete first to clean up any existing version.
+            os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
+            os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
+            for root, _, files in os.walk(preset):
+                for file in files:
+                    path = os.path.join(BUCKET, root, file)
+                    os.system(
+                        f"gcloud storage objects update gs://{path} "
+                        "--add-acl-grant=entity=AllUsers,role=READER"
+                    )
+            # Clean up local disk usage.
+            shutil.rmtree("models")
+            shutil.rmtree(preset)
+
+    # Handle our single task model.
+    preset = "bert_tiny_en_uncased_sst2"
+    task = models.BertClassifier.from_preset(
+        preset, name=to_snake_case(models.BertClassifier.__name__)
+    )
+    tokenizer = models.BertTokenizer.from_preset(
+        preset, name=to_snake_case(models.BertTokenizer.__name__)
+    )
+    save_to_preset(
+        task,
+        preset,
+        config_filename="config.json",
+    )
+    save_to_preset(
+        tokenizer,
+        preset,
+        config_filename="tokenizer.json",
+    )
+    # Delete first to clean up any existing version.
+    os.system(f"gsutil rm -rf gs://{BUCKET}/{preset}")
+    os.system(f"gsutil cp -r {preset} gs://{BUCKET}/{preset}")
+    for root, _, files in os.walk(preset):
+        for file in files:
+            path = os.path.join(BUCKET, root, file)
+            os.system(
+                f"gcloud storage objects update gs://{path} "
+                "--add-acl-grant=entity=AllUsers,role=READER"
+            )
+    # Clean up local disk usage.
+    shutil.rmtree("models")
+    shutil.rmtree(preset)

From 7cc43234a01878eb316cccd2aa6773341c2a0948 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Wed, 6 Dec 2023 16:37:03 -0800
Subject: [PATCH 68/87] Improve preset error messages (#1349)

---
 keras_nlp/utils/preset_utils.py      | 16 +++++++++++++++-
 keras_nlp/utils/preset_utils_test.py |  7 +++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index 3b31eb244f..8bd9ef851f 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -30,6 +30,10 @@
 
 def get_file(preset, path):
     """Download a preset file if necessary and return the local path."""
+    if not isinstance(preset, str):
+        raise ValueError(
+            f"A preset identifier must be a string. Received: preset={preset}"
+        )
     if preset.startswith(KAGGLE_PREFIX):
         if kagglehub is None:
             raise ImportError(
@@ -64,9 +68,19 @@
             url,
             cache_subdir=os.path.join("models", subdir),
         )
-    else:
+    elif os.path.exists(preset):
         # Assume a local filepath.
         return os.path.join(preset, path)
+    else:
+        raise ValueError(
+            "Unknown preset identifier. A preset must be one of:\n"
+            "1) a built-in preset identifier like `'bert_base_en'`\n"
+            "2) a Kaggle Models handle like `'kaggle://keras/bert/bert_base_en'`\n"
+            "3) a path to a local preset directory like `'./bert_base_en'`\n"
+            "Use `print(cls.presets.keys())` to view all built-in presets for "
+            "API symbol `cls`.\n"
+            f"Received: preset='{preset}'"
+        )
 
 
 def get_tokenizer(layer):
diff --git a/keras_nlp/utils/preset_utils_test.py b/keras_nlp/utils/preset_utils_test.py
index 3190e09f50..e0c6be467c 100644
--- a/keras_nlp/utils/preset_utils_test.py
+++ b/keras_nlp/utils/preset_utils_test.py
@@ -87,3 +87,10 @@ def test_preset_saving(self, cls, preset_name, tokenizer_type):
         self.assertAllEqual(
             model(model_input_data), restored_model(restored_model_input_data)
         )
+
+    def test_preset_errors(self):
+        with self.assertRaisesRegex(ValueError, "must be a string"):
+            AlbertClassifier.from_preset(AlbertClassifier)
+
+        with self.assertRaisesRegex(ValueError, "Unknown preset identifier"):
+            AlbertClassifier.from_preset("snaggle://bort/bort/bort")

From 4606f32f8f009c92dcc65148eb9490d27c9f81ee Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Wed, 6 Dec 2023 16:37:32 -0800
Subject: [PATCH 69/87] Use subclass checking in check_preset_class (#1344)

Not currently needed for anything, just to keep in sync with KerasCV.
---
 keras_nlp/utils/preset_utils.py      |  4 +++-
 keras_nlp/utils/preset_utils_test.py | 23 +++++++++++++++++------
 2 files changed, 20 insertions(+), 7 deletions(-)

diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index 8bd9ef851f..2ee1e8ddd6 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -203,7 +203,9 @@ def check_preset_class(
         cls = keras.saving.get_registered_object(config["registered_name"])
     if not isinstance(classes, (tuple, list)):
         classes = (classes,)
-    if cls not in classes:
+    # Allow subclasses for testing a base class, e.g.
+    # `check_preset_class(preset, Backbone)`
+    if not any(issubclass(cls, x) for x in classes):
         raise ValueError(
             f"Unexpected class in preset `'{preset}'`. "
             "When calling `from_preset()` on a class object, the preset class "
diff --git a/keras_nlp/utils/preset_utils_test.py b/keras_nlp/utils/preset_utils_test.py
index e0c6be467c..44dc39f477 100644
--- a/keras_nlp/utils/preset_utils_test.py
+++ b/keras_nlp/utils/preset_utils_test.py
@@ -18,11 +18,15 @@
 import pytest
 from absl.testing import parameterized
 
-from keras_nlp.models import AlbertClassifier
-from keras_nlp.models import BertClassifier
-from keras_nlp.models import RobertaClassifier
+from keras_nlp.models.albert.albert_classifier import AlbertClassifier
+from keras_nlp.models.backbone import Backbone
+from keras_nlp.models.bert.bert_classifier import BertClassifier
+from keras_nlp.models.roberta.roberta_classifier import RobertaClassifier
+from keras_nlp.models.task import Task
 from keras_nlp.tests.test_case import TestCase
-from keras_nlp.utils import preset_utils
+from keras_nlp.utils.preset_utils import check_preset_class
+from keras_nlp.utils.preset_utils import load_from_preset
+from keras_nlp.utils.preset_utils import save_to_preset
 
 
 class PresetUtilsTest(TestCase):
@@ -36,7 +40,7 @@
     def test_preset_saving(self, cls, preset_name, tokenizer_type):
         save_dir = self.get_temp_dir()
         model = cls.from_preset(preset_name, num_classes=2)
-        preset_utils.save_to_preset(model, save_dir)
+        save_to_preset(model, save_dir)
 
         if tokenizer_type == "bytepair":
             vocab_filename = "assets/tokenizer/vocabulary.json"
@@ -72,7 +76,14 @@
         self.assertEqual(config["weights"], "model.weights.h5")
 
         # Try loading the model from preset directory
-        restored_model = preset_utils.load_from_preset(save_dir)
+        self.assertEqual(cls, check_preset_class(save_dir, cls))
+        self.assertEqual(cls, check_preset_class(save_dir, Task))
+        with self.assertRaises(ValueError):
+            # Preset is a subclass of Task, not Backbone.
+            check_preset_class(save_dir, Backbone)
+
+        # Try loading the model from preset directory
+        restored_model = load_from_preset(save_dir)
 
         train_data = (
             ["the quick brown fox.", "the slow brown fox."],  # Features.

From 6ad8a300fb65a3f515442f77e33ca12110fbd39f Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 7 Dec 2023 12:11:08 -0800
Subject: [PATCH 70/87] Add a hacky fix for TF 2.13 and 2.14 weights.h5
 loading (#1353)

We have a bug where weights.h5 for a functional model will read and
write to the wrong paths in TF 2.13 and 2.14. We can work around this
for these versions (while thankfully needing none of this for Keras 3).
---
 keras_nlp/utils/preset_utils.py | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index 2ee1e8ddd6..c03e6a0770 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 import datetime
+import inspect
 import json
 import os
 
@@ -159,6 +160,21 @@
         metadata_file.write(json.dumps(metadata, indent=4))
 
 
+def legacy_load_weights(layer, weights_path):
+    # Hacky fix for TensorFlow 2.13 and 2.14 when loading a `.weights.h5` file.
+    # We find the `Functional` class, and temporarily remove the
+    # `_layer_checkpoint_dependencies` property, which on older versions of
+    # TensorFlow completely broke the variable paths for functional models.
+    functional_cls = None
+    for cls in inspect.getmro(layer.__class__):
+        if cls.__name__ == "Functional":
+            functional_cls = cls
+    property = functional_cls._layer_checkpoint_dependencies
+    functional_cls._layer_checkpoint_dependencies = None
+    layer.load_weights(weights_path)
+    functional_cls._layer_checkpoint_dependencies = property
+
+
 def load_from_preset(
     preset,
     load_weights=True,
@@ -186,7 +202,10 @@ def load_from_preset(
         load_weights = load_weights and config["weights"]
         if load_weights:
             weights_path = get_file(preset, config["weights"])
-            layer.load_weights(weights_path)
+            if hasattr(layer, "_layer_checkpoint_dependencies"):
+                legacy_load_weights(layer, weights_path)
+            else:
+                layer.load_weights(weights_path)
 
     return layer

From 9cb5838c9c01cca32d3863f43112bc2b9e3cdabe Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 7 Dec 2023 15:16:23 -0800
Subject: [PATCH 71/87] Another fix for saving on Keras 2 (#1354)

We would also have caught this if we were running our large tests on
2.13 or 2.14.

TensorFlow will turn all dictionary attributes on a layer into
"trackable dicts"; Python does not know how to save these, so we need
to cast our byte pair vocabulary to a plain dict before writing the
JSON.
---
 keras_nlp/tokenizers/byte_pair_tokenizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py
index 5a84a5ac95..95dad35744 100644
--- a/keras_nlp/tokenizers/byte_pair_tokenizer.py
+++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py
@@ -336,7 +336,7 @@ def save_assets(self, dir_path):
         vocab_path = os.path.join(dir_path, VOCAB_FILENAME)
         merges_path = os.path.join(dir_path, MERGES_FILENAME)
         with open(vocab_path, "w") as file:
-            file.write(json.dumps(self.vocabulary))
+            file.write(json.dumps(dict(self.vocabulary)))
         with open(merges_path, "w") as file:
             for merge in self.merges:
                 file.write(f"{merge}\n")

From 039ff4520bde904999ae6246309712a68a8ec85b Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 7 Dec 2023 15:49:28 -0800
Subject: [PATCH 72/87] Switch our presets to their final Kaggle location
 (#1345)

---
 keras_nlp/models/albert/albert_presets.py     |  8 +++----
 keras_nlp/models/bart/bart_presets.py         |  6 ++---
 keras_nlp/models/bert/bert_presets.py         | 22 +++++++++----------
 .../models/deberta_v3/deberta_v3_presets.py   | 10 ++++-----
 .../models/distil_bert/distil_bert_presets.py |  6 ++---
 keras_nlp/models/f_net/f_net_presets.py       |  4 ++--
 keras_nlp/models/gpt2/gpt2_presets.py         | 10 ++++-----
 keras_nlp/models/opt/opt_presets.py           |  8 +++----
 keras_nlp/models/roberta/roberta_presets.py   |  4 ++--
 keras_nlp/models/t5/t5_presets.py             | 12 +++++-----
 keras_nlp/models/whisper/whisper_presets.py   | 20 ++++++++---------
 .../models/xlm_roberta/xlm_roberta_presets.py |  4 ++--
 requirements-common.txt                       |  1 +
 setup.py                                      |  1 +
 14 files changed, 58 insertions(+), 58 deletions(-)

diff --git a/keras_nlp/models/albert/albert_presets.py b/keras_nlp/models/albert/albert_presets.py
index eb163a64bf..c65f6861b0 100644
--- a/keras_nlp/models/albert/albert_presets.py
+++ b/keras_nlp/models/albert/albert_presets.py
@@ -26,7 +26,7 @@
             "path": "albert",
             "model_card": "https://github.com/google-research/albert/blob/master/README.md",
         },
-        "kaggle_handle": "gs://keras-nlp-kaggle/albert_base_en_uncased",
+        "kaggle_handle": "kaggle://keras/albert/albert_base_en_uncased/1",
    },
    "albert_large_en_uncased": {
        "metadata": {
@@ -39,7 +39,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/albert_large_en_uncased", + "kaggle_handle": "kaggle://keras/albert/albert_large_en_uncased/1", }, "albert_extra_large_en_uncased": { "metadata": { @@ -52,7 +52,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_large_en_uncased", + "kaggle_handle": "kaggle://keras/albert/albert_extra_large_en_uncased/1", }, "albert_extra_extra_large_en_uncased": { "metadata": { @@ -65,6 +65,6 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/albert_extra_extra_large_en_uncased", + "kaggle_handle": "kaggle://keras/albert/albert_extra_extra_large_en_uncased/1", }, } diff --git a/keras_nlp/models/bart/bart_presets.py b/keras_nlp/models/bart/bart_presets.py index d5547b37da..a0f4c80bda 100644 --- a/keras_nlp/models/bart/bart_presets.py +++ b/keras_nlp/models/bart/bart_presets.py @@ -25,7 +25,7 @@ "path": "bart", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bart_base_en", + "kaggle_handle": "kaggle://keras/bart/bart_base_en/1", }, "bart_large_en": { "metadata": { @@ -47,7 +47,7 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en", + "kaggle_handle": "kaggle://keras/bart/bart_large_en/1", }, "bart_large_en_cnn": { "metadata": { @@ -69,6 +69,6 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "gs://keras-nlp-kaggle/bart_large_en_cnn", + "kaggle_handle": "kaggle://keras/bart/bart_large_en_cnn/1", }, } diff --git a/keras_nlp/models/bert/bert_presets.py b/keras_nlp/models/bert/bert_presets.py index 6919d2b566..b3de88d991 100644 --- a/keras_nlp/models/bert/bert_presets.py +++ b/keras_nlp/models/bert/bert_presets.py @@ -13,8 +13,6 @@ # limitations under the License. """BERT model preset configurations.""" -# TODO(jbischof): document presets in keras.io and use URL in docstrings -# Metadata for loading pretrained model weights. 
backbone_presets = { "bert_tiny_en_uncased": { "metadata": { @@ -27,7 +25,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_tiny_en_uncased", + "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased/1", }, "bert_small_en_uncased": { "metadata": { @@ -40,7 +38,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_small_en_uncased", + "kaggle_handle": "kaggle://keras/bert/bert_small_en_uncased/1", }, "bert_medium_en_uncased": { "metadata": { @@ -53,7 +51,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_medium_en_uncased", + "kaggle_handle": "kaggle://keras/bert/bert_medium_en_uncased/1", }, "bert_base_en_uncased": { "metadata": { @@ -66,7 +64,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_en_uncased", + "kaggle_handle": "kaggle://keras/bert/bert_base_en_uncased/1", }, "bert_base_en": { "metadata": { @@ -79,7 +77,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_en", + "kaggle_handle": "kaggle://keras/bert/bert_base_en/1", }, "bert_base_zh": { "metadata": { @@ -91,7 +89,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_zh", + "kaggle_handle": "kaggle://keras/bert/bert_base_zh/1", }, "bert_base_multi": { "metadata": { @@ -103,7 +101,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_base_multi", + "kaggle_handle": "kaggle://keras/bert/bert_base_multi/1", }, "bert_large_en_uncased": { "metadata": { @@ -116,7 +114,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_large_en_uncased", + "kaggle_handle": "kaggle://keras/bert/bert_large_en_uncased/1", }, "bert_large_en": { "metadata": { @@ -129,7 +127,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_large_en", + "kaggle_handle": "kaggle://keras/bert/bert_large_en/1", }, } @@ -144,6 +142,6 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/bert_tiny_en_uncased_sst2", + "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased_sst2/1", } } diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets.py b/keras_nlp/models/deberta_v3/deberta_v3_presets.py index 771d7ad9c5..28db7bbe11 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets.py @@ -25,7 +25,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-xsmall", }, - "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_extra_small_en", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_extra_small_en/1", }, "deberta_v3_small_en": { "metadata": { @@ -38,7 +38,7 @@ "path": 
"deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-small", }, - "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_small_en", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_small_en/1", }, "deberta_v3_base_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-base", }, - "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_base_en", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_en/1", }, "deberta_v3_large_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-large", }, - "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_large_en", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_large_en/1", }, "deberta_v3_base_multi": { "metadata": { @@ -77,6 +77,6 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/mdeberta-v3-base", }, - "kaggle_handle": "gs://keras-nlp-kaggle/deberta_v3_base_multi", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_multi/1", }, } diff --git a/keras_nlp/models/distil_bert/distil_bert_presets.py b/keras_nlp/models/distil_bert/distil_bert_presets.py index b2a99ef688..d4f83779f8 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets.py @@ -26,7 +26,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-uncased", }, - "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_en_uncased", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en_uncased/1", }, "distil_bert_base_en": { "metadata": { @@ -40,7 +40,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-cased", }, - "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_en", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en/1", }, "distil_bert_base_multi": { "metadata": { @@ -52,6 +52,6 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-multilingual-cased", }, - "kaggle_handle": "gs://keras-nlp-kaggle/distil_bert_base_multi", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_multi/1", }, } diff --git a/keras_nlp/models/f_net/f_net_presets.py b/keras_nlp/models/f_net/f_net_presets.py index 48cc9827b4..bc5b6b45e2 100644 --- a/keras_nlp/models/f_net/f_net_presets.py +++ b/keras_nlp/models/f_net/f_net_presets.py @@ -25,7 +25,7 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/f_net_base_en", + "kaggle_handle": "kaggle://keras/f_net/f_net_base_en/1", }, "f_net_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/f_net_large_en", + "kaggle_handle": "kaggle://keras/f_net/f_net_large_en/1", }, } diff --git a/keras_nlp/models/gpt2/gpt2_presets.py b/keras_nlp/models/gpt2/gpt2_presets.py index e5e546a92a..3c0e6e35a3 100644 --- a/keras_nlp/models/gpt2/gpt2_presets.py +++ b/keras_nlp/models/gpt2/gpt2_presets.py @@ -26,7 +26,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_base_en", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en/1", }, "gpt2_medium_en": { "metadata": { @@ -39,7 +39,7 @@ "path": "gpt2", "model_card": 
"https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_medium_en", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_medium_en/1", }, "gpt2_large_en": { "metadata": { @@ -52,7 +52,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_large_en", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_large_en/1", }, "gpt2_extra_large_en": { "metadata": { @@ -65,7 +65,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_extra_large_en", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_extra_large_en/1", }, "gpt2_base_en_cnn_dailymail": { "metadata": { @@ -77,6 +77,6 @@ "official_name": "GPT-2", "path": "gpt2", }, - "kaggle_handle": "gs://keras-nlp-kaggle/gpt2_base_en_cnn_dailymail", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en_cnn_dailymail/1", }, } diff --git a/keras_nlp/models/opt/opt_presets.py b/keras_nlp/models/opt/opt_presets.py index 3ca0fd7b32..73cb6d57e9 100644 --- a/keras_nlp/models/opt/opt_presets.py +++ b/keras_nlp/models/opt/opt_presets.py @@ -26,7 +26,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/opt_125m_en", + "kaggle_handle": "kaggle://keras/opt/opt_125m_en/1", }, # We skip the 350m checkpoint because it does not match the structure of # other checkpoints. @@ -41,7 +41,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/opt_1.3b_en", + "kaggle_handle": "kaggle://keras/opt/opt_1.3b_en/1", }, "opt_2.7b_en": { "metadata": { @@ -54,7 +54,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/opt_2.7b_en", + "kaggle_handle": "kaggle://keras/opt/opt_2.7b_en/1", }, "opt_6.7b_en": { "metadata": { @@ -67,6 +67,6 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/opt_6.7b_en", + "kaggle_handle": "kaggle://keras/opt/opt_6.7b_en/1", }, } diff --git a/keras_nlp/models/roberta/roberta_presets.py b/keras_nlp/models/roberta/roberta_presets.py index a57f7cf479..7aa1d91dea 100644 --- a/keras_nlp/models/roberta/roberta_presets.py +++ b/keras_nlp/models/roberta/roberta_presets.py @@ -25,7 +25,7 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/roberta_base_en", + "kaggle_handle": "kaggle://keras/roberta/roberta_base_en/1", }, "roberta_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/roberta_large_en", + "kaggle_handle": "kaggle://keras/roberta/roberta_large_en/1", }, } diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index d5c502c5ba..34356e2098 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -25,7 +25,7 @@ "path": "t5", "model_card": 
"https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/t5_small_multi", + "kaggle_handle": "kaggle://keras/t5/t5_small_multi/1", }, "t5_base_multi": { "metadata": { @@ -38,7 +38,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/t5_base_multi", + "kaggle_handle": "kaggle://keras/t5/t5_base_multi/1", }, "t5_large_multi": { "metadata": { @@ -51,7 +51,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/t5_large_multi", + "kaggle_handle": "kaggle://keras/t5/t5_large_multi/1", }, "flan_small_multi": { "metadata": { @@ -64,7 +64,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/flan_small_multi", + "kaggle_handle": "kaggle://keras/t5/flan_small_multi/1", }, "flan_base_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/flan_base_multi", + "kaggle_handle": "kaggle://keras/t5/flan_base_multi/1", }, "flan_large_multi": { "metadata": { @@ -90,6 +90,6 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/flan_large_multi", + "kaggle_handle": "kaggle://keras/t5/flan_large_multi/1", }, } diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index 81c10ce870..1b3c7b4fe0 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -25,7 +25,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_tiny_en", + "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_en/1", }, "whisper_base_en": { "metadata": { @@ -38,7 +38,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_base_en", + "kaggle_handle": "kaggle://keras/whisper/whisper_base_en/1", }, "whisper_small_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_small_en", + "kaggle_handle": "kaggle://keras/whisper/whisper_small_en/1", }, "whisper_medium_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_medium_en", + "kaggle_handle": "kaggle://keras/whisper/whisper_medium_en/1", }, "whisper_tiny_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_tiny_multi", + "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_multi/1", }, "whisper_base_multi": { "metadata": { @@ -90,7 +90,7 @@ "path": "whisper", "model_card": 
"https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_base_multi", + "kaggle_handle": "kaggle://keras/whisper/whisper_base_multi/1", }, "whisper_small_multi": { "metadata": { @@ -103,7 +103,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_small_multi", + "kaggle_handle": "kaggle://keras/whisper/whisper_small_multi/1", }, "whisper_medium_multi": { "metadata": { @@ -116,7 +116,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_medium_multi", + "kaggle_handle": "kaggle://keras/whisper/whisper_medium_multi/1", }, "whisper_large_multi": { "metadata": { @@ -129,7 +129,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_large_multi", + "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi/1", }, "whisper_large_multi_v2": { "metadata": { @@ -143,6 +143,6 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/whisper_large_multi_v2", + "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi_v2/1", }, } diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py index 5b7a571e48..f4b486a22a 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py @@ -25,7 +25,7 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/xlm_roberta_base_multi", + "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_base_multi/1", }, "xlm_roberta_large_multi": { "metadata": { @@ -38,6 +38,6 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "kaggle_handle": "gs://keras-nlp-kaggle/xlm_roberta_large_multi", + "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_large_multi/1", }, } diff --git a/requirements-common.txt b/requirements-common.txt index 5c9710de4b..5c2a8a3d90 100644 --- a/requirements-common.txt +++ b/requirements-common.txt @@ -2,6 +2,7 @@ dm-tree regex rich +kagglehub # Tooling deps. astor packaging diff --git a/setup.py b/setup.py index 6acd5416d7..b246d18ea1 100644 --- a/setup.py +++ b/setup.py @@ -62,6 +62,7 @@ def get_version(rel_path): "regex", "rich", "dm-tree", + "kagglehub", # Don't require tensorflow-text on MacOS, there are no binaries for ARM. # Also, we rely on tensorflow *transitively* through tensorflow-text. 
# This avoid a slowdown during `pip install keras-nlp` where pip would From 9cc3f84e3e490008c078441154358715a4c62d08 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Tue, 12 Dec 2023 15:18:59 -0800 Subject: [PATCH 73/87] Fix rebase issue in bytepair tokenizer (#1366) --- keras_nlp/tokenizers/byte_pair_tokenizer.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index 95dad35744..de5fe82421 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -295,27 +295,6 @@ def __init__( ) super().__init__(dtype=dtype, **kwargs) - - if isinstance(vocabulary, str): - with open(vocabulary, "r", encoding="utf-8") as f: - self.vocabulary = json.load(f) - elif isinstance(vocabulary, dict): - self.vocabulary = vocabulary.copy() - else: - raise ValueError( - "Vocabulary must be an file path or dictionary mapping string " - "token to int ids. Received: " - f"`type(vocabulary)={type(vocabulary)}`." - ) - if isinstance(merges, str): - self.merges = [bp.rstrip() for bp in tf.io.gfile.GFile(merges)] - elif isinstance(merges, Iterable): - self.merges = list(merges) - else: - raise ValueError( - "Merges must be a file path or a list of merge rules. " - f"Received: `type(merges)={type(merges)}`" - ) self.sequence_length = sequence_length self.add_prefix_space = add_prefix_space self.unsplittable_tokens = unsplittable_tokens From 6f7f9a0247d67eac08caeb21a8a92520135436f3 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Wed, 13 Dec 2023 10:24:30 -0600 Subject: [PATCH 74/87] Change encoding to utf-8 to fix Kaggle branch test failure for PyTorch (#1367) * Change encoding to utf-8 * Change encoding to utf-8 * Change encoding to utf-8 --- keras_nlp/tokenizers/byte_pair_tokenizer.py | 8 ++++---- keras_nlp/tokenizers/word_piece_tokenizer.py | 4 ++-- keras_nlp/tokenizers/word_piece_tokenizer_trainer.py | 2 +- keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/keras_nlp/tokenizers/byte_pair_tokenizer.py b/keras_nlp/tokenizers/byte_pair_tokenizer.py index de5fe82421..55992a16d7 100644 --- a/keras_nlp/tokenizers/byte_pair_tokenizer.py +++ b/keras_nlp/tokenizers/byte_pair_tokenizer.py @@ -314,9 +314,9 @@ def __init__( def save_assets(self, dir_path): vocab_path = os.path.join(dir_path, VOCAB_FILENAME) merges_path = os.path.join(dir_path, MERGES_FILENAME) - with open(vocab_path, "w") as file: + with open(vocab_path, "w", encoding="utf-8") as file: file.write(json.dumps(dict(self.vocabulary))) - with open(merges_path, "w") as file: + with open(merges_path, "w", encoding="utf-8") as file: for merge in self.merges: file.write(f"{merge}\n") @@ -339,7 +339,7 @@ def set_vocabulary_and_merges(self, vocabulary, merges): return if isinstance(vocabulary, str): - with open(vocabulary, "r") as f: + with open(vocabulary, "r", encoding="utf-8") as f: self.vocabulary = json.load(f) elif isinstance(vocabulary, dict): self.vocabulary = vocabulary.copy() @@ -350,7 +350,7 @@ def set_vocabulary_and_merges(self, vocabulary, merges): f"`type(vocabulary)={type(vocabulary)}`." 
) if isinstance(merges, str): - self.merges = [bp.rstrip() for bp in open(merges)] + self.merges = [bp.rstrip() for bp in open(merges, encoding="utf-8")] elif isinstance(merges, Iterable): self.merges = list(merges) else: diff --git a/keras_nlp/tokenizers/word_piece_tokenizer.py b/keras_nlp/tokenizers/word_piece_tokenizer.py index 4e7b05b230..75f956899f 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer.py @@ -329,7 +329,7 @@ def __init__( def save_assets(self, dir_path): path = os.path.join(dir_path, VOCAB_FILENAME) - with open(path, "w") as file: + with open(path, "w", encoding="utf-8") as file: for token in self.vocabulary: file.write(f"{token}\n") @@ -345,7 +345,7 @@ def set_vocabulary(self, vocabulary): return if isinstance(vocabulary, str): - with open(vocabulary) as file: + with open(vocabulary, "r", encoding="utf-8") as file: self.vocabulary = [line.rstrip() for line in file] elif isinstance(vocabulary, Iterable): # Make a defensive copy. diff --git a/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py b/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py index 8571097e06..dc90075a5c 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer_trainer.py @@ -172,7 +172,7 @@ def normalize_and_split(x): if vocabulary_output_file is not None: vocab_text = "".join([line + "\n" for line in vocab]) # Write vocab to file. - with open(vocabulary_output_file, "w") as vocab_file: + with open(vocabulary_output_file, "w", encoding="utf-8") as vocab_file: vocab_file.write(vocab_text) else: return vocab diff --git a/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py b/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py index 62d0f4adf1..03186944bc 100644 --- a/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py +++ b/keras_nlp/tokenizers/word_piece_tokenizer_trainer_test.py @@ -177,7 +177,7 @@ def test_output_file(self): reserved_tokens=[], ) vocab_from_file = [] - with open(vocab_file, "r") as f: + with open(vocab_file, "r", encoding="utf-8") as f: for line in f: vocab_from_file.append(line.strip()) self.assertAllEqual(vocab_from_file, test_output) From ddfca77bc8757caa8e29004da7ff48104984ab82 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Wed, 13 Dec 2023 20:30:06 -0800 Subject: [PATCH 75/87] Fix GPU test issue with Keras 2 (#1368) * Fix layer checkpoint deps * Delete layer checkpoint deps * Change to empty list --- keras_nlp/utils/preset_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index c03e6a0770..8a5927300c 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -170,7 +170,7 @@ def legacy_load_weights(layer, weights_path): if cls.__name__ == "Functional": functional_cls = cls property = functional_cls._layer_checkpoint_dependencies - functional_cls._layer_checkpoint_dependencies = None + functional_cls._layer_checkpoint_dependencies = [] layer.load_weights(weights_path) functional_cls._layer_checkpoint_dependencies = property From 0e43f09c0853d59b285f049470df3c240ff76166 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Thu, 14 Dec 2023 17:37:19 -0800 Subject: [PATCH 76/87] Add in-place modification of file keys for backwards compatibility (#1369) --- keras_nlp/utils/preset_utils.py | 16 ++++++++++++++++ 1 file 
changed, 16 insertions(+) diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index 8a5927300c..72e205f401 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -17,6 +17,9 @@ import json import os +import h5py + +from keras_nlp.backend import config as backend_config from keras_nlp.backend import keras try: @@ -171,6 +174,19 @@ def legacy_load_weights(layer, weights_path): functional_cls = cls property = functional_cls._layer_checkpoint_dependencies functional_cls._layer_checkpoint_dependencies = [] + + from keras_nlp.models.backbone import Backbone + + if not backend_config.keras_3() and isinstance(layer, Backbone): + # Hacky fix for Keras 2 backwards compatibility. Keras 2 traverses loading + # weights in the reverse order, causing a naming mismatch when loading + # Kaggle weights saved from Keras 3. + f = h5py.File(weights_path, "r+") + if "_token_embedding" in f.keys(): + data = f["_token_embedding"] + f["token_embedding"] = data + f.close() + layer.load_weights(weights_path) functional_cls._layer_checkpoint_dependencies = property From 4d84eb164746ecc0511fcd8f5b3f28acb6139e40 Mon Sep 17 00:00:00 2001 From: Neel Kovelamudi <60985914+nkovela1@users.noreply.github.com> Date: Fri, 15 Dec 2023 17:22:27 -0800 Subject: [PATCH 77/87] Add file renaming logic for modification (#1370) --- keras_nlp/utils/preset_utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index 72e205f401..3678a988a9 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -181,6 +181,12 @@ def legacy_load_weights(layer, weights_path): # Hacky fix for Keras 2 backwards compatibility. Keras 2 traverses loading # weights in the reverse order, causing a naming mismatch when loading # Kaggle weights saved from Keras 3. + new_weights_path = os.path.join( + os.path.dirname(weights_path), + "legacy_" + os.path.basename(weights_path), + ) + os.rename(weights_path, new_weights_path) + weights_path = new_weights_path f = h5py.File(weights_path, "r+") if "_token_embedding" in f.keys(): data = f["_token_embedding"] From 29a0ae5af78c7ac756500e907cbabc850b047168 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Tue, 19 Dec 2023 21:29:22 -0600 Subject: [PATCH 78/87] Fix task pre-processor in tasks (#1373) * Allow custom preprocessor * Allow custom preprocessor * Fix preprocessor validation in tasks --- keras_nlp/models/task.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index 97f06d0b1d..772b540db4 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -174,6 +174,13 @@ def from_preset( ) ``` """ + if "backbone" in kwargs: + raise ValueError( + "You cannot pass a `backbone` argument to the `from_preset` " + f"method. Instead, call the {cls.__name__} default " + "constructor with a `backbone` argument. " + f"Received: backbone={kwargs['backbone']}." + ) # We support short IDs for official presets, e.g. `"bert_base_en"`. # Map these to a Kaggle Models handle. 
if preset in cls.presets: @@ -187,11 +194,14 @@ def from_preset( preset, load_weights=load_weights, ) - tokenizer = load_from_preset( - preset, - config_file="tokenizer.json", - ) - preprocessor = cls.preprocessor_cls(tokenizer=tokenizer) + if "preprocessor" in kwargs: + preprocessor = kwargs.pop("preprocessor") + else: + tokenizer = load_from_preset( + preset, + config_file="tokenizer.json", + ) + preprocessor = cls.preprocessor_cls(tokenizer=tokenizer) return cls(backbone=backbone, preprocessor=preprocessor, **kwargs) # Task case. From 401e5698b9990d23997b57398e4935630b93fb5f Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 4 Jan 2024 10:52:18 -0800 Subject: [PATCH 79/87] Backwards compatible fix for functional model saving (#1378) The functional model attribute path should always take precedence, so we can have stable checkpoints for functional subclassed models. See https://github.com/keras-team/keras/pull/18982/ Note that this will cause a bunch of failures until we re-upload weights. We should apply this workaround to Keras 3 and Keras 2 until we release Keras 3. Then restrict to only Keras 2. Then finally delete entirely when we drop Keras 2 support. --- keras_nlp/models/albert/albert_presets.py | 8 ++-- keras_nlp/models/backbone.py | 13 ++++++ keras_nlp/models/bart/bart_presets.py | 6 +-- keras_nlp/models/bert/bert_presets.py | 20 ++++----- .../models/deberta_v3/deberta_v3_presets.py | 10 ++--- .../models/distil_bert/distil_bert_presets.py | 6 +-- keras_nlp/models/f_net/f_net_presets.py | 4 +- keras_nlp/models/gpt2/gpt2_presets.py | 10 ++--- keras_nlp/models/opt/opt_presets.py | 8 ++-- keras_nlp/models/roberta/roberta_presets.py | 4 +- keras_nlp/models/t5/t5_presets.py | 12 +++--- keras_nlp/models/task.py | 13 ++++++ keras_nlp/models/whisper/whisper_presets.py | 20 ++++----- .../models/xlm_roberta/xlm_roberta_presets.py | 4 +- keras_nlp/utils/preset_utils.py | 43 +------------------ 15 files changed, 83 insertions(+), 98 deletions(-) diff --git a/keras_nlp/models/albert/albert_presets.py b/keras_nlp/models/albert/albert_presets.py index c65f6861b0..3314241feb 100644 --- a/keras_nlp/models/albert/albert_presets.py +++ b/keras_nlp/models/albert/albert_presets.py @@ -26,7 +26,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_base_en_uncased/1", + "kaggle_handle": "kaggle://keras/albert/albert_base_en_uncased/2", }, "albert_large_en_uncased": { "metadata": { @@ -39,7 +39,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_large_en_uncased/1", + "kaggle_handle": "kaggle://keras/albert/albert_large_en_uncased/2", }, "albert_extra_large_en_uncased": { "metadata": { @@ -52,7 +52,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_extra_large_en_uncased/1", + "kaggle_handle": "kaggle://keras/albert/albert_extra_large_en_uncased/2", }, "albert_extra_extra_large_en_uncased": { "metadata": { @@ -65,6 +65,6 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_extra_extra_large_en_uncased/1", + "kaggle_handle": 
"kaggle://keras/albert/albert_extra_extra_large_en_uncased/2", }, } diff --git a/keras_nlp/models/backbone.py b/keras_nlp/models/backbone.py index 9b8f9a5a96..69da56593b 100644 --- a/keras_nlp/models/backbone.py +++ b/keras_nlp/models/backbone.py @@ -24,6 +24,19 @@ class Backbone(keras.Model): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._token_embedding = None + self._functional_layer_ids = set( + id(layer) for layer in self._flatten_layers() + ) + + def __dir__(self): + # Temporary fixes for weight saving. This mimics the following PR for + # older version of Keras: https://github.com/keras-team/keras/pull/18982 + def filter_fn(attr): + if attr == "_layer_checkpoint_dependencies": + return False + return id(getattr(self, attr)) not in self._functional_layer_ids + + return filter(filter_fn, super().__dir__()) def __setattr__(self, name, value): # Work around torch setattr for properties. diff --git a/keras_nlp/models/bart/bart_presets.py b/keras_nlp/models/bart/bart_presets.py index a0f4c80bda..6249d46744 100644 --- a/keras_nlp/models/bart/bart_presets.py +++ b/keras_nlp/models/bart/bart_presets.py @@ -25,7 +25,7 @@ "path": "bart", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md", }, - "kaggle_handle": "kaggle://keras/bart/bart_base_en/1", + "kaggle_handle": "kaggle://keras/bart/bart_base_en/2", }, "bart_large_en": { "metadata": { @@ -47,7 +47,7 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "kaggle://keras/bart/bart_large_en/1", + "kaggle_handle": "kaggle://keras/bart/bart_large_en/2", }, "bart_large_en_cnn": { "metadata": { @@ -69,6 +69,6 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "kaggle://keras/bart/bart_large_en_cnn/1", + "kaggle_handle": "kaggle://keras/bart/bart_large_en_cnn/2", }, } diff --git a/keras_nlp/models/bert/bert_presets.py b/keras_nlp/models/bert/bert_presets.py index b3de88d991..1730e17228 100644 --- a/keras_nlp/models/bert/bert_presets.py +++ b/keras_nlp/models/bert/bert_presets.py @@ -25,7 +25,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased/1", + "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased/2", }, "bert_small_en_uncased": { "metadata": { @@ -38,7 +38,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_small_en_uncased/1", + "kaggle_handle": "kaggle://keras/bert/bert_small_en_uncased/2", }, "bert_medium_en_uncased": { "metadata": { @@ -51,7 +51,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_medium_en_uncased/1", + "kaggle_handle": "kaggle://keras/bert/bert_medium_en_uncased/2", }, "bert_base_en_uncased": { "metadata": { @@ -64,7 +64,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_en_uncased/1", + "kaggle_handle": "kaggle://keras/bert/bert_base_en_uncased/2", }, "bert_base_en": { "metadata": { @@ -77,7 +77,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_en/1", + "kaggle_handle": "kaggle://keras/bert/bert_base_en/2", }, 
"bert_base_zh": { "metadata": { @@ -89,7 +89,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_zh/1", + "kaggle_handle": "kaggle://keras/bert/bert_base_zh/2", }, "bert_base_multi": { "metadata": { @@ -101,7 +101,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_multi/1", + "kaggle_handle": "kaggle://keras/bert/bert_base_multi/2", }, "bert_large_en_uncased": { "metadata": { @@ -114,7 +114,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_large_en_uncased/1", + "kaggle_handle": "kaggle://keras/bert/bert_large_en_uncased/2", }, "bert_large_en": { "metadata": { @@ -127,7 +127,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_large_en/1", + "kaggle_handle": "kaggle://keras/bert/bert_large_en/2", }, } @@ -142,6 +142,6 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased_sst2/1", + "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased_sst2/3", } } diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets.py b/keras_nlp/models/deberta_v3/deberta_v3_presets.py index 28db7bbe11..ba8734e787 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets.py @@ -25,7 +25,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-xsmall", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_extra_small_en/1", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_extra_small_en/2", }, "deberta_v3_small_en": { "metadata": { @@ -38,7 +38,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-small", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_small_en/1", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_small_en/2", }, "deberta_v3_base_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-base", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_en/1", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_en/2", }, "deberta_v3_large_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-large", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_large_en/1", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_large_en/2", }, "deberta_v3_base_multi": { "metadata": { @@ -77,6 +77,6 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/mdeberta-v3-base", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_multi/1", + "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_multi/2", }, } diff --git a/keras_nlp/models/distil_bert/distil_bert_presets.py b/keras_nlp/models/distil_bert/distil_bert_presets.py index d4f83779f8..cdbd234984 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets.py @@ -26,7 +26,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-uncased", }, - "kaggle_handle": 
"kaggle://keras/distil_bert/distil_bert_base_en_uncased/1", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en_uncased/2", }, "distil_bert_base_en": { "metadata": { @@ -40,7 +40,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-cased", }, - "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en/1", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en/2", }, "distil_bert_base_multi": { "metadata": { @@ -52,6 +52,6 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-multilingual-cased", }, - "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_multi/1", + "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_multi/2", }, } diff --git a/keras_nlp/models/f_net/f_net_presets.py b/keras_nlp/models/f_net/f_net_presets.py index bc5b6b45e2..0db5f1c2c9 100644 --- a/keras_nlp/models/f_net/f_net_presets.py +++ b/keras_nlp/models/f_net/f_net_presets.py @@ -25,7 +25,7 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": "kaggle://keras/f_net/f_net_base_en/1", + "kaggle_handle": "kaggle://keras/f_net/f_net_base_en/2", }, "f_net_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": "kaggle://keras/f_net/f_net_large_en/1", + "kaggle_handle": "kaggle://keras/f_net/f_net_large_en/2", }, } diff --git a/keras_nlp/models/gpt2/gpt2_presets.py b/keras_nlp/models/gpt2/gpt2_presets.py index 3c0e6e35a3..660c021f83 100644 --- a/keras_nlp/models/gpt2/gpt2_presets.py +++ b/keras_nlp/models/gpt2/gpt2_presets.py @@ -26,7 +26,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en/1", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en/2", }, "gpt2_medium_en": { "metadata": { @@ -39,7 +39,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_medium_en/1", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_medium_en/2", }, "gpt2_large_en": { "metadata": { @@ -52,7 +52,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_large_en/1", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_large_en/2", }, "gpt2_extra_large_en": { "metadata": { @@ -65,7 +65,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_extra_large_en/1", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_extra_large_en/2", }, "gpt2_base_en_cnn_dailymail": { "metadata": { @@ -77,6 +77,6 @@ "official_name": "GPT-2", "path": "gpt2", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en_cnn_dailymail/1", + "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en_cnn_dailymail/2", }, } diff --git a/keras_nlp/models/opt/opt_presets.py b/keras_nlp/models/opt/opt_presets.py index 73cb6d57e9..abafa14734 100644 --- a/keras_nlp/models/opt/opt_presets.py +++ b/keras_nlp/models/opt/opt_presets.py @@ -26,7 +26,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_125m_en/1", + "kaggle_handle": 
"kaggle://keras/opt/opt_125m_en/2", }, # We skip the 350m checkpoint because it does not match the structure of # other checkpoints. @@ -41,7 +41,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_1.3b_en/1", + "kaggle_handle": "kaggle://keras/opt/opt_1.3b_en/2", }, "opt_2.7b_en": { "metadata": { @@ -54,7 +54,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_2.7b_en/1", + "kaggle_handle": "kaggle://keras/opt/opt_2.7b_en/2", }, "opt_6.7b_en": { "metadata": { @@ -67,6 +67,6 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_6.7b_en/1", + "kaggle_handle": "kaggle://keras/opt/opt_6.7b_en/2", }, } diff --git a/keras_nlp/models/roberta/roberta_presets.py b/keras_nlp/models/roberta/roberta_presets.py index 7aa1d91dea..9ed6bc417e 100644 --- a/keras_nlp/models/roberta/roberta_presets.py +++ b/keras_nlp/models/roberta/roberta_presets.py @@ -25,7 +25,7 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "kaggle://keras/roberta/roberta_base_en/1", + "kaggle_handle": "kaggle://keras/roberta/roberta_base_en/2", }, "roberta_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "kaggle://keras/roberta/roberta_large_en/1", + "kaggle_handle": "kaggle://keras/roberta/roberta_large_en/2", }, } diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index 34356e2098..c25bef1c1a 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -25,7 +25,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_small_multi/1", + "kaggle_handle": "kaggle://keras/t5/t5_small_multi/2", }, "t5_base_multi": { "metadata": { @@ -38,7 +38,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_base_multi/1", + "kaggle_handle": "kaggle://keras/t5/t5_base_multi/2", }, "t5_large_multi": { "metadata": { @@ -51,7 +51,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_large_multi/1", + "kaggle_handle": "kaggle://keras/t5/t5_large_multi/2", }, "flan_small_multi": { "metadata": { @@ -64,7 +64,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_small_multi/1", + "kaggle_handle": "kaggle://keras/t5/flan_small_multi/2", }, "flan_base_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_base_multi/1", + "kaggle_handle": "kaggle://keras/t5/flan_base_multi/2", }, "flan_large_multi": { 
"metadata": { @@ -90,6 +90,6 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_large_multi/1", + "kaggle_handle": "kaggle://keras/t5/flan_large_multi/2", }, } diff --git a/keras_nlp/models/task.py b/keras_nlp/models/task.py index 772b540db4..ee28e3a984 100644 --- a/keras_nlp/models/task.py +++ b/keras_nlp/models/task.py @@ -34,6 +34,19 @@ def __init__(self, *args, **kwargs): self._backbone = None self._preprocessor = None super().__init__(*args, **kwargs) + self._functional_layer_ids = set( + id(layer) for layer in self._flatten_layers() + ) + + def __dir__(self): + # Temporary fixes for weight saving. This mimics the following PR for + # older version of Keras: https://github.com/keras-team/keras/pull/18982 + def filter_fn(attr): + if attr == "_layer_checkpoint_dependencies": + return False + return id(getattr(self, attr)) not in self._functional_layer_ids + + return filter(filter_fn, super().__dir__()) def _check_for_loss_mismatch(self, loss): """Check for a softmax/from_logits mismatch after compile. diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index 1b3c7b4fe0..7494eef8c5 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -25,7 +25,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_en/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_en/2", }, "whisper_base_en": { "metadata": { @@ -38,7 +38,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_base_en/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_base_en/2", }, "whisper_small_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_small_en/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_small_en/2", }, "whisper_medium_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_medium_en/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_medium_en/2", }, "whisper_tiny_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_multi/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_multi/2", }, "whisper_base_multi": { "metadata": { @@ -90,7 +90,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_base_multi/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_base_multi/2", }, "whisper_small_multi": { "metadata": { @@ -103,7 +103,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_small_multi/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_small_multi/2", }, "whisper_medium_multi": { "metadata": { @@ -116,7 +116,7 @@ "path": "whisper", "model_card": 
"https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_medium_multi/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_medium_multi/2", }, "whisper_large_multi": { "metadata": { @@ -129,7 +129,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi/2", }, "whisper_large_multi_v2": { "metadata": { @@ -143,6 +143,6 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi_v2/1", + "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi_v2/2", }, } diff --git a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py index f4b486a22a..d8676b468a 100644 --- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py +++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py @@ -25,7 +25,7 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_base_multi/1", + "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_base_multi/2", }, "xlm_roberta_large_multi": { "metadata": { @@ -38,6 +38,6 @@ "path": "xlm_roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md", }, - "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_large_multi/1", + "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_large_multi/2", }, } diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py index 3678a988a9..2ee1e8ddd6 100644 --- a/keras_nlp/utils/preset_utils.py +++ b/keras_nlp/utils/preset_utils.py @@ -13,13 +13,9 @@ # limitations under the License. import datetime -import inspect import json import os -import h5py - -from keras_nlp.backend import config as backend_config from keras_nlp.backend import keras try: @@ -163,40 +159,6 @@ def save_to_preset( metadata_file.write(json.dumps(metadata, indent=4)) -def legacy_load_weights(layer, weights_path): - # Hacky fix for TensorFlow 2.13 and 2.14 when loading a `.weights.h5` file. - # We find the `Functional` class, and temporarily remove the - # `_layer_checkpoint_dependencies` property, which on older version of - # TensorFlow complete broke the variable paths for functional models. - functional_cls = None - for cls in inspect.getmro(layer.__class__): - if cls.__name__ == "Functional": - functional_cls = cls - property = functional_cls._layer_checkpoint_dependencies - functional_cls._layer_checkpoint_dependencies = [] - - from keras_nlp.models.backbone import Backbone - - if not backend_config.keras_3() and isinstance(layer, Backbone): - # Hacky fix for Keras 2 backwards compatibility. Keras 2 traverses loading - # weights in the reverse order, causing a naming mismatch when loading - # Kaggle weights saved from Keras 3. 
- new_weights_path = os.path.join( - os.path.dirname(weights_path), - "legacy_" + os.path.basename(weights_path), - ) - os.rename(weights_path, new_weights_path) - weights_path = new_weights_path - f = h5py.File(weights_path, "r+") - if "_token_embedding" in f.keys(): - data = f["_token_embedding"] - f["token_embedding"] = data - f.close() - - layer.load_weights(weights_path) - functional_cls._layer_checkpoint_dependencies = property - - def load_from_preset( preset, load_weights=True, @@ -224,10 +186,7 @@ load_weights = load_weights and config["weights"] if load_weights: weights_path = get_file(preset, config["weights"]) - if hasattr(layer, "_layer_checkpoint_dependencies"): - legacy_load_weights(layer, weights_path) - else: - layer.load_weights(weights_path) + layer.load_weights(weights_path) return layer From f1ab62ad07459273de02831741905df3140653fd Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Thu, 4 Jan 2024 12:34:32 -0800 Subject: [PATCH 80/87] Fix mistral and electra tokenizer to match kaggle changes (#1387) We are changing all tokenizers to store vocabularies via assets (and not in the config). This requires some changes to tokenizers so file state can be set after object creation. --- keras_nlp/models/electra/electra_tokenizer.py | 43 +++++++++++-------- keras_nlp/models/mistral/mistral_tokenizer.py | 30 +++++++------ 2 files changed, 43 insertions(+), 30 deletions(-) diff --git a/keras_nlp/models/electra/electra_tokenizer.py b/keras_nlp/models/electra/electra_tokenizer.py index c6ec29c42b..acd665c2a3 100644 --- a/keras_nlp/models/electra/electra_tokenizer.py +++ b/keras_nlp/models/electra/electra_tokenizer.py @@ -58,22 +58,31 @@ class ElectraTokenizer(WordPieceTokenizer): """ def __init__(self, vocabulary, lowercase=False, **kwargs): + self.cls_token = "[CLS]" + self.sep_token = "[SEP]" + self.pad_token = "[PAD]" + self.mask_token = "[MASK]" super().__init__(vocabulary=vocabulary, lowercase=lowercase, **kwargs) - # Check for special tokens - cls_token = "[CLS]" - sep_token = "[SEP]" - pad_token = "[PAD]" - mask_token = "[MASK]" - - for token in [cls_token, pad_token, sep_token, mask_token]: - if token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - self.cls_token_id = self.token_to_id(cls_token) - self.sep_token_id = self.token_to_id(sep_token) - self.pad_token_id = self.token_to_id(pad_token) - self.mask_token_id = self.token_to_id(mask_token) + def set_vocabulary(self, vocabulary): + super().set_vocabulary(vocabulary) + + if vocabulary is not None: + # Check for necessary special tokens. + for token in [self.cls_token, self.pad_token, self.sep_token]: + if token not in self.vocabulary: + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name."
+ ) + + self.cls_token_id = self.token_to_id(self.cls_token) + self.sep_token_id = self.token_to_id(self.sep_token) + self.pad_token_id = self.token_to_id(self.pad_token) + self.mask_token_id = self.token_to_id(self.mask_token) + else: + self.cls_token_id = None + self.sep_token_id = None + self.pad_token_id = None + self.mask_token_id = None diff --git a/keras_nlp/models/mistral/mistral_tokenizer.py b/keras_nlp/models/mistral/mistral_tokenizer.py index 25177f6d6d..12636f69f1 100644 --- a/keras_nlp/models/mistral/mistral_tokenizer.py +++ b/keras_nlp/models/mistral/mistral_tokenizer.py @@ -58,18 +58,22 @@ class MistralTokenizer(SentencePieceTokenizer): """ def __init__(self, proto, **kwargs): + self.start_token = "" + self.end_token = "" super().__init__(proto=proto, **kwargs) - # Check for necessary special tokens. - start_token = "" - end_token = "" - for token in [start_token, end_token]: - if token not in self.get_vocabulary(): - raise ValueError( - f"Cannot find token `'{token}'` in the provided " - f"`vocabulary`. Please provide `'{token}'` in your " - "`vocabulary` or use a pretrained `vocabulary` name." - ) - - self.start_token_id = self.token_to_id(start_token) - self.end_token_id = self.token_to_id(end_token) + def set_proto(self, proto): + super().set_proto(proto) + if proto is not None: + for token in [self.start_token, self.end_token]: + if token not in self.get_vocabulary(): + raise ValueError( + f"Cannot find token `'{token}'` in the provided " + f"`vocabulary`. Please provide `'{token}'` in your " + "`vocabulary` or use a pretrained `vocabulary` name." + ) + self.start_token_id = self.token_to_id(self.start_token) + self.end_token_id = self.token_to_id(self.end_token) + else: + self.start_token_id = None + self.end_token_id = None From f213dfedfe366fb0cc4ec3cbee154d845f8df3e4 Mon Sep 17 00:00:00 2001 From: Ramesh Sampath <1437573+sampathweb@users.noreply.github.com> Date: Fri, 5 Jan 2024 09:25:14 +0530 Subject: [PATCH 81/87] Align requirments with Keras (#1386) * Align requirments with Keras * Update TF nightly versions --- requirements-jax-cuda.txt | 4 ++-- requirements-tensorflow-cuda.txt | 5 ++--- requirements-torch-cuda.txt | 10 +++++----- requirements.txt | 4 ++-- 4 files changed, 11 insertions(+), 12 deletions(-) diff --git a/requirements-jax-cuda.txt b/requirements-jax-cuda.txt index 4be6e416e6..c09b306264 100644 --- a/requirements-jax-cuda.txt +++ b/requirements-jax-cuda.txt @@ -1,6 +1,6 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231221 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231221 # Pin a working nightly until rc0. # Torch cpu-only version. --extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-tensorflow-cuda.txt b/requirements-tensorflow-cuda.txt index f0a77c64f2..21a8ed2463 100644 --- a/requirements-tensorflow-cuda.txt +++ b/requirements-tensorflow-cuda.txt @@ -1,7 +1,6 @@ # Tensorflow with cuda support. ---extra-index-url https://pypi.nvidia.com -tf-nightly[and-cuda]==2.16.0.dev20231109 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. +tf-nightly[and-cuda]==2.16.0.dev20231221 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231221 # Pin a working nightly until rc0. # Torch cpu-only version. 
--extra-index-url https://download.pytorch.org/whl/cpu diff --git a/requirements-torch-cuda.txt b/requirements-torch-cuda.txt index 7e956c4516..c71c51e478 100644 --- a/requirements-torch-cuda.txt +++ b/requirements-torch-cuda.txt @@ -1,11 +1,11 @@ # Tensorflow cpu-only version. -tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231221 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231221 # Pin a working nightly until rc0. # Torch with cuda support. ---extra-index-url https://download.pytorch.org/whl/cu118 -torch==2.1.0 -torchvision==0.16.0 +--extra-index-url https://download.pytorch.org/whl/cu121 +torch==2.1.2 +torchvision==0.16.2 # Jax cpu-only version. jax[cpu] diff --git a/requirements.txt b/requirements.txt index fe4340aa9b..fa1dc91943 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ # Tensorflow. -tf-nightly-cpu==2.16.0.dev20231109 # Pin a working nightly until rc0. -tensorflow-text-nightly==2.16.0.dev20231109 # Pin a working nightly until rc0. +tf-nightly-cpu==2.16.0.dev20231221 # Pin a working nightly until rc0. +tensorflow-text-nightly==2.16.0.dev20231221 # Pin a working nightly until rc0. # Torch. --extra-index-url https://download.pytorch.org/whl/cpu From 89a77f0c91ba01c89db71e21ec317874dcc3a261 Mon Sep 17 00:00:00 2001 From: Tirth Patel Date: Sat, 6 Jan 2024 00:19:20 +0530 Subject: [PATCH 82/87] Add a preprocessor for the Mistral backbone (#1385) * Add mistral preprocessor * Add docs for the preprocessor * Address review comments * Fix lint issue --- .../models/mistral/mistral_preprocessor.py | 175 ++++++++++++++++++ .../mistral/mistral_preprocessor_test.py | 59 ++++++ 2 files changed, 234 insertions(+) create mode 100644 keras_nlp/models/mistral/mistral_preprocessor.py create mode 100644 keras_nlp/models/mistral/mistral_preprocessor_test.py diff --git a/keras_nlp/models/mistral/mistral_preprocessor.py b/keras_nlp/models/mistral/mistral_preprocessor.py new file mode 100644 index 0000000000..d5d838303e --- /dev/null +++ b/keras_nlp/models/mistral/mistral_preprocessor.py @@ -0,0 +1,175 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from keras_nlp.api_export import keras_nlp_export +from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker +from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer +from keras_nlp.models.preprocessor import Preprocessor +from keras_nlp.utils.keras_utils import ( + convert_inputs_to_list_of_tensor_segments, +) +from keras_nlp.utils.keras_utils import pack_x_y_sample_weight +from keras_nlp.utils.python_utils import classproperty + + +@keras_nlp_export("keras_nlp.models.MistralPreprocessor") +class MistralPreprocessor(Preprocessor): + """A Mistral preprocessing layer which tokenizes and packs inputs. + + This preprocessing layer will do three things: + + 1. 
Tokenize any number of input segments using the `tokenizer`.
+ 2. Pack the inputs together using a `keras_nlp.layers.StartEndPacker`
+ with the appropriate tokens.
+ 3. Construct a dictionary with keys `"token_ids"` and `"padding_mask"`
+ that can be passed directly to `keras_nlp.models.MistralBackbone`.
+
+ This layer can be used directly with `tf.data.Dataset.map` to preprocess
+ string data in the `(x, y, sample_weight)` format used by
+ `keras.Model.fit`.
+
+ Args:
+ tokenizer: A `keras_nlp.models.MistralTokenizer` instance.
+ sequence_length: The length of the packed inputs.
+ add_start_token: If `True`, the preprocessor will prepend the tokenizer
+ start token to each input sequence. Defaults to `True`.
+ add_end_token: If `True`, the preprocessor will append the tokenizer
+ end token to each input sequence. Defaults to `False`.
+
+ Call arguments:
+ x: A tensor of single string sequences, or a tuple of multiple
+ tensor sequences to be packed together. Inputs may be batched or
+ unbatched. For single sequences, raw python inputs will be converted
+ to tensors. For multiple sequences, pass tensors directly.
+ y: Any label data. Will be passed through unaltered.
+ sample_weight: Any label weight data. Will be passed through unaltered.
+ sequence_length: Pass to override the configured `sequence_length` of
+ the layer.
+
+ Examples:
+
+ Directly calling the preprocessor created with `from_preset()`.
+ ```python
+ preprocessor = keras_nlp.models.MistralPreprocessor.from_preset(
+ "mistral_base_en"
+ )
+
+ # Tokenize and pack a single sentence.
+ preprocessor("The quick brown fox jumped.")
+
+ # Tokenize a batch of single sentences.
+ preprocessor(["The quick brown fox jumped.", "Call me Ishmael."])
+
+ # Preprocess a batch of sentence pairs.
+ # When handling multiple sequences, always convert to tensors first!
+ first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+ second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+ preprocessor((first, second))
+ ```
+
+ Mapping with `tf.data.Dataset`.
+ ```python
+ preprocessor = keras_nlp.models.MistralPreprocessor.from_preset(
+ "mistral_base_en"
+ )
+ first = tf.constant(["The quick brown fox jumped.", "Call me Ishmael."])
+ second = tf.constant(["The fox tripped.", "Oh look, a whale."])
+ label = tf.constant([1, 1])
+
+ # Map labeled single sentences.
+ ds = tf.data.Dataset.from_tensor_slices((first, label))
+ ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
+
+ # Map unlabeled single sentences.
+ ds = tf.data.Dataset.from_tensor_slices(first)
+ ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
+
+ # Map labeled sentence pairs.
+ ds = tf.data.Dataset.from_tensor_slices(((first, second), label))
+ ds = ds.map(preprocessor, num_parallel_calls=tf.data.AUTOTUNE)
+
+ # Map unlabeled sentence pairs.
+ ds = tf.data.Dataset.from_tensor_slices((first, second))
+
+ # Watch out for tf.data's default unpacking of tuples here!
+ # Best to invoke the `preprocessor` directly in this case.
+ ds = ds.map( + lambda first, second: preprocessor(x=(first, second)), + num_parallel_calls=tf.data.AUTOTUNE, + ) + ``` + """ + + def __init__( + self, + tokenizer, + sequence_length=1024, + add_start_token=True, + add_end_token=False, + **kwargs, + ): + super().__init__(**kwargs) + self.tokenizer = tokenizer + self.add_start_token = add_start_token + self.add_end_token = add_end_token + self.sequence_length = sequence_length + self.packer = StartEndPacker( + start_value=self.tokenizer.start_token_id, + end_value=self.tokenizer.end_token_id, + sequence_length=sequence_length, + return_padding_mask=True, + ) + + def get_config(self): + config = super().get_config() + config.update( + { + "sequence_length": self.sequence_length, + "add_start_token": self.add_start_token, + "add_end_token": self.add_end_token, + } + ) + return config + + def call( + self, + x, + y=None, + sample_weight=None, + sequence_length=None, + ): + x = convert_inputs_to_list_of_tensor_segments(x) + if len(x) != 1: + raise ValueError( + "Mistral requires each input feature to contain only " + f"one segment, but received {len(x)}. If you are using Mistral" + " for a multi-segment classification task, please refer to " + "classification models like BERT or RoBERTa." + ) + sequence_length = sequence_length or self.sequence_length + token_ids, padding_mask = self.packer( + self.tokenizer(x[0]), + sequence_length=sequence_length, + add_start_value=self.add_start_token, + add_end_value=self.add_end_token, + ) + x = { + "token_ids": token_ids, + "padding_mask": padding_mask, + } + return pack_x_y_sample_weight(x, y, sample_weight) + + @classproperty + def tokenizer_cls(cls): + return MistralTokenizer diff --git a/keras_nlp/models/mistral/mistral_preprocessor_test.py b/keras_nlp/models/mistral/mistral_preprocessor_test.py new file mode 100644 index 0000000000..40528fd4e8 --- /dev/null +++ b/keras_nlp/models/mistral/mistral_preprocessor_test.py @@ -0,0 +1,59 @@ +# Copyright 2023 The KerasNLP Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os + +from keras_nlp.models.mistral.mistral_preprocessor import MistralPreprocessor +from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer +from keras_nlp.tests.test_case import TestCase + + +class MistralPreprocessorTest(TestCase): + def setUp(self): + self.tokenizer = MistralTokenizer( + # Generated using create_mistral_test_proto.py + proto=os.path.join( + self.get_test_data_dir(), "mistral_test_vocab.spm" + ) + ) + self.init_kwargs = { + "tokenizer": self.tokenizer, + "sequence_length": 8, + } + self.input_data = ( + ["the quick brown fox"], + [1], # Pass through labels. + [1.0], # Pass through sample_weights. + ) + + def test_preprocessor_basics(self): + self.run_preprocessing_layer_test( + cls=MistralPreprocessor, + init_kwargs=self.init_kwargs, + input_data=self.input_data, + expected_output=( + { + "token_ids": [[1, 3, 8, 4, 6, 0, 0, 0]], + "padding_mask": [[1, 1, 1, 1, 1, 0, 0, 0]], + }, + [1], # Pass through labels. 
+ [1.0], # Pass through sample_weights. + ), + ) + + def test_errors_for_2d_list_input(self): + preprocessor = MistralPreprocessor(**self.init_kwargs) + ambiguous_input = [["one", "two"], ["three", "four"]] + with self.assertRaises(ValueError): + preprocessor(ambiguous_input) From fde58790d3f5d841e46e5c2d82cf888d8c44f2eb Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Fri, 5 Jan 2024 12:08:15 -0800 Subject: [PATCH 83/87] Switch to always expect full Kaggle preset handles (#1390) --- keras_nlp/models/albert/albert_presets.py | 8 +++---- keras_nlp/models/bart/bart_presets.py | 6 ++--- keras_nlp/models/bert/bert_presets.py | 20 ++++++++-------- .../models/deberta_v3/deberta_v3_presets.py | 10 ++++---- .../models/distil_bert/distil_bert_presets.py | 6 ++--- keras_nlp/models/f_net/f_net_presets.py | 4 ++-- keras_nlp/models/gpt2/gpt2_presets.py | 10 ++++---- keras_nlp/models/opt/opt_presets.py | 8 +++---- keras_nlp/models/roberta/roberta_presets.py | 4 ++-- keras_nlp/models/t5/t5_presets.py | 12 +++++----- keras_nlp/models/whisper/whisper_presets.py | 20 ++++++++-------- .../models/xlm_roberta/xlm_roberta_presets.py | 4 ++-- keras_nlp/utils/preset_utils.py | 23 ++++++++----------- 13 files changed, 65 insertions(+), 70 deletions(-) diff --git a/keras_nlp/models/albert/albert_presets.py b/keras_nlp/models/albert/albert_presets.py index 3314241feb..3cd8215295 100644 --- a/keras_nlp/models/albert/albert_presets.py +++ b/keras_nlp/models/albert/albert_presets.py @@ -26,7 +26,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_base_en_uncased/2", + "kaggle_handle": "kaggle://keras/albert/keras/albert_base_en_uncased/2", }, "albert_large_en_uncased": { "metadata": { @@ -39,7 +39,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_large_en_uncased/2", + "kaggle_handle": "kaggle://keras/albert/keras/albert_large_en_uncased/2", }, "albert_extra_large_en_uncased": { "metadata": { @@ -52,7 +52,7 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_extra_large_en_uncased/2", + "kaggle_handle": "kaggle://keras/albert/keras/albert_extra_large_en_uncased/2", }, "albert_extra_extra_large_en_uncased": { "metadata": { @@ -65,6 +65,6 @@ "path": "albert", "model_card": "https://github.com/google-research/albert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/albert/albert_extra_extra_large_en_uncased/2", + "kaggle_handle": "kaggle://keras/albert/keras/albert_extra_extra_large_en_uncased/2", }, } diff --git a/keras_nlp/models/bart/bart_presets.py b/keras_nlp/models/bart/bart_presets.py index 6249d46744..cca8d54959 100644 --- a/keras_nlp/models/bart/bart_presets.py +++ b/keras_nlp/models/bart/bart_presets.py @@ -25,7 +25,7 @@ "path": "bart", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/bart/README.md", }, - "kaggle_handle": "kaggle://keras/bart/bart_base_en/2", + "kaggle_handle": "kaggle://keras/bart/keras/bart_base_en/2", }, "bart_large_en": { "metadata": { @@ -47,7 +47,7 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "kaggle://keras/bart/bart_large_en/2", + "kaggle_handle": 
"kaggle://keras/bart/keras/bart_large_en/2", }, "bart_large_en_cnn": { "metadata": { @@ -69,6 +69,6 @@ "dropout": 0.1, "max_sequence_length": 1024, }, - "kaggle_handle": "kaggle://keras/bart/bart_large_en_cnn/2", + "kaggle_handle": "kaggle://keras/bart/keras/bart_large_en_cnn/2", }, } diff --git a/keras_nlp/models/bert/bert_presets.py b/keras_nlp/models/bert/bert_presets.py index 1730e17228..09315db192 100644 --- a/keras_nlp/models/bert/bert_presets.py +++ b/keras_nlp/models/bert/bert_presets.py @@ -25,7 +25,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_tiny_en_uncased/2", }, "bert_small_en_uncased": { "metadata": { @@ -38,7 +38,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_small_en_uncased/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_small_en_uncased/2", }, "bert_medium_en_uncased": { "metadata": { @@ -51,7 +51,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_medium_en_uncased/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_medium_en_uncased/2", }, "bert_base_en_uncased": { "metadata": { @@ -64,7 +64,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_en_uncased/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_base_en_uncased/2", }, "bert_base_en": { "metadata": { @@ -77,7 +77,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_en/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_base_en/2", }, "bert_base_zh": { "metadata": { @@ -89,7 +89,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_zh/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_base_zh/2", }, "bert_base_multi": { "metadata": { @@ -101,7 +101,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_base_multi/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_base_multi/2", }, "bert_large_en_uncased": { "metadata": { @@ -114,7 +114,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_large_en_uncased/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_large_en_uncased/2", }, "bert_large_en": { "metadata": { @@ -127,7 +127,7 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_large_en/2", + "kaggle_handle": "kaggle://keras/bert/keras/bert_large_en/2", }, } @@ -142,6 +142,6 @@ "path": "bert", "model_card": "https://github.com/google-research/bert/blob/master/README.md", }, - "kaggle_handle": "kaggle://keras/bert/bert_tiny_en_uncased_sst2/3", + "kaggle_handle": "kaggle://keras/bert/keras/bert_tiny_en_uncased_sst2/3", } } diff --git a/keras_nlp/models/deberta_v3/deberta_v3_presets.py 
b/keras_nlp/models/deberta_v3/deberta_v3_presets.py index ba8734e787..febfdffd91 100644 --- a/keras_nlp/models/deberta_v3/deberta_v3_presets.py +++ b/keras_nlp/models/deberta_v3/deberta_v3_presets.py @@ -25,7 +25,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-xsmall", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_extra_small_en/2", + "kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_extra_small_en/2", }, "deberta_v3_small_en": { "metadata": { @@ -38,7 +38,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-small", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_small_en/2", + "kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_small_en/2", }, "deberta_v3_base_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-base", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_en/2", + "kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_base_en/2", }, "deberta_v3_large_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/deberta-v3-large", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_large_en/2", + "kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_large_en/2", }, "deberta_v3_base_multi": { "metadata": { @@ -77,6 +77,6 @@ "path": "deberta_v3", "model_card": "https://huggingface.co/microsoft/mdeberta-v3-base", }, - "kaggle_handle": "kaggle://keras/deberta_v3/deberta_v3_base_multi/2", + "kaggle_handle": "kaggle://keras/deberta_v3/keras/deberta_v3_base_multi/2", }, } diff --git a/keras_nlp/models/distil_bert/distil_bert_presets.py b/keras_nlp/models/distil_bert/distil_bert_presets.py index cdbd234984..2bc3415342 100644 --- a/keras_nlp/models/distil_bert/distil_bert_presets.py +++ b/keras_nlp/models/distil_bert/distil_bert_presets.py @@ -26,7 +26,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-uncased", }, - "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en_uncased/2", + "kaggle_handle": "kaggle://keras/distil_bert/keras/distil_bert_base_en_uncased/2", }, "distil_bert_base_en": { "metadata": { @@ -40,7 +40,7 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-cased", }, - "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_en/2", + "kaggle_handle": "kaggle://keras/distil_bert/keras/distil_bert_base_en/2", }, "distil_bert_base_multi": { "metadata": { @@ -52,6 +52,6 @@ "path": "distil_bert", "model_card": "https://huggingface.co/distilbert-base-multilingual-cased", }, - "kaggle_handle": "kaggle://keras/distil_bert/distil_bert_base_multi/2", + "kaggle_handle": "kaggle://keras/distil_bert/keras/distil_bert_base_multi/2", }, } diff --git a/keras_nlp/models/f_net/f_net_presets.py b/keras_nlp/models/f_net/f_net_presets.py index 0db5f1c2c9..13e0e2482a 100644 --- a/keras_nlp/models/f_net/f_net_presets.py +++ b/keras_nlp/models/f_net/f_net_presets.py @@ -25,7 +25,7 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": "kaggle://keras/f_net/f_net_base_en/2", + "kaggle_handle": "kaggle://keras/f_net/keras/f_net_base_en/2", }, "f_net_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "f_net", "model_card": "https://github.com/google-research/google-research/blob/master/f_net/README.md", }, - "kaggle_handle": 
"kaggle://keras/f_net/f_net_large_en/2", + "kaggle_handle": "kaggle://keras/f_net/keras/f_net_large_en/2", }, } diff --git a/keras_nlp/models/gpt2/gpt2_presets.py b/keras_nlp/models/gpt2/gpt2_presets.py index 660c021f83..c51f170aa2 100644 --- a/keras_nlp/models/gpt2/gpt2_presets.py +++ b/keras_nlp/models/gpt2/gpt2_presets.py @@ -26,7 +26,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en/2", + "kaggle_handle": "kaggle://keras/gpt2/keras/gpt2_base_en/2", }, "gpt2_medium_en": { "metadata": { @@ -39,7 +39,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_medium_en/2", + "kaggle_handle": "kaggle://keras/gpt2/keras/gpt2_medium_en/2", }, "gpt2_large_en": { "metadata": { @@ -52,7 +52,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_large_en/2", + "kaggle_handle": "kaggle://keras/gpt2/keras/gpt2_large_en/2", }, "gpt2_extra_large_en": { "metadata": { @@ -65,7 +65,7 @@ "path": "gpt2", "model_card": "https://github.com/openai/gpt-2/blob/master/model_card.md", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_extra_large_en/2", + "kaggle_handle": "kaggle://keras/gpt2/keras/gpt2_extra_large_en/2", }, "gpt2_base_en_cnn_dailymail": { "metadata": { @@ -77,6 +77,6 @@ "official_name": "GPT-2", "path": "gpt2", }, - "kaggle_handle": "kaggle://keras/gpt2/gpt2_base_en_cnn_dailymail/2", + "kaggle_handle": "kaggle://keras/gpt2/keras/gpt2_base_en_cnn_dailymail/2", }, } diff --git a/keras_nlp/models/opt/opt_presets.py b/keras_nlp/models/opt/opt_presets.py index abafa14734..50091be243 100644 --- a/keras_nlp/models/opt/opt_presets.py +++ b/keras_nlp/models/opt/opt_presets.py @@ -26,7 +26,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_125m_en/2", + "kaggle_handle": "kaggle://keras/opt/keras/opt_125m_en/2", }, # We skip the 350m checkpoint because it does not match the structure of # other checkpoints. 
@@ -41,7 +41,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_1.3b_en/2", + "kaggle_handle": "kaggle://keras/opt/keras/opt_1.3b_en/2", }, "opt_2.7b_en": { "metadata": { @@ -54,7 +54,7 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_2.7b_en/2", + "kaggle_handle": "kaggle://keras/opt/keras/opt_2.7b_en/2", }, "opt_6.7b_en": { "metadata": { @@ -67,6 +67,6 @@ "path": "opt", "model_card": "https://github.com/facebookresearch/metaseq/blob/main/projects/OPT/model_card.md", }, - "kaggle_handle": "kaggle://keras/opt/opt_6.7b_en/2", + "kaggle_handle": "kaggle://keras/opt/keras/opt_6.7b_en/2", }, } diff --git a/keras_nlp/models/roberta/roberta_presets.py b/keras_nlp/models/roberta/roberta_presets.py index 9ed6bc417e..66848cecd0 100644 --- a/keras_nlp/models/roberta/roberta_presets.py +++ b/keras_nlp/models/roberta/roberta_presets.py @@ -25,7 +25,7 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "kaggle://keras/roberta/roberta_base_en/2", + "kaggle_handle": "kaggle://keras/roberta/keras/roberta_base_en/2", }, "roberta_large_en": { "metadata": { @@ -38,6 +38,6 @@ "path": "roberta", "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/roberta/README.md", }, - "kaggle_handle": "kaggle://keras/roberta/roberta_large_en/2", + "kaggle_handle": "kaggle://keras/roberta/keras/roberta_large_en/2", }, } diff --git a/keras_nlp/models/t5/t5_presets.py b/keras_nlp/models/t5/t5_presets.py index c25bef1c1a..58b301b7f0 100644 --- a/keras_nlp/models/t5/t5_presets.py +++ b/keras_nlp/models/t5/t5_presets.py @@ -25,7 +25,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_small_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/t5_small_multi/2", }, "t5_base_multi": { "metadata": { @@ -38,7 +38,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_base_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/t5_base_multi/2", }, "t5_large_multi": { "metadata": { @@ -51,7 +51,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/t5_large_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/t5_large_multi/2", }, "flan_small_multi": { "metadata": { @@ -64,7 +64,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_small_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/flan_small_multi/2", }, "flan_base_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "t5", "model_card": "https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_base_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/flan_base_multi/2", }, "flan_large_multi": { "metadata": { @@ -90,6 +90,6 @@ "path": "t5", "model_card": 
"https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.md", }, - "kaggle_handle": "kaggle://keras/t5/flan_large_multi/2", + "kaggle_handle": "kaggle://keras/t5/keras/flan_large_multi/2", }, } diff --git a/keras_nlp/models/whisper/whisper_presets.py b/keras_nlp/models/whisper/whisper_presets.py index 7494eef8c5..b881ee57b9 100644 --- a/keras_nlp/models/whisper/whisper_presets.py +++ b/keras_nlp/models/whisper/whisper_presets.py @@ -25,7 +25,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_en/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_tiny_en/2", }, "whisper_base_en": { "metadata": { @@ -38,7 +38,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_base_en/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_base_en/2", }, "whisper_small_en": { "metadata": { @@ -51,7 +51,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_small_en/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_small_en/2", }, "whisper_medium_en": { "metadata": { @@ -64,7 +64,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_medium_en/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_medium_en/2", }, "whisper_tiny_multi": { "metadata": { @@ -77,7 +77,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_tiny_multi/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_tiny_multi/2", }, "whisper_base_multi": { "metadata": { @@ -90,7 +90,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_base_multi/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_base_multi/2", }, "whisper_small_multi": { "metadata": { @@ -103,7 +103,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_small_multi/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_small_multi/2", }, "whisper_medium_multi": { "metadata": { @@ -116,7 +116,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_medium_multi/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_medium_multi/2", }, "whisper_large_multi": { "metadata": { @@ -129,7 +129,7 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_large_multi/2", }, "whisper_large_multi_v2": { "metadata": { @@ -143,6 +143,6 @@ "path": "whisper", "model_card": "https://github.com/openai/whisper/blob/main/model-card.md", }, - "kaggle_handle": "kaggle://keras/whisper/whisper_large_multi_v2/2", + "kaggle_handle": "kaggle://keras/whisper/keras/whisper_large_multi_v2/2", }, } diff --git 
a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py
index d8676b468a..477e508906 100644
--- a/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py
+++ b/keras_nlp/models/xlm_roberta/xlm_roberta_presets.py
@@ -25,7 +25,7 @@
 "path": "xlm_roberta",
 "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md",
 },
- "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_base_multi/2",
+ "kaggle_handle": "kaggle://keras/xlm_roberta/keras/xlm_roberta_base_multi/2",
 },
 "xlm_roberta_large_multi": {
 "metadata": {
@@ -38,6 +38,6 @@
 "path": "xlm_roberta",
 "model_card": "https://github.com/facebookresearch/fairseq/blob/main/examples/xlmr/README.md",
 },
- "kaggle_handle": "kaggle://keras/xlm_roberta/xlm_roberta_large_multi/2",
+ "kaggle_handle": "kaggle://keras/xlm_roberta/keras/xlm_roberta_large_multi/2",
 },
 }
diff --git a/keras_nlp/utils/preset_utils.py b/keras_nlp/utils/preset_utils.py
index 2ee1e8ddd6..6bb2748fd9 100644
--- a/keras_nlp/utils/preset_utils.py
+++ b/keras_nlp/utils/preset_utils.py
@@ -40,20 +40,15 @@ def get_file(preset, path):
 "`from_preset()` requires the `kagglehub` package. "
 "Please install with `pip install kagglehub`."
 )
- segments = preset.removeprefix(KAGGLE_PREFIX).split("/")
- # Insert the kaggle framework into the handle.
- if len(segments) == 3:
- org, model, variant = segments
- kaggle_handle = f"{org}/{model}/keras/{variant}"
- elif len(segments) == 4:
- org, model, variant, version = segments
- kaggle_handle = f"{org}/{model}/keras/{variant}/{version}"
- else:
+ kaggle_handle = preset.removeprefix(KAGGLE_PREFIX)
+ num_segments = len(kaggle_handle.split("/"))
+ if num_segments not in (4, 5):
 raise ValueError(
- "Unexpected kaggle preset handle. Kaggle model handles should "
- "have the form kaggle://{org}/{model}/{variant}[/{version}]. "
- "For example, 'kaggle://keras/bert/bert_base_en'. "
- f"Received: preset={preset}"
+ "Unexpected Kaggle preset. Kaggle model handles should have "
+ "the form kaggle://{org}/{model}/keras/{variant}[/{version}]. "
+ "For example, 'kaggle://username/bert/keras/bert_base_en' or "
+ "'kaggle://username/bert/keras/bert_base_en/1' (to specify a "
+ f"version). Received: preset={preset}"
 )
 return kagglehub.model_download(kaggle_handle, path)
 elif preset.startswith(GS_PREFIX):
@@ -75,7 +70,7 @@ def get_file(preset, path):
 raise ValueError(
 "Unknown preset identifier. A preset must be one of:\n"
 "1) a built-in preset identifier like `'bert_base_en'`\n"
- "2) a Kaggle Models handle like `'kaggle://keras/bert/bert_base_en'`\n"
+ "2) a Kaggle Models handle like `'kaggle://keras/bert/keras/bert_base_en'`\n"
 "3) a path to a local preset directory like `'./bert_base_en'`\n"
 "Use `print(cls.presets.keys())` to view all built-in presets for "
 "API symbol `cls`.\n"
From bf4a9af2d4c951005c86ef963eb3270b79fed0b0 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 4 Jan 2024 13:31:35 -0800
Subject: [PATCH 84/87] Version bump for 0.7.0.dev4 dev release (#1388)

Test release before we cut the actual 0.7.0 release
---
 keras_nlp/version_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/keras_nlp/version_utils.py b/keras_nlp/version_utils.py
index 15fede3a08..55ea7348f6 100644
--- a/keras_nlp/version_utils.py
+++ b/keras_nlp/version_utils.py
@@ -15,7 +15,7 @@
 from keras_nlp.api_export import keras_nlp_export

 # Unique source of truth for the version number.
-__version__ = "0.7.0"
+__version__ = "0.7.0.dev4"

 @keras_nlp_export("keras_nlp.version")
From fb2ebaa1d4f76b8d9fe98de39017144f33141bc1 Mon Sep 17 00:00:00 2001
From: Matt Watson <1389937+mattdangerw@users.noreply.github.com>
Date: Thu, 4 Jan 2024 15:54:35 -0800
Subject: [PATCH 85/87] Unexport models from the 0.7 release (#1360)

We should not merge this to the `kaggle` branch; we should merge it to
`r0.7` after that branch is created.

This unexports our "not yet ready for prime time" models:
- electra
- gpt-neox
- t5
- whisper
- xlnet

These are all still in flight to some degree.
---
 keras_nlp/models/electra/electra_backbone.py | 3 +--
 keras_nlp/models/electra/electra_tokenizer.py | 4 ++--
 keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py | 3 +--
 keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py | 3 +--
 .../models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py | 4 ++--
 keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py | 4 ++--
 keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py | 4 ++--
 keras_nlp/models/llama/llama_backbone.py | 3 +--
 keras_nlp/models/mistral/mistral_backbone.py | 3 +--
 keras_nlp/models/mistral/mistral_preprocessor.py | 4 ++--
 keras_nlp/models/mistral/mistral_tokenizer.py | 4 ++--
 keras_nlp/models/t5/t5_backbone.py | 3 +--
 keras_nlp/models/t5/t5_tokenizer.py | 4 ++--
 keras_nlp/models/whisper/whisper_audio_feature_extractor.py | 4 ++--
 keras_nlp/models/whisper/whisper_backbone.py | 3 +--
 keras_nlp/models/whisper/whisper_preprocessor.py | 3 +--
 keras_nlp/models/whisper/whisper_tokenizer.py | 4 ++--
 keras_nlp/models/xlnet/xlnet_backbone.py | 3 +--
 18 files changed, 27 insertions(+), 36 deletions(-)

diff --git a/keras_nlp/models/electra/electra_backbone.py b/keras_nlp/models/electra/electra_backbone.py
index 66d1db8ccc..f5f547bb77 100644
--- a/keras_nlp/models/electra/electra_backbone.py
+++ b/keras_nlp/models/electra/electra_backbone.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from keras_nlp.api_export import keras_nlp_export
 from keras_nlp.backend import keras
 from keras_nlp.layers.modeling.position_embedding import PositionEmbedding
 from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding
@@ -25,7 +24,7 @@ def electra_kernel_initializer(stddev=0.02):
 return keras.initializers.TruncatedNormal(stddev=stddev)

-@keras_nlp_export("keras_nlp.models.ElectraBackbone")
+@keras.saving.register_keras_serializable(package="keras_nlp")
 class ElectraBackbone(Backbone):
 """An Electra encoder network.

diff --git a/keras_nlp/models/electra/electra_tokenizer.py b/keras_nlp/models/electra/electra_tokenizer.py
index acd665c2a3..4fb7829424 100644
--- a/keras_nlp/models/electra/electra_tokenizer.py
+++ b/keras_nlp/models/electra/electra_tokenizer.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from keras_nlp.api_export import keras_nlp_export
+from keras_nlp.backend import keras
 from keras_nlp.tokenizers import WordPieceTokenizer

-@keras_nlp_export("keras_nlp.models.ElectraTokenizer")
+@keras.saving.register_keras_serializable(package="keras_nlp")
 class ElectraTokenizer(WordPieceTokenizer):
 """An ELECTRA tokenizer using WordPiece subword segmentation.
diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py index 6804331aed..5f86766433 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_backbone.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone @@ -24,7 +23,7 @@ def _gpt_neo_x_kernel_initializer(stddev=0.02): return keras.initializers.RandomNormal(stddev=stddev) -@keras_nlp_export("keras_nlp.models.GPTNeoXBackbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class GPTNeoXBackbone(Backbone): """GPT-NeoX core network with hyperparameters. diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py index 0f813470aa..a11331176f 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.generative_task import GenerativeTask @@ -23,7 +22,7 @@ from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.GPTNeoXCausalLM") +@keras.saving.register_keras_serializable(package="keras_nlp") class GPTNeoXCausalLM(GenerativeTask): """An end-to-end GPTNeoX model for causal language modeling. diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py index 92ff9bbb03..665622540e 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_causal_lm_preprocessor.py @@ -15,7 +15,7 @@ import tensorflow as tf from absl import logging -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.models.gpt_neo_x.gpt_neo_x_preprocessor import ( GPTNeoXPreprocessor, @@ -26,7 +26,7 @@ from keras_nlp.utils.keras_utils import pack_x_y_sample_weight -@keras_nlp_export("keras_nlp.models.GPTNeoXCausalLMPreprocessor") +@keras.saving.register_keras_serializable(package="keras_nlp") class GPTNeoXCausalLMPreprocessor(GPTNeoXPreprocessor): """GPT-NeoX Causal LM preprocessor. diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py index 1db4fe4c9b..8f0d5731aa 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_preprocessor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.models.gpt_neo_x.gpt_neo_x_tokenizer import GPTNeoXTokenizer from keras_nlp.models.preprocessor import Preprocessor @@ -23,7 +23,7 @@ from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.GPTNeoXPreprocessor") +@keras.saving.register_keras_serializable(package="keras_nlp") class GPTNeoXPreprocessor(Preprocessor): """GPTNeoX preprocessing layer which tokenizes and packs inputs. diff --git a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py index d109c5849d..cc63e99af6 100644 --- a/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py +++ b/keras_nlp/models/gpt_neo_x/gpt_neo_x_tokenizer.py @@ -12,11 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer -@keras_nlp_export("keras_nlp.models.GPTNeoXTokenizer") +@keras.saving.register_keras_serializable(package="keras_nlp") class GPTNeoXTokenizer(BytePairTokenizer): """A GPTNeoX tokenizer using Byte-Pair Encoding subword segmentation. diff --git a/keras_nlp/models/llama/llama_backbone.py b/keras_nlp/models/llama/llama_backbone.py index 63438544cc..8e501d9ee8 100644 --- a/keras_nlp/models/llama/llama_backbone.py +++ b/keras_nlp/models/llama/llama_backbone.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding @@ -24,7 +23,7 @@ def _llama_kernel_initializer(stddev=0.02): return keras.initializers.RandomNormal(stddev=stddev) -@keras_nlp_export("keras_nlp.models.LlamaBackbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class LlamaBackbone(Backbone): """ LLaMA core network with hyperparameters. diff --git a/keras_nlp/models/mistral/mistral_backbone.py b/keras_nlp/models/mistral/mistral_backbone.py index 42cec8b218..fbdebd74b4 100644 --- a/keras_nlp/models/mistral/mistral_backbone.py +++ b/keras_nlp/models/mistral/mistral_backbone.py @@ -11,7 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding @@ -28,7 +27,7 @@ def _mistral_kernel_initializer(stddev=0.02): return keras.initializers.RandomNormal(stddev=stddev) -@keras_nlp_export("keras_nlp.models.MistralBackbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class MistralBackbone(Backbone): """ The Mistral Transformer core architecture with hyperparameters. 
diff --git a/keras_nlp/models/mistral/mistral_preprocessor.py b/keras_nlp/models/mistral/mistral_preprocessor.py index d5d838303e..e6d54c793a 100644 --- a/keras_nlp/models/mistral/mistral_preprocessor.py +++ b/keras_nlp/models/mistral/mistral_preprocessor.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.models.mistral.mistral_tokenizer import MistralTokenizer from keras_nlp.models.preprocessor import Preprocessor @@ -23,7 +23,7 @@ from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.MistralPreprocessor") +@keras.saving.register_keras_serializable(package="keras_nlp") class MistralPreprocessor(Preprocessor): """A Mistral preprocessing layer which tokenizes and packs inputs. diff --git a/keras_nlp/models/mistral/mistral_tokenizer.py b/keras_nlp/models/mistral/mistral_tokenizer.py index 12636f69f1..2031d907cc 100644 --- a/keras_nlp/models/mistral/mistral_tokenizer.py +++ b/keras_nlp/models/mistral/mistral_tokenizer.py @@ -11,11 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer -@keras_nlp_export("keras_nlp.models.MistralTokenizer") +@keras.saving.register_keras_serializable(package="keras_nlp") class MistralTokenizer(SentencePieceTokenizer): """Mistral tokenizer layer based on SentencePiece. diff --git a/keras_nlp/models/t5/t5_backbone.py b/keras_nlp/models/t5/t5_backbone.py index 6e76094d71..2df5bd00bb 100644 --- a/keras_nlp/models/t5/t5_backbone.py +++ b/keras_nlp/models/t5/t5_backbone.py @@ -13,7 +13,6 @@ # limitations under the License. import copy -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.modeling.reversible_embedding import ReversibleEmbedding from keras_nlp.models.backbone import Backbone @@ -23,7 +22,7 @@ from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.T5Backbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class T5Backbone(Backbone): """T5 encoder-decoder backbone model. diff --git a/keras_nlp/models/t5/t5_tokenizer.py b/keras_nlp/models/t5/t5_tokenizer.py index b5dee49b85..5feb2d9ab8 100644 --- a/keras_nlp/models/t5/t5_tokenizer.py +++ b/keras_nlp/models/t5/t5_tokenizer.py @@ -13,13 +13,13 @@ # limitations under the License. import copy -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.models.t5.t5_presets import backbone_presets from keras_nlp.tokenizers.sentence_piece_tokenizer import SentencePieceTokenizer from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.T5Tokenizer") +@keras.saving.register_keras_serializable(package="keras_nlp") class T5Tokenizer(SentencePieceTokenizer): """T5 tokenizer layer based on SentencePiece. 
diff --git a/keras_nlp/models/whisper/whisper_audio_feature_extractor.py b/keras_nlp/models/whisper/whisper_audio_feature_extractor.py index e41519bbc9..5fade1d63b 100644 --- a/keras_nlp/models/whisper/whisper_audio_feature_extractor.py +++ b/keras_nlp/models/whisper/whisper_audio_feature_extractor.py @@ -17,7 +17,7 @@ import numpy as np import tensorflow as tf -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.layers.preprocessing.preprocessing_layer import ( PreprocessingLayer, ) @@ -26,7 +26,7 @@ from keras_nlp.utils.python_utils import format_docstring -@keras_nlp_export("keras_nlp.models.WhisperAudioFeatureExtractor") +@keras.saving.register_keras_serializable(package="keras_nlp") class WhisperAudioFeatureExtractor(PreprocessingLayer): """ Whisper audio feature extractor layer. diff --git a/keras_nlp/models/whisper/whisper_backbone.py b/keras_nlp/models/whisper/whisper_backbone.py index 32cfab215b..c66fcc8089 100644 --- a/keras_nlp/models/whisper/whisper_backbone.py +++ b/keras_nlp/models/whisper/whisper_backbone.py @@ -14,7 +14,6 @@ import copy -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.backend import ops from keras_nlp.layers.modeling.position_embedding import PositionEmbedding @@ -38,7 +37,7 @@ def call(self, x): return ops.pad(x, [[0, 0], [1, 1], [0, 0]]) -@keras_nlp_export("keras_nlp.models.WhisperBackbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class WhisperBackbone(Backbone): """A Whisper encoder-decoder network for speech. diff --git a/keras_nlp/models/whisper/whisper_preprocessor.py b/keras_nlp/models/whisper/whisper_preprocessor.py index abcff0d770..dbac2e11fc 100644 --- a/keras_nlp/models/whisper/whisper_preprocessor.py +++ b/keras_nlp/models/whisper/whisper_preprocessor.py @@ -16,7 +16,6 @@ from absl import logging -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.layers.preprocessing.start_end_packer import StartEndPacker from keras_nlp.models.preprocessor import Preprocessor @@ -32,7 +31,7 @@ from keras_nlp.utils.python_utils import classproperty -@keras_nlp_export("keras_nlp.models.WhisperPreprocessor") +@keras.saving.register_keras_serializable(package="keras_nlp") class WhisperPreprocessor(Preprocessor): """A Whisper preprocessing layer which handles audio and text input. diff --git a/keras_nlp/models/whisper/whisper_tokenizer.py b/keras_nlp/models/whisper/whisper_tokenizer.py index 7b68dfd790..4446193738 100644 --- a/keras_nlp/models/whisper/whisper_tokenizer.py +++ b/keras_nlp/models/whisper/whisper_tokenizer.py @@ -15,7 +15,7 @@ import copy import json -from keras_nlp.api_export import keras_nlp_export +from keras_nlp.backend import keras from keras_nlp.models.whisper.whisper_presets import backbone_presets from keras_nlp.tokenizers.byte_pair_tokenizer import BytePairTokenizer from keras_nlp.utils.python_utils import classproperty @@ -28,7 +28,7 @@ def _load_dict(dict_or_path): return dict_or_path -@keras_nlp_export("keras_nlp.models.WhisperTokenizer") +@keras.saving.register_keras_serializable(package="keras_nlp") class WhisperTokenizer(BytePairTokenizer): """Whisper text tokenizer using Byte-Pair Encoding subword segmentation. 
diff --git a/keras_nlp/models/xlnet/xlnet_backbone.py b/keras_nlp/models/xlnet/xlnet_backbone.py index 1d1b4d2343..fb196233c9 100644 --- a/keras_nlp/models/xlnet/xlnet_backbone.py +++ b/keras_nlp/models/xlnet/xlnet_backbone.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from keras_nlp.api_export import keras_nlp_export from keras_nlp.backend import keras from keras_nlp.models.backbone import Backbone from keras_nlp.models.xlnet.xlnet_content_and_query_embedding import ( @@ -23,7 +22,7 @@ from keras_nlp.models.xlnet.xlnet_encoder import XLNetSegmentMatrixLayer -@keras_nlp_export("keras_nlp.models.XLNetBackbone") +@keras.saving.register_keras_serializable(package="keras_nlp") class XLNetBackbone(Backbone): """XLNet encoder network. From e459281b15691a9e94789cbf8d41b78d7219a71b Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Fri, 5 Jan 2024 12:48:30 -0800 Subject: [PATCH 86/87] Version bump for dev release (#1391) --- keras_nlp/version_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/version_utils.py b/keras_nlp/version_utils.py index 55ea7348f6..b0643a6d4c 100644 --- a/keras_nlp/version_utils.py +++ b/keras_nlp/version_utils.py @@ -15,7 +15,7 @@ from keras_nlp.api_export import keras_nlp_export # Unique source of truth for the version number. -__version__ = "0.7.0.dev4" +__version__ = "0.7.0.dev5" @keras_nlp_export("keras_nlp.version") From ce93ca8bd6a84121f72ab0ea6bfc63083222f9ad Mon Sep 17 00:00:00 2001 From: Matt Watson <1389937+mattdangerw@users.noreply.github.com> Date: Fri, 5 Jan 2024 14:09:16 -0800 Subject: [PATCH 87/87] Version bump for final release (#1392) --- keras_nlp/version_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keras_nlp/version_utils.py b/keras_nlp/version_utils.py index b0643a6d4c..15fede3a08 100644 --- a/keras_nlp/version_utils.py +++ b/keras_nlp/version_utils.py @@ -15,7 +15,7 @@ from keras_nlp.api_export import keras_nlp_export # Unique source of truth for the version number. -__version__ = "0.7.0.dev5" +__version__ = "0.7.0" @keras_nlp_export("keras_nlp.version")
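The handle check added to `keras_nlp/utils/preset_utils.py` in PATCH 83 is the main user-visible change in this series, so a short usage sketch follows. This is a minimal example and not part of the patches above: it assumes the released `keras-nlp` 0.7.0 and the `kagglehub` package are installed, that Kaggle credentials are configured, and that weights can be downloaded on first use. The handle string is copied from the updated `bert_presets.py`.

```python
import keras_nlp

# After PATCH 83, Kaggle handles must spell out the framework segment:
#     kaggle://{org}/{model}/keras/{variant}[/{version}]
# A short three-segment handle like "kaggle://keras/bert/bert_base_en"
# is now rejected with a ValueError instead of being expanded.
tokenizer = keras_nlp.models.BertTokenizer.from_preset(
    "kaggle://keras/bert/keras/bert_base_en/2"
)
print(tokenizer(["The quick brown fox."]))
```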

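PATCH 85 works by swapping the `@keras_nlp_export(...)` decorator, which in this codebase both registers a class for serialization and publishes it under a public API path, for a bare `@keras.saving.register_keras_serializable(...)`. The sketch below illustrates that a registered class still round-trips through Keras serialization even though no public symbol is created; `ScaleLayer` is a hypothetical stand-in rather than a class from the patches, and Keras 3 is assumed.

```python
import keras


# Registration alone is enough for serialize/deserialize to find the
# class; no public `keras_nlp.models.*` symbol is created for it.
@keras.saving.register_keras_serializable(package="keras_nlp")
class ScaleLayer(keras.layers.Layer):  # hypothetical example class
    def __init__(self, factor=2.0, **kwargs):
        super().__init__(**kwargs)
        self.factor = factor

    def call(self, inputs):
        return inputs * self.factor

    def get_config(self):
        config = super().get_config()
        config.update({"factor": self.factor})
        return config


config = keras.saving.serialize_keras_object(ScaleLayer())
restored = keras.saving.deserialize_keras_object(config)
print(type(restored).__name__)  # ScaleLayer
```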
zYs*HHCFCBAVO~A@kBMW+eW~(qviv(z{w4RbSCJddsco2TZf)}5u)gTSVconBhqcp( zI$!a%#5FVin7Dn`1z}rSawwX7S=c(~van_PaC3CdvzT#3*fiPMHjjodPW;^u+!e-? zyJ})#=W+E?eZR0H*Y{1hJyfUMrEfVg9LV#ZvigsNx`b=&Eu0wIZyprNj(j|n9{PAF zw@+ad;A|0bA}EHOD0?zmUdnmvUjA0bk{E*b8%RadP!JWbV z3r|ILR`8F*xx>P`DVK-!nU{xxWdA{%(JySA(l1PM?PP48d_mYU&c3f{<^#R+6mDJe zqfMG2DYK@fQ@KyWbo7{Wnn6bU|7McW`QNk1*_ea5i2nUKkIcXVWTK(S_^Hs^ zP3D$d)BIfcb=7uwkx0%aO zRp{)ud}qLs71L7xYyW?owba7YlMP4g3t;Z+|Myo1v$q_kA2>umfOd2sO_-AC8Z@H~ zhj182P&(QCe3tzOt}jOgD#zG=;2u?E^(^<9YySaVLoMp&So@ELk@g=9wf{i4<}~^V zv?RMf_qMa@R&-$3tP4Y@`(!_t5RQ?@u_s49;eLX-Jl7bS{Umc2s`BU$P>Bkh;&vKm za2Drq9=%BDQeN?o>!(xLQ!yCrb?VMM7|J65W^WpSK+ZtPF>Oz;dP5sM zlYJKU&!xB59!GnBDw#{gQ-*yL^#4)8T#5Zf`u_zX%y!Kj%*8xpU{RSeBi-U?D)j!9 z-oHfuf3Ek>@cxT3On%E*0XQmBb!xZQQi0orf~`{chU50*A72Q~6uFa1rIyhlG& zdG0;E+H86zI-Z&mdL{a)nq`8V=U zM}hCJEw7nv|DW-GZKnAL=K8tx6J%qQC(^956VF(T!+12z(Epfb?KE>su{M0Fc>v~g zbd>3TaBCx5>z(_w92vq)_O9pcB_z9@$1t1RC5|~{ zpZ@P2=R`B_Iic@!%vo>D<KfTwt%?{r*InG-|82J%70y7xA&I)|EB*t zE6h2ZM=uh(i-Y=Ki{Qss=CMCzQN*& z{{1tQ%pR5=hLa;P8e=dP<1ij+n1CL871GJ9Oz%A}gh|ZN8kZ^L)W~1E(f*LL7uF<% zY3$w35S&iVz)URpmGpVjp82BmaNK)M<~9qnvFCU?{a<>R!(4ve-t4pKVJ`DLR1Md! z8hlr%6s7_h+-i&O3Jb_gG*tSBIhlj{8hibx*q4r6_B^yRcc2w*D4l*+D4TtkH41ly za*+;iNc+5UIryQTnzC_*txP=-o0O}GDjrv2|^%Utnhh&R)j4P*y<^%48uao~jg z@2F$0M}shp^X~4;JG;8>mYJ0y>^UR6c-!e)O3p_6@;w)_UokG^zMr`Y6{tk*$vZu}kG}VxC01S&)k9P-hb%-YcK1Xy$2eY8}&VFPo;;`;yHt} zIERKVd*toiKhGTfllS0+SmP$`ck=QS;=pQr{)0N}&m_n)SBB~$Te~=iWH9w<;TS{lVjTXS#kCW@8TKBHH^mkNp3A{?9#k zhi-j|?*8`if9tN0A6+8i#+6`UHuiE3(~Xou29I{cmB^DX*f$gKo)U(|NNg~ z<`2&Qc|OVc6iLQK#{XyaJ0|-EC@nGm)Ry&~f77f@VXnk}ZBP{o#8HAWRH6p;Xhs{F z=9<-~ati8vh?Q{-;xKCp&tL|HW0KjXQweN-iP7kOGAWE9 zqcicwlH)KQX_$a?Ou}SLK~G&$m`YAVsWJZt>o3aaWR%TH>}Re*w0?iOa5FFyvoITT zFc(d8m4D-egT|08Xk~6oRrilq{!x8G`Nsj_YG*6|+Nyf80gc+AthPiS(%ieboJa>A zolQSayzR{g-x#xak_`{x!{mUvfuu>O6DamEbeiaEv=*k5Rzfm5C(&2zwvTA`quxSHZH*dVQSABmmD`PIcc0iHgYR*>5UYLiSV)3dLjz%20_K)T0@t3C4B!;QtEDmDrDHuS5K#w0ST8 zwF!R+*>@`6&UHM(+%!`9OP_;dr2lE<|9sT1=g-`M>M7EHn)F94>e9TMcpJz@*X_zp z4V~gyVs1S8|Hm=rJ^o2j!u>dNc>(=To-+cOPonBP{f~Vjl{xf3=;Bser0n0XT-}ot z8m1?PQ`}CY@BEN?#s$^^oMAtU_VLCAXq~1EqB`Fip+akeiq%1=JErWS0ntCYn$8(t zoL7cB#dFwoCtXLjvv-i^#CaaQNce+q^vv*(Or{`1-Yp%id@Fa+8CW0aFV7l3pbDwN z4aQImN6&-Kn<7VJ47#&J7)$o`KghoId+LHk`r6~!cX{>+WZ(LG`}@+F_e@hC)?5F` zT(1AKFN6Msc?zo1)GdhC9z=bhsobVvI%Z%dW??pZ^m*F9m1KUvnVRg~LyQl}dC0&5 zWFiYWh}L)JlJ)w0(f^(EnDbG9XbpKwj{H$1f24^wUH%};lFdQ;H%hs7ai8|EGSWdK z`u9(?S3J$NQQH{pIf};ih3rKrMhTiH^$XD)K;Qm4`p&-nb#$Na@2`vg{TKZsIQmEK zfduWkK1N-Qx$jDI;N-4+d;eFOpSVu>)&DCK=N@57_*F8OAMsqh`Vh?Z=+i&UA*7b!&1sM|S(C<9V_d39lLlV%JFj-y8RCA0kSh6mENtdwzY6ROa%tp8vEl9&`3} z+Vn%x^`z(T^8DQYxBCx#i~ZuQBHLf5`*UsI{>LljP3F-UgRyAJlm@=x!BpRXY?a1s zDXyE~`~g(!57lHyd*N!yx*5K~WNGjFHj<_J%15C*TC9Aa;+VX7QXW12?)=m^_ZW{f zblhk^ESZi;n2aXny?LhcX*}ODTlqAmZ$mpe&^N!9r~S*({?Yf}BeD<_6{Ca8zepH$wi>#%5?+YurQI+(EWCTmLsF7G?-L6SFWIdqygM z!<{+E+_!!?-I`wJd8ksCL}kB{jMgugY6GJ)$oI8*f3l*|`zsgwm5-{ZoZL7fWVmht zGLeN`Ed9Fm0V76+CS|HQPydT-QMOu{KbYTLK>svN{|g_S|9Dpa3w{4zKV=R7uhb>l zy)636IP%1uj{@{O=UfM}i*BHZ?7rO|0qX;bBR6@ege*hfdOvBCea{GeJkQUq2KDHe z?H$l|+_T~R{Xot9+OW$%#B<0)IE*9MV@|xJWMt@ME+1_|0V6~7{^5T?78`8j#|xV`$?=jfeQzjtke za|5C?{wrR#CQe!%EMfP)>-cx%*}soAZmrlg)SU18_Zu9->N#V=o^TQa*NI?*m816*nDbeC``CCZ0fo=Y@atOY+Eoq6rCR)wkBL2)}0y` z)(*ZY_@`lyz4`D!Sx}h>&CETX9Grd#~;M*G|7Qtjix3*3Y{v6r8^- zY&bt6Y|L~%!^??b!$9+2(><&2v{gP`GDx09Q|^FJk&vV>p}xzyARKj0=uu;aW`4WG zd6;}2KSrWBvPwP{>tG%#KW#2BpEmvKSZluet|t24>N{;plRCAT-$C}h;;2*K)_1D! 
z(5T++JCEg*`ml)Zm&|$SM~UUaEq%wo783@9T4u{pxoh_o zVLW^HL(XLw^WAXN8Ek*z*MoB~jEesaN8_=u-u{9AWL`T&etpy)#$auPYg5s5c0jn8 z+$o(dA#=wL3?C+|+@tkoZT)lVfv7FvcGNzOro#im!TaMk}F5-J+ZR*(A8nW%Qazs)G#BGO; zHxIud_HSg%j3KeB`m4J=!`-AVY5dFN1XZCq`ADr9|9A2R@@iqy$*++2lC#MB`OU=? z_C)gYDigwT^U*8v`h}JB4|ydC;dA0zO|I#pTR+n;to5UbbzX42 zFa^^tP{tC%M&s^H#}Y!Jc6YP(cZ>RW>l}4?;gHxiVYXvO#*kQX^95n&6nk-U#>IAz zRy zzQ$V72Kzr7g{?d144O0cg{JE}9`YYOvMkNHk)76<@T(vz4=0D6+;=4>haK)wJi)qx zk;!2%`);yisPk?-!xq=yMNY@IlgVNG_$2e>(LVm<@EvZs*zK7=epQJ7#yjj_UhCVP z#W}3=&69oq6xfa%-^hMIS^N&aRoILYR7UViu$0}l`nc)i_|c?WVx84D$Bte}zD!<> zuj7%MW1TA_^Y~cjknypjzlh|=V@JPzWvugO*T#^L3hSFzkYY@=&-wEowr6Z zJ=XbykHwDOcPI0N*wNcR7V9i!Uq-%tOYG<~?sE@$9oe6Jesrw!nvcYeR^Xo7V@EfQ ziFH0dF?MvsHL=c%uZbPazQgr*@aNvjy+W9`-T&p=W1Ual9y7fi|BLi_TpnMA&MU8X z?G3Sg+&(6~j_&rnJA}XeBe8!^V!wT&{3E`LC&W4*y(@N9Vz#H73p{gU?3eso)T#ZX zQFHoLv7d^=uc6~5-1mt40QrP`qdVe%&&D9kRc-!fSNPFbWyzrMuk7WAuZkt|e~$mv zA2KLh%f4mC=-2_W4vlER^`Nqgzkqe) zJ`(#S`Gjx%9js0s6jsesPtP6{o?%}x`O4U1o(Hp_M_VE46u%y#+D487+X4YVr<#iiLu;q6JyIqPmHY?GcmSuf;^ZV3#-OYjIB=NC;t4?7loYY zen-87^eBkTFIgLO%KD43o@>14^6rJvvv|j&cPiUam>aeC;wok?iR{d~W$2RW&b6EA z{6=o2WfNm%l@nu8Kd+L#Db0L-6!t;m_j=EAV`4Zugno}(t@Rys)^{8Y#uq8pe<1pQ zc$3PqDZ_o~C|bu}6ix`+uJA{3EIoB$IQpvi)#XjiQCqKm-K|{4uZnd(qs*;W&XdN* zI$u|2zA7BuO4A{2!to2kGHk5zk4xX9K6*Cq)*MOpHu4a*&$%OZlI)7yqjY%AJ4=hh z>_;NEQ`Y;E$KEqHo%XCadi0jqsmN^%^Q*VSnns6kn*B`VCQYMn8>MyQ)Kp`+)KIVS zjC4EE|Dw+dQ#0J!n6Y$l=ag^jblPT^^G_ROE;uzDL^GNYZyOM@^aU0TO$v*T4AB3g zzddPP)bRmfX(#)G^1r^ta_4QYNbMh14(=cFoY%do{@U2;Qv<@9IsNr5`}?PE|FCYZ z`s$kGP%yH8*pTDe5y|>9$)Pa4f7qw-qsrCME42%=2ZUmMrJdx_R_#Jf zlJ_^$*XH#Pbz`rK z)sG%ved_@2<$%yMdSGZC(?3uN$6NG?T2n{H+D^(B;|GQg#Jy{oHhy1CLWrrCE=SJr zl(3{QB`hsS3Cp@tLhhI`v5#_}gop4rzK=YtsT>gUrzBhdl^oX1N)GE$z`S90a@aUH zIc!Rc-ak2PCby&}hpj`ch12$JBe$n`|FV=&+^d~Dr=3jj{@TV8;r5VwT~|t$vF{_x zhig}JQ@nq4KC^!T_ojsEgp^Q|>3ro3?X>Ib=A>9Vsht+S(e+JabJv*IL2fN*JvJuR zmYx#Y7o^z7ni3Y@oE(au>KFd(T~2CScQcnf&@Wt)pkIg;>YOXdYq0G8q+r+?zY)5W z@mpcY75_aB3^JD!`i0k+-#~-TRil3gon!th4l=iFrFY4uchM>QJGbrrmA>m}Qb-a{ z3KoZ?a3PsvKBeT!q;M(ovImj^RbSlpP<6d;K2HBjC$e(1Ih?Wm!tc0^)UMo%YWkcn zkl({9yx_Xmk?h(lpj#2Y4-exTX!UN3PTmk(eCmc+yjM9me?zSAyZiRpT?34RxrbuKCmxFJJpE8? 
z*SU>rvLs!}sUbw;NA zmnr{c+Q0d1sGIq0sGs_5Xkcz!@T@ZYY-pbTtUBk}&_cHIYfF1ppX1rkG0uO@wdEC_ z_#M;I`6)s|aVZ3p|MET}={aySNzr`Q$XZ#I`H|Y1` z8(4(3*og!9SNsmY#~*RUjUgVNzA$9XUKkd+@8XPw#=i?gPSL`!BzIw0nzb-2%UKw5 z^A?8XwPwCCwH(H4}CW59H}gjyV*-7JRA0meKzbxscXt+KO6SVeb)Mj zXRVKT*4Y2qu%Eq(tae?^m}kR*anFWYvMycvlMOSz7aEUegr?pFq4{)1ICwrIw47KF zTAACpw;#(09VapriuoZceSTOpWqw#Zd49;AG(Y4_ogbE*e=;o1oNxSNu8w{AjQRe9 z{{660xV)n8hgCK79V5RV*5v%IPVLwooNrpcBRY@yGPFtCFA73 z+%LrT7JeaC+WduBS;l;8*uNJ(>3jVXzKnmyzhF65VGs7<0R9zU9;bhZ1Nb-m1V6{` zko-}vjdT>w84zZYb1)b4kc&JtW6?X`4~t*@emKnBi6!5DIxNL;=2LhR@8G(d)Qh+k zx8ok%kG1za6CNR-M81C9>twiDIYc&=q5K!mgwj`^31u%o6SlJNKq;!Rv-+9PL_UYV z;^;k7Vx3`1?C3k6HtzULY=AOx1-4)>8u3H?6ffdH=`*1gzhV9@y733p-S6eiFS*hfH;-J!zM5Q9)Gy>$8vmB` z3+r67o-CN|KNvHOf5}aAqw#No@oz%dLT+U*;=Yalc5;WYb#Xl%TU&G%MMBukUUJ;{ z_hdrYOO~E7M{!R3+-rR-S;1b(eLw#yvRe3>diqt@*OGPY_0#EBXTCeX-8Vm;K|h;G z_eu7hf55+;e+Rk!W6}eE##{Is{*HdPg!n*=!X3C5i?I}|u?}0X9Y4g6(2YOfHS{3y z))2oIH{d3G9_z3Xjrb9MiXL2&rd`5U@Lep#Qmn>0Y(y1m@l(8r-=Z5mcng2S-_h^m zo*m`rz@PCJ{*G;hgTf8Bh4?MF1My1x?#@T!462tP| z#IS-~$)3l375~-b8sYQl;@4sw*86u$!SoA#e|=+elX=#{j0=7L3w?j*bg>t4-^PDC zxueoKV)fSMx0#D1ce9rqztH!;(D%3Q^^Ez^bLK~T&Et|4?3LX2^RFVSg|DeMzv=o~ zvW~rey7|qS<~PZvx#mYR%#UW8A0=Da+qk#$?;u;}B!#w&Bx{G2IVm(xO$rCe zmIX=HizZnwniLwyMq#Q)CWV^Rq;McP$$HYHP%)kUa%NK4PgV(2N|v$jBg@6JJ70ee zd$1QfP>h|}g{{-{2eA#?u?dCPj4fC{Sbq{55cMb5Ojh2eDsSXE_LXCl$8pLdxjJ2W zO;%o0l~-~Fdp7qR{!7TE?D5IUyXzN`i`f^-1B<)}gj zUc_(k2fT(J{2714-;t=Va1k!WC|r#ja1(C99f+$hvg+yo)E|q}9?w(wB z##rwh{apmx!@swh#BKc9Chqnpor{-ZjhHdwqHkGK%dOBe zb?f}~ygD!Re4G4*`>%}Lte@fEJ;GUEk-XBG7?FIyS^%==6=Um2W+#Rdk^Gf4wUN}Z z?jpONx1Z%D>-Kgeh3;1q!@W^{DYXV53bQ*Yycj%(xK2l5_3wMc-*ablCLZ&B)&+1s zi*q=So=1{GZzR7YzWBQ&NW1P$!atc5l9^MGioqCxo=1g~c2_dD1?@j>W4;F89UI~o zj?u284lkp7x47=3@A*iG$5TENTXMhl=!~?IF73}qyTRJ5k@Ccs^l|QC9_yQbOPld% zEUYjGvuy&uVZ!#ONG@A*8_ee8vK+Qv2SJ?~juCEYoy)zgo7Q5e|BY9A`)BVEsPWD^rr@j;m zTaWY$+YVcA`MR)C`Y=c7)AOA8UAO3B&n>R@Z}BVP_Yn8CetqWXJ9ZCZ&u7=KitJH& z>^UmDv{;PUeXCN>A+ALRnccf#0NMSD ze0@wj29JfOgpbltndtd}cT<-{@Ay=do`cL;UySR7H%{v|VC^0u%(!>MM9;WLT5e0{ z=eY{$x0buEJ$nClu;bl0xAl6Dv+su45PcKru`LQ0-FG;5X}9PR*Q>V{CAsDmYv-c- zOT)hU&G@izwtIW7!Xx^ZMe_bvU8`Q+p2hwB>kGKoNkeJ6nK}A)iz=*3n0$R$^rpVg z^WOPwX?3%CH}Q9?@3Mtyzui3L#qU1rGJe};2K|)~HrJ@n_^0%Vi;w>+8{3cd3pb>kTRHzSVq=#K7pB_336*`JHtUXF!9 z*D2@X-|0gl*V9H8Jd{x3-|od6VQW>)Cc6 za#l6=Fz@ZPK9wx%eD@5i@)Opq9IyE@$HEuMuONR-EIbyu3->trO?;90 zD|igw#1r@~zK5r=5R0)CE3g{ta4_$9*hp@{c03`j?_wA8BIRW-S&n|(2I3;Tal1au zOFxfY!rcAPzr`*kFUKfcjdJ>=e;%j5N&X16*kxSTLiX%_Ikt=J9$>=oSl5n=Vyb@Y7B7tT z+<15FlTnxl?}+u>dPl7L&Z}a&V)35qM#XvteJJ+dm~ZfWH{lWHFXI;G+c2Mb5td;U zR59_*cm`j^TztuM+#&4UP#wm7{`kGjpF%71_pu58jJ5a|?8YzfC%lgMH0RY}5f&qR zo;4Yn=HD_d2utT)5SC5%9~}LY<(0-W#qOxDX15di;#nCEX5KumnfrX!9l#g zF1>@f9W|&%Ee;_1H%1*A(SS02#8P!oIk`{2v2rdQ347H@IuaB!?@+Jp;=j{8M@hbZ zCbzxho*|Dg#)RKjrt{Pv+l>`VJ>Nm=NS?C}u+tj4Us^-t+=NfGxaWTD zLy0;|ncYD5yO(?opQQ`C5MT7XzcWs|METxm4D$s0_3Ycpa&Dt;5Am-o$+Kq|o(XMPm1eg~mjm4F6r&vr+ilpVnud6y9WhE3zM5p4fBc(!_ZE zu#k0RnEAhfVezLna2TC9jlbdVNYvN593RIU zLp~m+k)!0Ld&T)6X5t%o2D#XRO0?nUcnN>Rn@AYrIWYnmcm~&F3F3qGfw2gSkv*RN z4@8W?t+9vF5`l{dQvhTZ4o%?Z&Oh5n&gN4Y6K)!A0&)ycMI z^X#s>P1?4TH78O-^|4gz-&4Z@vYvk(_eS;x@-E>%i&<#GzhWcy;+M!em8t?r)qXgm z1UbmYQv3Lqj2otGrG`!7+(K?f(UjD%mE1lr)wm%w+o~Df(zx*TW~-A!XC&EVH9q|L-;bD!Grh~p2QNY#b%V@73@S4D)B@7 z8n2??wd!Mx#&m4LnaC`kKZeKgP5edJTe;td&tfcQU=Fe|A6xM}eu@g5KsOHJ3T5eH zoMpa&e4C7s6Yxnqh%ex)cnUdKiQOni6OQ3GID>wkX$VGPEGA+SX5feFgx@ELBiWoR zF1f&c@E~Nlcs916TR6k__)uZ5!yWi6X5mRJ#byL?T!bqy4tL^f{pnAkTN3|!KXq1$ z_-?->e4HG|{bt;XYxtek2EAy`==a*3=gI%3ow{DQo>wjjDdg{Dt)C0K1mlG_?H_+f 
zys7LPF;194_`Ps{!hdkvilM^glGDk*3tvWVyvDZ@<`nnqu^nH;FxUT;`>V{CVkch1 zWjKlw^xz8o6lM4;g6md@H;;J+nTUJ1-G>J-0hi-H#r>8rH#1*^Ke8WZ&&Q*#xrN(@ z@ZY)riracT#=V;S8rgl%hvx5n&_9(9gj)r^`XOWeRO4LS%KQj~@A(Cu$Ca0a z*Tv!YJn_Ne8-iak{{q)A_v7~s($a|d0`gY!HdHcO#u9Hu_s=f&k6)DHsllQ90rF!DM6n2wmj>1Rq?|z2;9z25hC7u;@$32mmU(bEG2E*|B#WMAs zAn&d~I*WMq0+N-p#N^xU*`H&b$IE`Q>31^J=V_;QOPm{hxY)|Lhne&D8#r8`(EyX#cgl zo5?NgTe%nU-$rg{-%+pqZ`1yhyE?W1$L*auc|nNwQ0-+eJ!gM(ulAoTXRqL1$$vjt z#a>-+f0OI`_Lon!ziB$%=uGSV$wv033}dWJYoN&%_Ezp~{M*S6_6pywvP61TN>8$s zy==Dho-4h{-M)3nWZz?|??LWhFHZKoeCu6gYmxLUk)E#aAe-2mbEJ2k^d{?0`X1aH z_&1W(>@|mduTI~KTsdoE$iphE#|etZ%C zf-J1ZUtt+sd>F39$1xe7#lv_Mb5MY7k=-+X=|Vapd`5CKrO*+gYJfNf8W(c=9DYBD z&WRkQ-d}3$^`F|mD)Mn-w;vn#{hP7a`ElCWo72MeP6yg!RKB31iyzPcjLnz_u1?|8-@8T|6TlE;9jg< zUdq0X{aW!1^K3tMzaI7>p5a5nU(5dn@!ck_kL&mU1QW#-7j`xO-|+uC|1GYm#f#$k zyRfz5|0(V;Zml#hf112S+@BX_1^+J!S2mPx!290pUe#e_s7dmF(l9L44jLZnUcD^T zhgU+wuv4LN#2-RacsVp*(-jV0`Mb~(UJtFeo(XNY{&)KEp3o6q4X=s6T>Krr%SE1N z_k;TH&H>o-i23`_Ka@UdedU^gVc*w>hVpM+8Y;|XRz5j6?0@D%p^EPMUf1vSOmBI% zeV!>ZN*nKYhxdB|SBv{IXpGXq`!_QGT3Y-PdtG}Q`!~H?2mkMhXN2^5oP1H*PZQTN z?|8T8>cAIV{~6bO(zV~>-Y87`l=06QM^mfW}dk|ti?L4A8ZZ)8?X_Z#?zf+GqzysU~@~@hV9sK*7|4cM6{QG z_h5S$u?Kt6cYf&j_x3N7(VoRh>_-)<=h30#0BTV;^(yV3c?5eP8_A||{?C?X{?8uB z7BV`sB|1C39UUlWzA&t}hI!)=Yc|My_BG_XvJ0*0uvX)oH5;>yC&+CmJa0|MS!+2? zThl@2B-@*hrLJFM&w5sIs=|6@SX^Sff;{0@3b&g7Dl(US8M(ss%gOp0Yj)_P8p#Iw ztD3ee)#=vqupeNL#_Fx7-yN&BpSRbNy(!mP9`Yc2Nt!hy*o!?V#t!VlPE?+w4?`99 zBU(#TifAoS^lz-c$!nYBnNjl5e)(gwJUCDu`K~?>?8_Jw%7v|%YW}0%w`}uG$?8sd=D56aQl25}*z3>9OTF?G*~H$= z{UHAqvX#B9p8m)69c>qd&qcpPe@3Ejb2lbRuhr7-E8KpA-O}tW^~RUE zPnU)n*e#7;knX<`&r;vzTH$^leOK}_-{oV{|1OLY=3#tMxJKM2-pk$dGw$=Ycz-DT zXT@76&bx){@g2X!y#k+f{g3(oC;#7zGe?-8y2n-ApFm<1w>W={$Hm#twU^)zuK9-W zU*!Ks@GQT;Vxak9B?WJQBStnQ+#=by)aCAlKO+9WcUeHq!2)?^`MFZRT`aqjJxFu*^55%Z|>z+AEGd;wm9`i+dNjlf9VSG0Pc(o?$;K zJyQkBnfIZr`5I@RSd%X8yQA+c4R4b+FG#x=e3!Ryt#qp5_T4BgrTZPyEG7;A#2kE+ zE2V#;@RNLtON9Fq`&+`cNY@ek|3$b@C~F(=JAVJcev>%=jEBYlU3?EKFwisnjoj`Y zkBQ?_aomfqaDNPsp&u_?Tz93(w+X?)6#x6V_rIvOM1=&+`L3kNj7L zg_p^do++ND{HJUG$i>Q5wlbD8Q~4*CvM)PwL&#MQmmj+!tjJLQ=PLjFT3!1`tQnIS z9+!4&M<<3Gq`{|9FhThrtNdfraOEGHPb>e}dS3a*wyqn(_k`Us*c>Do{crCz@+;D& zBvtuOP7HTTkH_#8>`QSzC@RENiT%t~s9r$-hXc&DsN+|U2IfXIO`!k72j>%>8W-B! 
zt9_{XPxood8|BObZDG-<(2nN(=*)U;C3&#WUh(PnjH3|^s6jPqaR8Mgor#afJ>xe~ z#$1YWX9Mp;iT&rhXN;oD)TWOe6^bW~3Om?$kvpf13PsMj--_+U)_awilg}9yHem}k zqk!9bejAgGsmS~?eRy-@>zYUD!;dg_uqLhZ_OLoDgjI9QN6oZmZJM=eldV~sYA(Cd zoVK#ERGC_$&dW+QS%bxgtp&kS&s>fU{05(rpFfAkumZoqMe^kLFiO6>4d28Qs6`_# zk{@5gP4efbutwh6^GEsIwH@lpf0sv>NBI=r!8UwUUi~z#kUu|)Ph$_xq8G#E*|8WV zt}F2%zKtDdLMdAD4jz&ZAH~~_!6wCPK9v=~wOTQK5O8t&Jtio!n$u~zhZiKl(^N+%=&$SjT=VM{Rf?;9f z_{+j3X&$Y^+C0=Z<+t^mZ(d|PA-?T-=H||h2*vZv&0!aI=WGA4=gf$(w?O+>sr{?h z{fWCeTQ`Cx_eKdPNOU2{hMKd$c7XRSkY_EiHK(R55*St9>uIzyvOx%JK4Mqe7* zYohaW62c48;6=QJe$wc2TpHyA{1AV}4Zhp!PEFNu9g?R zh^O%*jEd?lY(ctwaXY?&Ka7=kG*h!xxN4-?~sA z31{#Q{)A~4*%O1MNW-I;iwx|-^JvEpQH@3OR@$iC^E@89eEIQO~` zm*YCzjN9=EdbxMlO>U=D{!h|T67d1(hxLW@IoNQTJ_o7Xl5h#uEqJ!Y9Jc3HLhtoT8%G^Wa`_FD@}R-*!RxEc22wbM#nNX#K${bNikO?<~(d+w*eY%e{2E_nAijg>vSKDfC}c>Axn^e@&wQN~iytK>u|_`Iq17 zl=pghuYqhNn@%hLXWktvwaAC9UCRF{eJXCz`CwVbh;ie<#aP6gqix7e@&6m++a<=1 zx!jiVTY=@wdGm~cGmL#Rje!>!`z9L$J11Zr)-o43cW1q^=SE?pF>s+cHi>JCG4p0) z(4x`Cz{btnxo_iMe8L#`q_OX@=-hu}-#MR;?VkJj*j{aLbe4EouQBj>XY;V{V~@@z zivFQdC9dc!(VDTJk5!NReC#3L>>fDCeH^^Q#dpb%6 z>4yxm$7fLZrR)9#M-j$9f{)?T_#D23Ozglx{2P9Z7w{%7^j#bq6Q78?(P3Xk`vQA1 zGVH~eXOG4ldo`jx8?)@)V2<{1G)$u>m|<^+JsY(+z#N@vSIw_#jJ+VEjq}Ia8#2xw zkyLv{Qr53)wf5TljA!i`ci3+k5FM$YS=L z92hH^T$J{elxU1qF^1Lw@Kx2ga5*W>;3jhia%buU!^nFDV@>jZQ3XdmqyeC(b6PWY63 z_yE3wuVOhiU?*zu6TE;omEB}zbrG}fc6~Trvfs(>@GM;d z`}XMrtpgbtie_6=QDbcdc4*&=i>>=PX>CQPH5Mh-TBI4P+f!Z6UV3hzb+7}&e*37a zP=QJun5!Q!b)a)D2ZkCnOde?7XP`0LK>LsfhJ)N%&}1D)Gue&~w4x31L-u`R(TM?J zF|yCw_l+f|oqvF3+;T@+|7#K32kU>eN{sAMf=~S{a>K{A8xMz>bTYaKYYCld|cIi@B8yEK8+Lm6hjGhbKUxZ zLr5_o8v|})69))5-~b-?e7|eU$+_o#KKFj4@A|L3_Imx-+H3Fi+IwJRC+lAj#vTFl z8^SS&W4FH#_yPTYc#&&hifhRJ-SEA%lYv!=L3juKhOOp3%2gU&L%TwT4y@APhkR#% z?;`Y*hk*~s$6@B-j?gYwvG?#X=2QILFFFG70R(!Kl?`fqO zx6#)z?``EEb0_H6BV8OjkWS8LBC|Mt_udOShW?GEW?$OX6t`z9EHAE4VP^h4FU3#@ruV6Nu^{k;n+TYo{NP_gO)-<`X_TK@&U zd2xZY{tIe`I;h3H0a*`z{I#vQz&zds1y)>;hjXpxFYqk9ATMsgT^HE%bwPdj>4nZM z7t{gW8!xB}89H)7!!Y{n1&tsFj$U9*9Gylz)-`~XeAKA`v zM}+Y|bmcSthn_md|ImxQ5Bfh~{tE`N55e$?k9hunq!H#2Mj^~NKLSyRLA-N$k8zSX z%x`pml56rWpb8#^Vko=h&$Wwd?=`4|FLQ0Jmn(<$dD46qH-6$Q(+_j-1@iSYc;O7(PaVAiw^KJyL9N@2ZwHi%{Y}oFgC%$e zj-yxU3C71z4i!+@S#JGld`!jhDo^GRWjIe6W>QxxDCd=sdktlB755sb06)A0)1P6F z8T<~4KTV%!BhNR8LJZ=NfFz`jhWY;IfChH(d}N$#e(PuW8lI1L3{pOWd>;tKBN6&N ztXbghglwLfg{1v7<^=`g#AiU+#BU)0n&X{R8?mWl+xf>&ahb zeuY(aetAvBtgY12N2)tDr24Z%YQWxDcJciKWXoR0{}IOj;N3X1y#J+ad5?Zz&!Dyw zUq_nfKYfWVWcP>Uk21=IQsDD5nf+Bf|C`VcS-`RRw(|er`XAv!91ZdOr+)13Vd5u- zpE$>fBb3kcln>HT=ChoT$)?4uCZClP@maZUpOp=+kk2Zp@>zwP&l~Vr`E@?4nErl= zpZB0R@6KS%e}d;Rl+Mx*I?MBVn&Vq7$Lq<;v0T!r9#9PUB`Ei*7F@Ch!9VbuqndDi7Nrwf8h5Lb`bw8;{> zdVE)}2%H(K<>0$vSvyyA z%J=+OuWf{iJ*!oI$7;TjwVJ&W%qJWmkNcTV;QM=(&>dOL+%{vpfz>?gR;zFCN9yfd z&7Q2)>H%NbYI&>phJM{@wPvkWyNkUgNWXiv+8~B|6hDzBev1K8vw7CL{60hI|6s2J zeus!_Ft}O+p4IB#@sUQ6Bcu}wt=2GPz3>UWdkgpeU5o+tv%mUb#sH98$NuUq^!=++ z%7enJlnT~fUOQX3fyk&ny|lg!69TaL$)a!w?bU6z!qDXAQPQn~J=@`7bnembdwYs#&{ zP?GPzm0Rwrq>8r=s$`ZAsnPHM>YD-GwLz-jh;u(Ge?hJFFXVinAAbxwH@8}u6-+M*_Fu3jvgyvbhHH)-#0 z(%#=xA@&0Ji2Gar7QTml_Seb*7i5ET_SbTp`n58+kK6>e!d-9x8VTQU_)YEyZ|XZ7 zKMgep-efQ6n~dGwROQ|`8N0oy@AKR|!boP#{50j8TW`6<{;AAFkU z!!hKqq3R3kRe3w#>b~n!eCOe>RCX=j{JfvBz}H!yWq(`Kw=YxUqnE2C?@Bd4xmI=0 ztW)g|_#Ip(iR$&2vX-YHGyf8KC2WSjA^uyCpM$WjRfG*7(T?kMmOj=enX`Ti8KuuO z3NOJsm+(AcZ{%;_uj$f!0Um@OfH%T;8|?2a`B}rnjoV#jThi>lNUR@s&)*Z+(v zkd+&ws(N8W)ySInM^t-$hW)4`s*luL4aCzJiK^-SDEld+YIz~5*0oW2R#jQvy-~(L z)m9tn`9sxKU}seA`6JB#)mxq9uj}Cfb^EKWo;3|tFe9p7{PnGj@@`GFHSj{UHHaKq zAJy>wFuyMo)yTw%Mo&Z)CY{KM5k--)V^PItqe|eOoQ^X8Ngh_!Sk3h7nmjSqW*e-A 
zm7}T;#n`i5W7WE1s@Yg$Ri|Tow==2AL`)U#n97l5k(f%~kEvv9OvP0Vmiy2s@5UWh z;RmBCz%BpSD0}B)%3T#xPWrf9$ZU6nK4wggEiq+f@KWNgm{Qk_D*0iIJqe?VBV!-L z6usiOBFOOb5se~8NGCKG)9^bn#&|IelCOcLnEIchSFY)ruqN4Yn|)D2zJqv|{n zQ^&&*wUbWZ!y3!~PL0)u^zDtwdq+&3=VNN!Q*UL~wJB>$p5^FllXG2%_}odRE77K$ zclwljMV^({N(n`;Ep!+5>MZXHudjsW8SaO8eG-J`d6McOnRaB z^Q;m4jZSwcye`{{kiY2qKE={`R(zk!N<7=fJYbtrceE*UZN8PYBHwZ#ooD?#|JvnR z@7L{=L+)C?@-qDNjXG6;WLi0sPsL=qaW7uwSIJ60YhT9Qukv+%-hcD6{@1R1@KZf| zLNz=5%yszb_jalt*)Y+`{+AQFm-Ee?E~^E%*1dlAll$c*9$$uEZAkyR0xR&4U+qo# zR>w|1`sZ6+q}LtkWd6@(1<7CUzFzjfQq40CjzRO3()Tm@LiRB ztG+Y9v!TFhJReZgfdZ>}HlUV6U1~j=V|lg)fI0>W ztWHmX)rIVSKA;}zJ(vin*WabSfdK74pn>xdjXA*&S*NIuqU8M zFu?qOK(SDO{jY6GR0UXbdQ~Z8O{87bp>|cd+o?NrSXXG3&-Ku!X;*1F*J|0?uI8QH z;`2Y5jX~;gu3hz83$40cg;p&x_rrGO9OIgqZdbOeoqlAy9Ijj|>u5Iv`jaX-(#>9h zcDcDmi{5Wn;j@KS0qNy)jpn`6u2C2{kZXlHyIH%T4?|jmxDBl7RzEW3DYTN0wJSjz zeTF>5*0k#f$jHui=Cs@OW9$Lq_2c)a*!jFmruP_mLFPeUe!F@fJIVYg&swgB?#^~~ z`P=DlqaSXWWv$A}FR>ict-SwIWM${K%C)12abPR=t{&#UGp&4gEAwC7D%?_J6}hu4 z_du(P@mJz%Rq6gBt87n^RgSD^YE|V4FMaz~=D&KFw{2DJv#qMz-=lhD!@gGD|7}$h z?#+8!ng42K{iju)W3BSew#s*^mHnSy`QL9cg#nRV(u!ts30Z%Kqn84I@Jjl~^O|TIv53S>dCticAz)QPPWj(8K(9rj;On zNy<0%evy@#$h5MuJaR-la)vyVo7-}&bz3>e++!Z)d3?&>>QTXtpb9s7xc_#9@o_=EC3lx6wmJnR+o$bW@L0pe-j=wbi2N1fLcTV15n9duhg*Lcw1ZS}76 zsIN1q{+(IYz+n&b8(r*&a9hKNGp*2m5AQ#FH2Ro_=bzh(%(<;7GPcX3__IDGKJ+M= z4)Xr9hx?zGXHl;lNat2B^B-QhGQ7&U#>-gGtGt6=_o51krnA)`oFKK>PRo|mO88&+-f_$s!MxSKj&4$2`~L$FZ=)8R&&J5Sh3h@ zUFSvrV#`ZpCv^qC>)rG(AE4=FQ6kEZ8VyhR~N51;0 z?}1}p_CNLVTXJ6Re_o!Iz4U**>@D>wJndBkdvw;z{$DR+AFmP#uae{|M`93*z z`IL3GkLS6MK9f&5E~n)>+NbPOj2X`Q*gL{Cbl#^t(kXei#3~N?e-aFgJ7{aI4 zeOxo|`4qs-&-1$t**)u1*C`)sXFhdI__$_#+&g^=qF+zKr!Z|KlBRvU7i3R;P;u%Y z0m(=&V&TIz3PS@uH~So$m(?!S$&Bj ztM!Fm^42SF6ZiMJBCCy;Qv6j#Rv;_L7&oZeBSF#mf@*^1wY_SA z(&xFq++pr7LB%)6O?EJQM0nVCfXlDWxO zkbi;v7=7{@Zh3?0xtxLC5>x<JWBq%cCu~(Go*cneSR;0J;L|(qn6pndy?b+BYn)<6Kg4)v+Y-KR$GD-G+MR{b1()`33Nrc11v9X?gN z`c$>bX;tE{BExBwzu(8aPo`BG>{H2yeJVcIC--z8^Fn=mbF)tct`aN%DDC~!8RbrM zKcMgF`p~Crq;nnji}%kc3zssH#Gvi8itl_s2@~Ee~ zvMm3~E7@CeC3CHP^1kD=JRdr(R%GU?u$8rItsKmmIzLQU+3$DAbw$$3*|Ju-@6ktL zQ8XV4m`5!?zL9rdAa4r2m`u}rlHSz)Tz!?cE zOua-PIv2KLY{H7qGVi-GVI|jhDD`2|%G`CSvgn^W$cq!QH(nyw`M8y{F2e6$q^!Ko zOO+1=YcEmZY}_hZ(<`_CQue;F4+8hny%DP{6t~LvrmTuc+^XCkv8p=bR&_q};K8*# z|GHFHwpR6rE>Q!EY>k9#I-IhaIc~{Xt5(Xw(}ez2mooo#soL@{k^iACzJFroI#X6h zB4u@EU8=5h*y_e_4}OEl-ZDSyf0wfUe+hfw;@03C_h%U9dI-I9sYc$rM59~dR(NHn zBI%SBJ$0#KjC118UaABnr!P@zMcm3<5#{$k;+7-paycRU8s6oYOHuzp=Kn6I-LUUt z;Bpmgj9P_fQ`CRla=(zWig!k>5@cx^>pP*#RG!tTipXWE+|Ry`;APZ5`#|o9TeZ)! 
z{!w=sb3&K%{ol*k_i;J>|I5`(ye$Kr%uDe7m!`Pot%_T|&db$yHfH(R=oScFu67o> zJMshSEW4b!Pu|_wld^j1c)mdIK-B7ELA;-}mVq59Yj7+5pJQ=r_?^oY+F4+YJaoB6 zA$-E@Cq@7BQ7e{?TX8njB_PTEl+-HfVMW4fc`s=-u83JpTo3j8*{8_@eC?JV)v*{~ z&7O&x6G^MmbD653JR@dR>`GXrtI(VFTKvLgDuJRS{4N@O^@0%nIQ|!?hr(4?=~uYt zKX#Rh-J6;7xJotnb>m)+TMmBS&iAO@>}*x zy|7jP&T-Qwxt4ZPw`Xn>D<3GwXbt6@C9I`u$hwYxs$*B~N>{>L=La z2ev8)nYm_*vZ^-GKiDLvXA{@|7P%k?dv0VCZ+Ee-qdLCgy*)(7)TFmx%vWI1Ltkfy~JD z$^yqU@4rI!?DcX%4)$Eg`+)afp#ZmkC(M7rNATYW``@87#BU2mu2=a(+f;F2yDATE zSJjH`s@}R?HKFaQZQ4%%?grKGy+IAoxZ?)i|IbkKsSLGDXD|nqAHk-A|F5V2UrqnNn*M(^{r_tE|JC&WtLgt&)Bmrg-Br{7uhs1K)@oN1wX?Q{#kYg3^)UMc|e<2Q6{?*LSZcy&J4UDfhFju)j^=GeUe08-NpTAB`S+A@4&~?nO zT*v&})$GaHpmNfvcxVIrQ#bJYG#ga?*ap_(Hn8Bii}mkaa_rqLCuBdgTQ11Ko(p+L zcgsPVIq(xGh63=yAGjt<*YD>2m)+{P>k1`tdl6oOH(>?+T(4Nedz9~deV6wXetH!7%_V*Z15Z`P&V$l?#CX#cExKTQAUG4`b$WF7nn>)xy}SMeOuu@BdNW0f1`kHBkEy?*Ksk7Ty71?||9+-}D~e ztKdC=7HD0;I{@GXAGCoV0?-Z}&Ydq0E4S|{~dfmnr zG`m$fx5)nY^C}N<&F|Z-(mUpLg6n7&&cYmwa!p;u^%g^3!ZrCZSPj4Bxpy7zi#r&z z9Q?hqR_*43->uJcE`{IyoZF0_*kijDg~*ZJ%Z4-;UYM9MWzDXwUxryO~?sE#IEq z@*aCzo&&pi_j?uTI*hd1CA_!ayb-i7~! z)r>*^60U-;bAP=B`9;Fti2MwE0q%l-fID%26!~?y7mkzGlgJ;yPvIFjiCZP|9{m0s zX=QMKLH-8!Gw?ce(Vysr|Hiq0=Uj|(zU{Nz13118J`Il%{v3Y4i2Ww`7G&nL4-_1A z>;Zsm>@LV*%`6x4&WYiCXDGex>NE=HEDm{ck2Xk}0i6;KIPPz^Ou`~J&} zC1+Jnoc8;Fq|pS;9JfI0QTG3V_t>O-`CR{XT>lWj-ag0rZ;0!k@n09R+s(T#RgCqT zxc-rS9QVJ^_5UH)KXQoUVcbLS|6C(wT>lUr;QJ2{g&4%k`2GVVAqBN=-u0k8)zelQ zkd5nUpU7s8TacxP&$D-fZ^zxiH{>4X+i?SYLoUR(O=X;b7bnlu}59flEK`&+0hioN`XM+37G43xbC)Gw6KLn;J zU&!S8e46Xx39f}N!*lQ>_-FVT6v7WUUxEBB*G>!aD91kJe{kG~jKMGAUvR$yobX$Y z^N_zoUV#6APtf1{6l{j;VLQ0-b0zX__$vIL#Q7~`IqtV1e+D(!8^LMpPy)ZhUjwom z+F=N0&(n^P9{e6hb|cRr|BShxe(YBwKLsJ|QMeiVo$!0&x*K@``3UlHcn*FBfA(3@ zM5gdRgd9g+1IMvXBWK_w{P}0m56)tL1Kx&3_zUKtE{DH`D?dy6@E-2}NV#t}?i~M5 zj(-p0{^!GOGj4szAy|pq$KjXQug3i^u>S`8Iruw{|B~Yi*snL|X!Cy#SHj=mem!zK z?w>>62KT|u9Djy$Tj6uqGuQk^S!TVTeUVONHv1%9M`-`g)Bb6fc^SW9{^K_)*hBk= zB5*_T9kl=Slu94^jmjoy|Ho+mD_&&&oAwXYXMe6gz5jXetm@yR{X^pk+CSLesXs;g z-%9)6LHpmuIz6(DWB&p6&66LqZ@%L&-z+)8zIyW0jqKq#h0eojAxd_Wu$5bfkX0O4n+6hV%-(Svq8j10NB&jq;;aNNJ|O%3>I=jhl6J+$%Q z3)~0LsT~&C291XOCH!An>lv|;x{fUxb*fxQ#DIM)sK zdo~>R9;N+5KlT9_n-b*w|>3y zSFTsVn)NE&%6iMg>*d5I($K5!!dO z$=nBi&OYAP6*{n9BZM18_V4F=WiSXs(6e^Eg3t?n&;gy$1>MlLe?5KQ^$I{cw1Nk` z-~+Q+KMNe}J99#Io#cWX?75H^k@BJ7ju%wumWp`c&)vkgHjpJ8m%h)w+7H=-i>%aHU|23}Pt2W5*l!W*lkOc2FAsac@)XDEdaUCAx`uip_muv9}{yOe8*nf?jg9T{d_|N%f!YANz_!PX0J6Y!&Zv1`*M1gGZ8_@iI z1|&fnwBh9}a6&e?AO~_G&+u}lZ5ls$#~hdiPzV(U`)q|BLxp{|64y%GwbFL2v|ZbA z?SLU|FvrJ8WtKQ*iwv`6m~Mk@E5=rA+e)yN*tSw^rMAuZEt~DfEZ2r}jyhx1Yoj*u zf2%fWv#!z>eOg!ZLs_56xCU-8hHcwsY}<`-qcL_EgAZ(fCgWyf++vKK7{=Q+6Uuls zK8;7?&v-MwjHf;DMYs)ahdba)a3|aad*RD)H{1jJ;4AP|xEJ=r*Wf<59}d6+@F090 z9)fScLHG_l0*}ID@K5kKd>0PE_uvV55)Q*t@H9LF&%*cNIrsq_fgi$;;Ky(jegbpZ znYQV$O{ZI|^XPYL)g~kqJwssk_-^8|A-cGWq#!yGi%A{U)FP=IAkIRkkr}+y1TfqUD_i+tO^x z>fd}O)W7wc9R6Fx3ZFu8?Mz2*Xf40 zv$D7AhDAq28Opd;8P_S}2L6qpB#haZhiQAjhdG~NpUALJWY{ORDPtQaw%I4P*(bKy zC$=kNJ14f=C$`&8H!9;soNlz8ZnT|tD1!y#s5Xq&D4wwtugR(ada+GZ=d?G|mb)!VjH+iaz_kuhS!B$S=L$)26cc5S~_ zl=t=<_&2AvXYg-ZwrM+OZ`Af3qMx|^X36>)ZwW6wRwQn-cc5UCRo39nseY2e@yC`lpC+&K_MYmilD&8)bTa2?^_dB)o zT2c9SG3_+YcK!do{{HWG>2vIq*ri?Of0uUaR^7T$x8ABRXpisXO!~eQD3vb*Jw9qVBxwC%W_AgSzvm z?$TZN>aGWL*W~vW`-4iR9#zNW7|r~7o@?YeKD?!)p!-LL!axJUOB=l%P1Ko97Fz5Lr?*t$Qc2XE7Z z-_V2K(bx6$z5MUhL;8lksejPJ`bT|B-_}7LyzcOII(Ypf@SG05l+<_horB-kBYNce zr{RbmIryX=;qXyCa+v>Zuw9Sp(J$)JgL?EyJ$jh`ZGh`f^eBb-m>zpTkA44f{geL5 z{2ziRWd!D7J+8-z^1J#jx_$Sc4(X8jhwtgoQ#$lR9s04prziBp9zB76PvGr|2lT{) zdctId`g;QNF#p?NyPo)go}eV3)RW)SlZW->Q+o30$Mxio^(5tgSop%z;XOM1fDS*X 
z!{5>2$94G7lRC^lr_Fie;b{(^(_u>PqPa~^>8U+>>Pvd+0X_Agp8B4iI;^Lj)6;tT z0X?H~N6&mo&v5*po_R#iJf~;%eSQDnqXs*!i*lQu({ta~b3f1zbVNVY4-fHg zb4WkZkM-k2{2$U${Y3w)pXzzN=+%opz3A7A0lnCx7yEC6FA1H-GBuW^F^9&S8q3z0 zOJg}2%hgz(#_}~*ps_-Y6=}?^v0{ytXsk?Qheu{MqQHP)`N4vlqctV?6v8tc(mP-DFs>(f}j#s)MtsIei94Qni~SUaRpsjn`|uLF0`YZ_;?P##=Prs&S9Ty&CsvyiMbNjR!Q|uJI0y zcWS&#1q9qD&JNnyA!7l_ppttkFcRCh9a%uZadtG-{$r6U~}v z(L}2zJeu%o!l#KgP53p@p@~jSbZMen6Fr&;YNA&YeVXXk#DFFSH8G@#VNHa@8wwMn znh0wmBHl!ph-o4&Zc;R>Uli*XrTRsye&NwCy7h}b{i0u!ZcP?zvP6@mnk>^~xh5+# zS*gh?O;&5NMw7LgtkYz@CL1)_sL5tcwrH|dlO9caHR;o2K$Gp7?9gPVCc8A*t;rru z1~u8M$v#bz#IPnqnha|)qRFTxW15U>GNH+&CR3Wq)Kr$H9GY@!DqB-7P334RS5tYK z%GXqZrV2Gxq$#(iiZxZDsZvdqX{uaP6`HElRF$TxHC3ajT20kys$NqKnrhZmi>ADq z3TUcbQyrS>)Kr(Ix;53Ksi3BMHPxr7eoYN%YEV-{ni|z~siw;`U9Ra$O;>5UPSXvV zZq#&(rahYWY1*&pfY|>(-Kps=O?PWLD7FDj_iK7U(}S8G()6&VLz)h2I-=>AUdqx- z4!z{mOZj@KKra>Qr6RrL)=Q;&sZ1}G>!m8a)TEbM^io7G#h(&`97Z?{kT5DVlcSkD z&E#vQKr=;}Db`GhW=b_vrkQfh;Gt48Rhp^ROpRu0HB+aVdd)OwrcpCZnrYTdi)LCi zJr(Vg`D@A(6tyhZmN}XPb=#_*{6zD{e zPL%3InNC#bM2$`~>O_-HG>bQKPK@egmQFf!vQ#I_bh2D0D|E72Cu?-FUMCxLvQa0S zbh23|TXfQ^lRlkn(@DQh26VDrC%bgATPJ&TGN_ZiI@za_{W>|IlY=@rq?5xs8PdtH zPDXUf|C&RmhEMs8(QS;NF-CMMiSc?V|F4Z2BW#Q~#_NrGy^MczQ1)6%ub1<04l4LJ zvXXxzt6qy316#E*YK>9%+ORRO)f=OMe_NV(8u>Q|O?tiA$W~)`jN#SmJ|o+f4g3a- zt=$+M{2SSM(Re+}w!iK`*mh&<;@_MfM3*-_dc&(Xe0rlzZ}{~_fd4nzjnQL_pfP%l zF<^{Aj9Hg4a*UB{j67rH8>0Z@j5%?}oH%1noGCR%l}wcFv(7gRD!nn?6Z!m^kWcat$!mxcVC(qHc zPZP@6h{V9Fm(TtnEGkBDv9#5Qb#hxj)!U|bZotBE0GVhB;* z5fcuJ31o{6b`@~dZs1(@YypUUWB83hJlQ55jElkq<;V_TBG0z3L0b@!IWke1 z6ftbi7Y*C#EZfO@!E)Ibm^>AjJQbKc6_{u-{wPdHlP6lQF^I6hj={uEKuVI35%J#dY)rFB z&8BoZQ>UFeovqV3I-RT2c{*LB({7zE*6A{xuGMLePPgfFK&Lx(I;hiqIz6D%LpmMR z>6lI@btY41vUJ9wGcNIT1DJgB##-ET$fmW4-e#oTX2af}ltk@;3KgmAo-97Pl@i?U@=y(}7*#lTw$ z0woYAk4Tc0WT_oIS#EntHZBX#vIs1T?qxCfr!a%dL5Ke2Fk~F&G6R}(Hi}tq$N~pA zAsbwf1G$g~`A`6bPy}u$h7u@+GAM^isDgShaW_FTw1Nk`;DdJPgl_18AoM~X48n-! 
zT;$6|`YzIQ)j%!O0dcvA%SBu+;&QctAJE4|ysi!)ZdVr&zl-=?#P1@0UQd~G^}_&= zj%x^pA*8t+^v*%=9Q4gWpB(hbL7yD*pF?^%#Fs-{ImDGi967|1LmWB8kwY9g#F0yW za|xeIo^vUm+zKG?x#T;SvdC=#!sQaKhw|;gZx7|#ga01<_mE~!3=*0PqJNNj2~sD+ z)Xi`;5Z5r}I!s){=p4fD2zd_UH;msf{=)bVqf6MhlSY_4g-It&9fnCOOdWLg4Vg{hM; zbrPmd!sIVZorI~Aa0gH)VahN}orEdJFy$DgPQuhlnDPu$F0`GwFy$GhJj0Y{nCl=s z3So#q6wo;w2g)d%gp}qY=pRA<2>M6RKZ5=d^pBu_1pOoEA3^^J`bW?|g8mWokDz}9 z{UhifLH`K)N6M6RKZ5=d z^yii9xd{44(4SY==OXAILH`K)N6BCSqGuF6qv#n$&nS9E(KCvkQS^+WXB0hS z=ov%L7<$IgGlrfq^o*fr3_WA$8AHz)ddAQ*hMqC>jG<=?J!9w@L(dp`#?Ui{o-y={ zp=S&|W9S(}&lvR_qn=~bbBub9QO_~zIfni*^pBx`4EGmf5d z^o*lt96jUc$zx?Mj-GM!jH729J>%#ZN6$EV#?dp5o^kYyqhB2T;^-Ggzc~8E(Jzi( zarBC#R~)_K=oLq=IC{m=D~?`q^opZb9KGV`6-Tc)dd1Nzj$U!}ilbK?z2fK2-$$OH#C&_z~ zx=K=4Npwh}LlPa5=#WH*BswIit0Z-mM3*GGB+(^_E=hDrqDvB8lIW78&XVYpq|TD) zl%&p*=#}J}Orl#7-IC~*M7JcmCDAR3Zb@`YqFWN(lIWI1wO`&579aHF-LcbLHrO+>hZYlIip-T!~Qs|N* z|0(jHBL6A!pCbP$@|q&ADe{^kpDFU0BA+SpnIfMl@|hx^De{;iKPmE)B0nkelOjJU z@{=Mjskq+GEQJcFghBoxN<|(c5_r$c9|Vhaw~_1 ze}(v~M6GK4)D-IN0PzHJ^mcdC%cK(W@UyvD;D9{HhXN>sB5*@7lt3wzK{-?!?mNc^ z!I5ls_@gaabkUYBZ2F_E_m5{6@+&U7EmYO~(bm{=(Z-_nA8id6&zjh)p+@Jj$W0bG z$s!k7hTgnk%+AsB`c7=;K#A$Becg|p(2&^brmIR|Cn zAO$wunEf{A9K`IPtQ^Gb@Sby)opZ*|IVmu6(0$HXb@A*u=g>K4J+8}V@Y9XW2W^IP z&O+nEL4M2uekpqgIdudK=Nz3lVk6&<4#PRh#L;IbM$K?cvJTQCr(Dnj6 zQltPJCe>~`4da6VMC@G7FbBbNPUDp`JqFvsB#VtK8C8RZb58%CJaTZ+ElBq5_)I#+ z`JB^Zr<=E|?6MPizL;Jw-g=i)>OE)lH|b&{ADPPseK_Es{GuGiViMw*h|tE(fhn&4 zmF7ho+AJ3q73&xE$4MWIa616j6AK0~GCv6nu zN^x+dI7aP~xL9>z986kVI*ypZE;mz^%eFjhMoV)oB_SnrHT*G`rrMnja6&e?AO~^{ zc8Vt37d3QJ_0D{|bd3tmf^*JdL?ILbC1f8LpSxJ(&JtWo?4mSrml|w^sNZf5Xv51F z@c+v0#S7Yar)@ba49lJ?u~werGUrM(`7sf2Z0c*-MkzaKr*;$g<8@{-Oq1~0sTxJ9 z>@&t|wc#8&rAQs^%dynpMl*KSns^;4JCupNmeb4SLo;;N*`;U7k_KrS8O4h*mz2pf zHmZkeF*Ys+D#_Vku&)v$sCN_Vvd!eB(O{=x^4MgsOO30=cCf3}=t7flHk(sMZ!XW} z13R|nbEecyiDTmu?pwBz6)qX{GwHM#mcw3j;A*nnOgmhz8Bu%6YVOj#~vq#h%i04cs*vINwY|X(e_K8hbeQN zc{&2~dF`4nXw`gC77T0NU8nhyDnOPNYQC&n^OaE51RlU%Qv<|T%a01zCpF)YtN8$7 z+5?*J?1d4{cMWR3XGrsX#hM>71R<>X5a~0vpJ!~(9zw{60w68M@AHhw=NV(qGrpc@ zJU!3&d7d%zJmckg#>?}Jk>?p7&oefjXZ*_EL(MZroM&t}&)AS%h)@aW#8_^gaoRj% zuX)B<^NgM58Q(y<94o_-d4?48bfxF%^3Kz}ou@lHPd9Qt8H1G4bQ#i_-AeNSP4k#c z(+1MC%ruu;I=c(-!?aG?8qu4s-PNb zfHZOlpM!s()RJZ$I@J+x9p%+P88lFa4W!vfy)|;&NS+(f zvAIy`mSVtPD|u`s->sCrhxj~{hlhN5iN{ObdP&Py3xxHNwyzh6!-sz#b=5}tZKT~s zxwMhzHo|jDNVk!vHm2 z=`N~)J4w2Stn?(64pKZp)Zn9f>0YwhOI7xA+(*^)5l0_k`^a7&arRN%eNjicm~yF!{fK$@ASG&4)-$k0L_Q8BrcW+ExgBvP8mqcoE@X(ndU zOvt2}h)FYnl4b%W%_K>hNs=@ZB55Wv(o9yQV?@hjM4Cy5G!qMHCJfRH#?uVK(+sfF z46xG-q|*$d(+T3^4v}WSn`R)JW&oRJAev_InPw1~PN60P#B_?{O+~c8AYvi2Q43ii zEjY5EQVaBc7U%~o@C;mFB6`7<4MrA0F|8_}+bl5HZ|CUVw9i8hf;GdXC+ zU&|0s`mNYoTcFF3j+e~G>)BPV1rKU_sZK8eyhKEUUTCY-g5RNq09kJ*KnL~?66oSw zH?o_=dnlG3^yncILHq|%l@`1Zq&kD6Kr!@^U@z6!OD1~BL@ya*`DUTd4TR|<9$MW( zKlc7%E%5AL7--kRAc_vAv@k>g4dHi);u|7u=GYg82{Vk6!}uR2{~_`oqS`{_Geo)} z3X3`Sg;COB`DbAi_fhh~-1q`>*$d3+E-)Xv5T&4bmM$Wf_Ji+LT;1;mvXf)R*8N{h@wEau}cpSbez%S;4s256C) zhDDmfA~Oq%%qT1ta!eCgEFvBrOpETQ7K^ii@WrHETnK&`)M5$w=i#$hiaVG7Vi|tQ z$QPIYV%dNe%gJv!=gMofSb>feoT~_Ev63*AxK*}58<6ixbgv}N$|0Z(D#>>x(IB3va6?j>e0D@G34K-hNT<*BsTPFfw5Q73uo#H|y5o#eNZ zbh-%NMVWS!&u+qYQ+KqR#a{gM;lGbE?IZnu;^T?4I6(df$mbyCHHg~~a)^8lQ5Hk! 
zNjq4i@3}Zk*kSTKjLyTH->1!^s_Oi&_ z?;`WNi_Ft5GIzR|AYaUxE;2W|$Xw_mbDWDj5*Cxx3v+r)%sDM(x}XTkVMt5#!Izl5 zS<32zen>(}OOAXfgdhxRiFM1RY|iD@f)6^N3pk%kJh^c#LUsj`~3ev8?t%CR~$YW)-ma5vcRO1H9gwE7b zE%Db9Z!LP);a*n?#92q2T-!@^-CE-2zEqFB!KtHq?M5jUY7$V#dd1NVmiKY0ZApZh|t4nWEj8(`v=U6BJMj+5Fo96Z4oAV;}`1ip& zaFX??-6X2k9It!}@Zodq$KSvI`tT0?^4G_r|IX$A`|D$6^RJK1_rE^2{>L-*+{hsU3n-{3#|2LEyB-+QS0*nfv#_dbqKJpTLr+V^q%=dTa{JNh{J>hZ6i z`13yT?>+hVUmt#8|M2_!$GQKEAJjk2{c+A8J^tJ-{PTYL{_ww>k1Ky&uC6`vzdo)% zd;B%L{_kHOxBi;>8Q|m2AM?(C-uv*6`K8AnWA4w>a^vHE&*L9I`Rnqu{nv-zyFXt1 z_5bCM_vWwb`#*nucrX3o{pp7vZ$G@h{P@Qo%X`X?fBZRmZ}{Pz;D>jDAAV2%@V@QC z-$UWUkG3D)v3+=l_Tl&F4?o_1{KucGcVi#^xANh=)Q9&_AKp2Acz^TZ{mqAWF(2N+ ze0T>l4zB+ihmU`a`TJkve(f(p&hyFh+4HyOi|4E7o9Dadhv%nf;+c9DJd2(s&$4I5 zv+DVOf2?)ShG)~W<=OV^cy>K|o_)`O=g@QHIrjK-`q!!F%yaI!@LYPXJpNq&b>q49 z%sh9Vx#!;V;L-7~e|erf&z=|0tLK;J&7;w$nDMXw^Zeuax92~e-=6<^{`@uk-)B7O zf71V?|4ILoe*eEdp7i_w_wl6vN&l1nC;dm6DPgvtgzfW1ir>ya$-_L;KN&l1nC;d3`Di)7W^@@Au5{q~G7>WjyJB((l))@udGrzu!-XzXi^C(*LCYN&mC{XZ_Fm zpY=cMf7bu3|5?AELC3THXZ_FmpY{9XH=gxB>wnh&tl#g`<5~Z+{%8Hq`k(bb>wnh& ztp8d6vwpvBj%WSP`k(dtHFP}df7bu3|5^XD{%8Hq`u+Mkp7r}Rc0B8U*8i;kS^u;C zXZ_FmpY=cMf7bu3|5^XD{%8Hq`hD&g&-$PB`-~7q|DxZ|D&s}}i~bk=FZy5f zzvzF_|Dyjz|BL?1(f^|VMgNO_-@A<${V)1o^uOqT z(f^|VMgNQb7yU2#{fsnT^uOqT(f^{~uS4TS|BHS@{BXzv_S0|Em90zt1Sc4rsjU_YJ{#)$jMgVHY&)f`-p7 z<5mBw{#X5eejBg)U-jDzjaU7z`d{_G>VMV$s{d90tNvI0e%>3e`d{_G>i2!=@Ofyw z>VMV$s{d90tNvI0ulis0zv}myX}s$HrQc_#;pfKTTa58b|1bT&^#9WTOaCwZ=5)iH zZkW@JU;2OP|E1rz9^;q(U;2OP|E2$ze&30XU;2OP|E2$zem|FvU;2OP|E2$z{$KjN z%N)P-`xOSv#xMQ9^!t`&ICDJwOfi1x|E2$z{$Kik z>HnqQ&l3`GzrvFX3`GzrvFX3`GzrvFXQ>-t@oef7Ab_|4qMd zqQ;wkKX;8c{XQ3rH~sJW-}S%if7k!6|6TvPexDo0yZ(3m@B00`Hs1BW>wnk(uK!*C zyZ(3m@A}{Mzw3Y3|E~XC|GR#l!G~|ch8gBC!yIOq<6Zx|e&3D_-;RxU{qOpHOE!E< zHs1BW>wnkp^ZR($|E~XC|GR!aI}SfP4&SDYcm41Bea;#0`rq}x>wnkpbN}%3#(3BN zuHQFr!#8i^UBB<`hwtr&&ql*{_roS{*yIhHykV0!Z1RTBOvCs2!}s~aCU5vof7s*= zo4jF@H*E5TP2RA{8~>}{CU4l}4V%1SlQ(?-KYZ^nZ1Tqc>bJ=oe)bvvtKTMX*yIhH zykV0!Z1RRp-tfL)*yIi04i1~VVUss(@`g>`@D5?v`u*n-XdBY}e*yIiGG=@#yu*n-X zdBgjSVUss(@`g>`u*n-XdBY}e*yIhHykV0!Z1RRp-uOTLHhIG)Z`kAwo4n!sn_-hT zZ1RRp-tf)ou*n;~HXVMZ95#8wCU4l}4V%2-tJGnWH+-EsZ1RRp-tcqh@Ey>w$s69c z44b@RlQ+C~8Q!}Lo4oOlew(~ulQ(SghIcW;CU4l}4V%1SlQ(SghE3kE$s69&44b@R zlQ;ZKJ8bfXP2TuN|3CVD-!%Tw|Brq@?+)*DhOOSP)f@ll_kL&i8F=`*ci8F;TfJec zH~!J@Gt%%bY}o1z?|_D_-td+1u+J48J4_m$Az1gtU8@}Tjey$$2 zdc#(4*y;^iz2P0wu+J3}H;XAQmt2cbrJbX4Az8@RjHw{0R4?mX= zTfJecH~f4)d<{K(4Ly8*8@77GR&Utq4O_k8tLb5@H+-))Z1sk%-mujhwtB->Z`kUM zf9to^8{TCNTfJecH*EFBzxCVd4O_iot2cbbJ#6)ct=_QJ8@77GR&Utq4O_k8>+fN! 
zH*EEWt=_QJ8$LUXf9wCZ{(tNDIb!%6G5)RJhHrRjHvXgEhHv=VeE57ZZ1{!^->~5u z|Iu&5H+-EwZ1{!^-|+ck`1~X>@zTxxmu<08%eZ!`2*z^sX zzG2fhZ2E>x->~T$HhsgUZ}?tk*z^sbp@&W1u<08<9}SznVbeE!=QC{jhE3nF=^HkE z!)K>q(>HAT#&7+<_5apy+c$is8n%7Iwr|+>jo-Sn=`0i-$me8zxCVtjoa^{)Wxpu=yKaRScWI z;dRBZ`5QKW!}qb{w|<+y@ms&|u7+0`!}f3d)^Gnee(SdZ9KO>UzSA1N_1glD-}-;+ z|E=Fk(qR`k>;i{f;IIoEzWW+poDRFdVHY^;0*CLyhSwy+Ym(t};lJs_1gsw-|NXVGC7IDD5ed?p?L?=z`g;IIoEc7ek# zaM%S7yTD-=IP3z4*D=E`aM%S7yTIYq%yE1~g6{~!Ifgu|9_*bKR2hQr=)*c%Re!(neY>MbN4sra^Z-+R% zsv5S4!xnMaA`Y*uhVL%NAN}?NuNj=IA@kcqyL$`P=iw^VRds^WF2q z^V2i&Og#&pMbDCF*|XwV^{jc;JsX})&z5J~v*X$I?0NP*2cAREk>}WR;yLx4dCol- zo=eY_=h}1Qx%JFEcb>WD-t*vj^!&^7*RC;gxFf70)YO+M-Ooo+tq|D^wue&4ah)GpS1`J~^XFQ4>V_2rX(%f5WlZ{3$q z`YrtONxzj}KIymg%P0NTe)*)|;xC`{``$R8^jrSrlm1WoEdcXLzZGCU>G$1UKI!+4 zC7<;B+wtU+e&098_sy{ojPL$pB^WEgcr_VI!FU&wPx`F|W9*Pm`h7nb?`PsW>iCX2 zpY?l;J6(v;NQeKkNUj-}l(DD~#{5^I89A{h#&wo-v>G`>rvc^?%myJI8$1 zZ>H?hlS{k~t1-CT_0VjLIyx7feMnku$cF?oy0Tg=;He--n#n78F`{YGv1 zTmRqsjoM<=mcR9TtsC#JV#XHxwHU7DZ~cGk_q}TV)^DU1yR=wx#riCN>$fV4xmfl6@#t#PB!LMF`bIn#<7fw4NokiVg(f|s2HTg_9-?_v3-g`Qw*A7 z$d$kKTQ9|CDZbl{%~EWZ^0)rK_5ZEkb}6*uP%RfVgH z_Y>i&@WdA{oZs^2U9_})BT>*uTfulm30|EmA1eq*70)&EsL4;CIQJXpT!|EmA1{;&GK z>i??WG$5QGysihU+n0kN3X$rk_{KH~ruAb8Gpg z|C@fZh;JCbJHdR{|6Tug z{onO_N0{&Wy(i3f{onO_UzqRuzw7_5pVf=`NW4SLcl}0WF(t`&{onO}*Z*Drcm3Wm z=DYsy`oHTpH;H$Su^WuhS-gA95B)#%8=%D$B|t%b=>MVLd&&IJ|3m)|{odUNGze&r zANqgj|Dpefe&e-xhnXMxf9UrvGe7hjwB?6>?=|y7{}25?^#9NgE98fMU?Fylu?dLr zTkHa27my$Nf9N-h$q)TM^#9QRL;nx`-dV+VAhrYfq2K$fxD6WPx!4lqhyEY>f9SU> z$WQ&GLw@T2sh@brPyI%Au{p?3{q#eA>Nmg1PyNPs`KkY>ew&2+)c;ezX-bF_QPyKca`KjN0CqMQ7)c;fePyO}``KkY>{-64P>i?<#r~aS%f9n6K|EGQe zBi;wc`{4Z4PhiA*;r!H3VuZwqcXlxjjCaNHt~fvS|J3h2Ui|Ip^Hcv%{q_?fH8RmZ z(NAoI*vLfxL_fI^1H(-8d*7Ui{)zsH{)zsHe(#_&(Ld2Y(Ql8DiT;WHiT;WHiGCx+ zO!RwSor(U5{)zsH{)v7A#!U22^iT9p^iT9p^iT9p^xJuaAPGSddyh=?d-t7*e(%3y z{FsUUiT;WHiT;WHiT;WHiGJ_MGtocMKh;0gKh;0gKh;0gKh}xVCIpNtl^*@x z-)E|Ss(-4Vj0qVNGA3k9ruwJ)r~0S*r~0S*r}_;&Gu1!UKhxK|APJn{R{dR^e^aN(7&L6LH~mO1^o;9jY_ki z->@_b`WN&Ym}Wu$f_}S%Ea+d*zo36X|AKy7g)Hb_(7&L6LH~mO1^xCsSzo_5-I)<}Z)Nec+ zcZsv8e^LLU{zd(Z`fW3^sDDxaqW(qwW_vNV&7%HA{fqh+^)KpQ)Nh2FMg5EVjd8Q6 z-|i`k`WN*t>i4%Zj~!IZ|FWpxST~FM4R*7ne@VaLZcGDXE0rbvOZsi5vZUW`Dogrp zr?RAfN&k}mCH+hKm-H{`U(&y%-_7ML>0i>nq<=~OlKv(AOZu1eFX>;>Z;F^D{Y(0n z^e^eR&&rbiCH+hKO%=1Ge@Xw6e*2Xy>0i>nq<=}jJy(|WFX>;>Z-ksB{Y(0n^e^dO z(!ZqN_9aXDm-H{|U)H~@-)K1|kuizPvi@cLM$EC3iJeTA_1ns1S^u*BW&O+gm-R2} zU)H~@-_}0M`j_=D>$khlvi@cLb~ahoZ+jM_=q&4B*1xRZKsu(FF_O-*etVoO>tEKt ztlw}t%len~FYC9}$+CWXoh<7&rp~hdW&O+gm-R2}x6{b7{$>5k`j_=D>tEKtqJKsI zivAV-EBaUT+r?!?zhQRlR;8r zs()4gs{U2|tNK^<8@|ULF{}EyjjZZl)xWBLRsX7fTg9yEU)8^=e^vjgeiPoT>R;8r zs()4gs{U2|roCC!zovgp|C;_a{cHN|9GDw`q%WY>0i^o zrhiTUn*KHYYx>vp+fHUpzZr4X^snh(({Ie5HT`S)*Yq3qXHEZ_{x$t;`q%WY>0i^o zrhiTUn*KHYYx>vpujyaYzovgp|C;_a{cHNy^snh>bh4(O)5)5CfFNu7*YvOJU)R5` ze_g+sbJq2*>tENuu76#>{b$zouj^mezpj5>|GNHl{ptENuu76#>ooUwfuj^mezpj5>|GNHl{Wf`7*T1gcHZSY? 
z?ens(e_cNyk#+s+`q%ZZ>tEMzzn69W>-yLAZ|L99PugZf|Azhz{Wh-I(7&O7L;r^U z4gDMXH}r4l-_XCIe?$L<{tf*b`Z=iB2xddSooqJrZ|L99zoCCa|Av11*=*?F(7&O7 zL%%(3HuP`k-_UPcn+^RN`kAF{=-<%4p?^ca-EB7X+up|ZHs?h{tf-Mx!KUasee=drv6R+oBB8PZ|dLFzo~yy|EB&;{hRtX^>6Cm)W4~JQ~##^ zP5qntVU(E1XH);CejDIy>fhAAsh_*brv6R+oBA27Z0g_Czp39|IGg%6^>6Cm)W4~J zQ@_c5HuZ1n-_*aUe^dXa{!RUx`Zx7&>bJ|wrv6R+oBB8PZ|dLFzo~yyzujK8^l$0k z(htyNOaGRBv;J)9-_pOO-@Y$f`nU9N>EF`7rGHDmU30edZ|UFCzoj1u$d-OAAY1yk z^l#}0bF!s>OaGSsE&W^ixAfx!+0wtIe@p+C{w@7m`nU9N>9;Y=mi{gMs6n>$Z|Mhp zvZa4ZKkySf#BAwD5wfL!OaGSsE&W^ixAbr8-_{QXWn2HYerzGz`nUCO>)+PDt$$np zw*GDX+xoZlZ|mRIzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-_~yv9z-JB z`nUCO>)+PDt$$npw*GDX+xoZlZ|mRIzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K& zxAkx9-`2mQe@Fk0{vG{0`gio(^kzr@j{Y6}JNkF@@95vrzoUOgza4va^xOZ2>Bx@$ z9sN7{cl7V*-_gILe@DMvXzbpzqu=&D3`lnL@95vrzoUOg|Bn70{X6=1^zZ23(Z8dA zN55@pcJ%M)-_gILe@Fk0eugnS`gip2=-<)5qkl*Lj{Y6}JNj*vv#WnszYTtN_3!H6 z)xWEMSO2blI3>IKZCSIc-<~zQ`gir)IR_-muKr#9yZU$a@9N*xzpH;&|E~UB{Wj6r z)xWEMSO2d5UH!ZIclGb;-_^gX-*!5HTI{E@tKWt?yZU$a@9N*xzpH;&|E~UB{SZxd z^|J)o)o*i~UH!ZIclF!fhB3&l{$2fh`uFtj>EF}8r+-hsjc)ez+iYh~|DOIm{d@ZN z^zZ54)4!)5*vX#$J^g$7_w?`S-_yURe^39O{yqI*Pxkch>EF}8r+-iXp8h@kd;0hE z@9E#uzo*}3JbU{0^zZ54)6Ym`Pye3&J^g$7_w++U+0$=Z9NXg97RR)+SE zuYX_vzW#mv`}+6w@9W>!zpsB^|Gxfx{dUi>d(OW8ef|6TnUB~(XJ7xm{(b%X`uFv- zAlcUsM`d6CzW#mvd`R~7+fHX+KQxtn{rmd&_3!K7*T1iSUq4V4o+JnQ5A+}CKhS@m z|3E)%71$~V`VaIU=s(bJzn=sB2l@~6ALu{Of1v+BKSUWoD+l@y^dIPlwQ`{UKtI0{ zekBL`0j?bAxAV?{{sa97`fZiN2;@Njf&K&i2l@~6ALu{O4`}8vaM1N{g35A+}CKhS@m|3Lqt{zLtT`VaLV>Oa(fsQ*yE&3O*>AL>8Uf2jXZKWvsm z{fGK((sQUEKFgv0L;W`DIn;ls|4{#-{zLtT`VaNnujf$zq5ebtw(L37f2jXZ|Dpav z{fGK(+;ga(mB^v~L;Z*P?YMKO|4{#-{zLtT`VaLV>Oa(fsGqOMp?)CF-~Ys;|4{#- z{v-XYdXDrT=|9pB&4tIvk^Uq7NBWQSGe$Ymf299N|B?P9{YUzb^dIRz(to7?NI!U& zBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<=|9qcr2j}i&y*wmNBWQSAL&2Rf299N|B?P9 z{YUzb^dIRz(to7?NdJ-kBmGDE*^(UTKhl4s|49Fl{$u^e`j7P=>p#|itp8a5vHoNI z$NG=;a|Suqf2p#|itp8a5vHoNI$NG=;AL~EXf2{vl|FQmK{m1%`^>Yt_lyj{ASpTv9WBtea zkM$qxKh}S&|5*RA{uBM2L{9Xd=s(fVOyorWiT)G)C;CtHa}+tz4?*Wd|B3z+{U`cQ z^q=TI(SM@|HzxxU~(GO$?$j*uW6a6RpPxPPYKhb}p|3p9gkrVwV`cL$q=x2;_s{d5~ zss2;_r}|IzpXxu=f2#jf|EYc+DX03`x}54i)qkp=AI+)$Q~jsSvpBs{d3!Oa%Zz2;2+nf^2V3~bKypXoo-f2RLT|CxT4 zFlYK1*_`P=(|@M_O#hkwGyP}!&-9<^KhuAv|4jdx{xkh&`p@*A=|9tdrvFU;nf^2V zXZp|dpXujxa;E=G|C#rWLbN%P~ z&-HU^IoE%#|6D(Blym*(`p@;VM>*Hev4ug(x&Cwg=laj}pX)!@f3E*r|GEBi{pb46 z^)pLxTp;KA9T>>D{&W53`p@+{I1oNA=lU7BaGznEa<2be|GEAP{TKQjCdh^U3;h@R z9Vy6#{tNvV`Y-ff=)cf^q5nevg?{cX7y2*sU+8D1+jvBG18SIvd~3;h@RFZ5sNztDf7|3d$T{tNvV z`Y-ff=)cf^p`Uflg?`>O7y2*sU+BNo&kct2&ZYiK{g?VL^UXIjm--pnTc7-~ssB>{rT$C(m-;XDyAF{{{g?VL^H(cbqv_`nly?>A%*0t)K79wf<}U*ZQyZ zU+cftf32T!&b9t){nz@h^yuk~N+zt(@P|62dG{%if$`mgn0 z>%Z22t^Zm-qnc~|*ZQyZU+cftf35#o|F!;W{nz@h^yuk~N+ zzt(@P|62dG{%if$`Z>m2>t|wfqu*_V+~~j2f203K|Be0|{Wtn=^xx>e(SM`=M*ofe z8~r!>Z}hv3kQ@Cs`fv2#=)ci_qyI+#jeb5nH~Me%-{`;5f203K|Be0|{Wtn=^t*17 z8~r!>Z}i{jztMlA-~FWA=)ci_qyI+#js6?`H~Me%-{`;5f203K|BZh4w{xTaM*od| zRz0`+Z}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t^Qm6xB74O-|D~Bf2;pi|E>O8 z{kQsW_225h)qku1R{yR3Tm85CZ}s2mztw-M-$j?)>c7>0tDiZ|t^Qm6xB74O-|D~B zf2;pi|E+%BKDYXB_225h)qks>!OyM!Tm85CZ}s2mpXs0JpXs0JpXs0JpXs0JpXs0J zpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0J zpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0JpXs0J zpXs0JztexG|4#p%{yY75`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{dfBB z^xx^f(|@P`PXC?$JNA%x|r~gj>o&G!hclz)2 z-|4^8f2aRW|DFCj{dfBB^xx@c+T+$j?)2a3ztexGpL5Th{<(hMJ#+nY{qC7$u79q7 zuHQwW%=Nqfowo7ETtDNS zxqg;EbNz0Xhs}<=<#8b?bN%=F@Acp7zt?}S|6c#S{(JrR`tSAM>vw`bPVnbm|GoZu 
z{rCFs^|REu*Y5^Q?)Bg6zt``EK<@SP;JMd-um4{Ez5aXs_xkVkyBm{x{rCFs^}E`X zd;RzN@Acp7zt?}S|6c#Se!e{S`tSAM>%Z53um4{Ey?*y+aBo5BeYUyMd4g{SW#d^grl#yCVS-Nng+{s;XJ`XBT^=zq}vp#MStgZ>Bo5BeYUKj?qZ z@19N`^grl-(Ep&{eVsh$f6)J+|3UwQ{s;Z;?!?`lJm`PW|DgXtzuO~u)c>g8Wvo2v zf7I_DNgnk->VMS#sNc<=JnDbc|ET{_|D*m#{g3(|^}FYjNBxibAN9Mml}G)L`rY}- zqy9(zkNO|=Kk9$f|ET{_|D*m#{g3(|^*`!=)c>geQU9ageQ9u75{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1g=e@|VHe*QoFfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1lX^8e-k%m0`EFaKZuzx;ps|MLIk|I7cE|1bYv{=fWx z`Tz3&<^Rk7m;W#SU;e-RfBFCN|K*xQ=|Cj$S|6l&UpY`+q<^Rk7m;W#SU;e-RfBFCN|KR;5)|Cj$S|6l&U{D1lX^8e-k%m0`EFaKZuzx;ps|MLIk|I7cE|1bYv{=fWx`Tz3& z<^Rk7m;W#SU;e-RfBFCN|KI^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS 
z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(E zX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrGI@3SXKhrI^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todM{b{yY75`tS5R15jrG z>I^`g0qC9nJNA%x|r~gj>T>o7ET>o7ET>o7E zT>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7ET>o7E zT>o7ET>o7ET>o7ET>o7ET>o6ZGXQl4pmY6m{d4_u{muZ?8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0jM(obq1jK`tSAM>%Z53um4{Ez5aXs_xkVkI|EQ>0D7I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS_xkVk z-|N5Ef3N>u|GoZu{rCF+zbfuzH?pjNqA1@Eq6Wo#13(A{ATrZUHdN3QBM{<)(Am=c z1eSqpvmW{%`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq`XBlq z`XBlq`XBlq`XBl+0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b z127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A z01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk| z!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&*{ljVN(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifp-|D9Uj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H-hMx60HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt 
zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQt5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rLPAL^$8L<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT{YXCzAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLTeXXAc5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W^b`FwfM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks10Z{g?hr z|D~S>5Dg$2Ks10Z{g?hr|E2%Zf9b#UU-~com;OutrT@}@>A&<}`Y-*L{!9O*|I&Zy zzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#UU-~com;OutrT@}@>A&<}`fvTW z{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt^d}4 z>%aBi`e^{s0HOgz1BeC?4ImmoG=OLT(Ez&j-}-3)(Ey?WL<8v7f9t>X-}-O;xBgrI zt^d}4>%aBi`fvTW{#*a8p9T;OAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$=x6$A0MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2KtI<{1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
zmNz>&tYTg*udONj-?abJ&3VsE)c;M?|MhJ$67_%Ofq97`+W%REZP-3FG3=< zljJVz*>)dtRyFo8@9nidl`QLg_YAA@6V|L8v34cR8kThWZ?ak(HPfx1n`!+VS;tf-poDP1K&cnvbUwl@2-#5;r&<|{1h+XMf?W8#T)WgH~9y=h8`^18Vi3W z-@@PUcWgcP@le$J@sQ{n{Vi)rw}joy^<1+eHlMw}u3yhJ8)NzX z`-Kac6}|XnPz}UCf*bKMsD|Qq;XZsC58|J2Yoa-Lbj6=?uPb8RL%tE~y7-b<*N%%~ zs($MhFO2ovcz5iRQJ4qsi1plhN38qKt75uh@t*5O#d-#PDE8l&Z}5CK;SuI9;}+)I zFrRr5mSGiCG4aiK24BToe93d%A?)2y9makB_`S@ZLM!w4u?hc-wfGn8#xL+EypH%Z z=ha~m79)F}H5r-a-!d);OXpq?mQD8`9Q~8!mBus0^bg3xDySmkAN3snO0M;9&vpLu zxnBESU{ZKPul?8LCicSP{leyPL+D}qg{|yG+T?9{+WN6WLg5%X3T(k<6k#i_^$y#} z{6iOnHD@mf>yBR#)}FKQVI@{$6|xX1s27uqkb`V2#S%1^ zD5Ge>LA<{%y@RhBuUF^@@-tjJOJC6RnzeXJdgjx5?se`A9*pklKe2K z>kPp48k88mMy|yc>_+$f|B|?u?0Vo| z68Dk*xszB)K0J}$V$d5gx^=D^VA>PjTK8h-$Cn0p0f_H(;B*8T0`XA zgio}%=YH)&i8@P}-9Y!dmwXMMr3TR4f#`I*F7&JcHi?y{4lXum{aI^>b1n4Fa0UeG;aL0$bR2{B=&3? zlh{?aBC+evjfvgQ>`d(5#C-Fl(Dh(NV)uxJ#zdbC|6SO#QTW@R)@Pm+-ei6&vL9WZ z*mLF5#CZL%kac93`M-f-@wkEJJuWowVLd&T_6`fnPFmw$cv)C}`tq=1+{m!<@Z}-z z?B!uq=EY%k*JWW1_x#DuJD++{SVyimZ(BfaVBbh?%JhFeax?oD@ob%OQ7FQ;DHnz9 z*wK7ZD8|ks7lmEeT|Y3Cv^krB+gL)1kA^wsf6-*IM>7r%xBnllXRU=qJ38=JY0-A2F1zgB}D?Wp-V<9%+ zqi-;8H@3Zd#^?e4n&^ztI{lpJjL`=6=#0^(O#L77;3RcyruEUYtrzqR(HW{8)5JYg z9+N-H<VcfhE3w!LT*OUl+>`5+&(YWxFI#<+smkKH@&>@N>L^3+2&Ua6kUS9>@=26mG*q_%fcsgZLJn#1gE_ihP@k9I?ucF_z>SK(?bZo+z$Sj{fhR5+u{6*MXx!;G+Vk~B04ze*HTk$-8iVB=S zHxA+oW$9v^Wxj!Yn~aeY@JT#~FW{?q3OQJb-6%&Bj^Q^rgMOZA2u5KnCSnq1;D_pj z-zSM9*_NfRe{kE1p~B^o)5*UJUq)`c#sE+2k9h`}h04y_)cRTR5Dw}5^qKK&o1_lUzFmh z!J+#B@@Bk>cPw>X0`435ZzMMfUznC2HoJZcxs|<$`!@dD$sK3XL-D!vu(LNk>>_uw zmvG<1e=k`od|8@(7f}1 z(Sp{&{yUF$bRh2k?pfY_k#}F5dUwbkdUwbfd3RVsE@fYq=Iny>yTfvF1^Y_wdHh$A ztJ&9NIHSN>0&B^2?CZG~48J>UNVz*~#HI;%heB*--h!?Cim;7&J9eDEI}~Fl^DgY> zSAspvdr>;!?oft(%;l)ySBd@1Rj6LzyaF6xu0?%EycEk8`2JYVyaFrv=+}>)c%tj z**9fq|FyfD$t~<#xfk)@Ms8={QLp`P)BcmYI<^1D?VUP#L5TKH?PV`LXMc3B_Ma?g zui#$Ee?M8pUR`g0lk5BTmru37X*%8LOzZv0M)sx*W2{VTpve~YR_<;5+sO|03g51> zM0!?APqLJ~Y_{~CE4|6xzIDlD-(#xpLGEBLPWHWg>s@4Pk@PH)p04j8o7kIkq<5b5 zChJc69^4!FHUIaq?Fh$l=8S+HI+ zEXLNEw}+xr^3xgliQL3qIQe${?c4PUZnqxg_E0eU_OM~@?O`Li#x?m^i*@*+I_NjB zoGJciEKom{FEQTs-%9#0YW zDx~3ld=dYGEUd?0VHsR}7_P;~F&Ur5!*~>PP=IZb-7|jaLOLRRMshTz&=I0)fH(#k z7jpX?em{uLi5#WgUux|2pW44F@^NFg9~<}mo3YsWaoX9N)57)SIlN+R#&3<;wo|S= zk!GLcHDkk8V}v4X!*=X7hu)1##dU)*;bLv-MdED1=Uw{*zlS4t-YZz6U7x5b~XRs@c%pi zEv~7>i{kmau(jg6%JncyU-F|53RSJ32nFjclz<3&=Fn@uZh20 z{2ji_MV@E(gZl5z0oe11`TNj6ls;;G<(h$E-`9tR@^4)lD$He8J~=q-f96A>ithSe z*YEXAZ+W(To+&d*8}E0A_j>|Yi~BQZjMBmTH!}ZPTKp1wU3(k*H@#a2|L=)sg!Flw zd{Np@6W22Dc(>>3z!zNq8P|N$wcq02C`|m6@y{9KpK&*Z#a+ff$iWgU?YhbM_ok3b z@49?$wErn)|C4_rU={P~F|n{_p1D4(#X77XYz_b$uo0Wa)16~8wqWaEb4%EU?bvbF z`e*D!w3mPPV0#y_2YbDKbFA7GEh>aC~W9jmvWx7U)rDc4#a@*sOjnl&TXi#;gD4(!5C zRGy;`LlyQTT1!-lXf09nZ>+z`Yn$YmQS#A#`D3#@I8Yw>t~~V_`RGUd|BM@jxq;gQ zWQ)8sP##)^pW`w916`lU|6$jBQ(pXpxc-7`_bPHV` zqtN_^vG^|Ro~Qo69_&TwG-Gt^%NP{Og{_!s{-fTvZ1YXY>P~s)xV&;wo+0bl>(9wc zz48><#NN#PApaJ!mA$Q={>SwlZ5M^lNt18lY3aHJ-<6(!Mxt+XHzrE2)za=O+Ie<=KC#ak%OyM^oV9lylA0-tpKkNN*6|KE!Kz8f>jm{WW26vbk<#MT11F?xL&bpT~bCxgx>ZBr=zM8QIu7HZ&CY zW;wncYMBopI(lDj|1^dl$Kry_noF z%Nc^6VLvK8Qw7SI_o1x$8fTwalP>MMqwg#YZ<97JNV^w&m$z`ObgJU^-6$=k`yJ9O zCJq0@9DI{2rGKLElYEOyg!>cwTf(+T*Ae{xMYvBWYa8%8e*eLKlQ{p3hsFP0d=D!y z&@=pv-0mKaiQ`gn+>5Vpe+-Y~$GC&ri?~Z%|BU(gm}j~R&*Ee5^;!HA)?yp7Jl`hI z^8-AO{8xsBm&ui$DW0bMr)&Sn#mZK;GL|z_`6rjMFFSHW$W;!PAG;x}$WZ?0D*yai zUHeC@8Iu?umv(DMCx#oO!KY9#LHQr6{A1H_|Q_i4);<;((YVbQ42j^_O6%zAAld9cu4@#*%AqY(|LK{aY|0F@)1 ziI2xU<2O;pT#9mM1MfqL{pY)9jH1ibrjH#JiYJW2A`6jKZnP#0>8mU^5pk0O1``e z-^3HBMI$bfA78^w^5>_pM&8=hNtG!!KB;~IsumV+x 
z{^gQ|=uFwg$ZnQqSi-y%%lPFU9}$*IzZK+4{f<1W!fLF^H%B;bgtJsz{j1mhwaNcv1$*E5V1@EOs+~Jsb4LC@uI|)ltwVJ7RRbE)bWB}YBL8PPL!(T& z_08KxUmDtLqVsbS!VA*iMZAT6(&%zr8s!805P!xEzT+2hw>&uH z)A%#4mKVN=r|~0Wx=v0LXKP+9*gL62IKjHTneyKJJe`7A{ydbtE7o9XGPDcL=n903en|u}7 z#NLd9DcWY#opfIB80Q6!>lZ%BznZ<~u=9M)`S9B8b_bR zUV=Ro6??H`G<{RTC&JDN_d5HWqN3RI;9hYrE-^RXc0u?o^O7=i^jKDC{lO`7`<^?R z<6qhP?i~Lr_GpfOP0z>=Om9!$d*uuwk!gEDa2#dIpT4p(CwK8E3V zSh(!17lzk`JBws)SK!Nd3{M~j>rsf^a3)>+7)~sEFm{f-cCfJ_ZpFR$Z+IAA!6KBw zezy2={2IT-@DI5Lx8p(lJ4_|S{Zc%B7pCE#@gx>t2dMDlZ7_8cKaG2b(5GQ4W?(kH zf$!j1Y(f!EeCfeh3Aqnds6#W_aSYpy%eQqJ=O3CG+dAAlVzPO}k;42s5yn4)kKxn!9KM81?7%_%8-9!z@Fp(wT^t(|pNPBB zVP8i30(&wt?8TU8kH#E(HKIKmv+Uhqj`nagOrs~5VQ+^$8?`vV9Gz)b&97>Vy&$8F z^T*m7GR_{6RC`5I-aS*VlwP7wFEHMol5~243G@QO?sQF`o`8E?m@L;UA{U>#C}h)N zzEX6Y9VmVe|CGxOJ_|=|$4VmxywdA^ybSY#3`v!8Odu}2N**BA0_-`eP*te0} zd+933V)mWnF8;g8681ghUe`zOP{zKGEazWARHu;H`%-dQraFLJ&c1?N$v=-=#lD(c z4L_Si(R*YlOm@Q)7%&Gh}p z+%s3?J9|*ww!bJfR0&gEW~_(<=ghIUssH%b_G30KS5+VmUToCu;B$ynr{A-DG8T5wq@gd<6=y9c8G+-_#>iobds88gVSg zYj^`A#W4YQ;eI@B0DmcnOKz`r`@VoWWHZ zb(6fg^ODT_B$@X~3M;23g}fTNg5z`sv+V=Sq%+VqZ&+a6-L!#W{UJI9=FM~Mf7VWK zvWBAYEL{To_UQwy0~r{KW?NHHV{HX?Xy1#At@}A?ZAGUw7A4kNq#3K*Q(ewpdTyX~ zumi(>`>3l>fl3^hs~<3RpmQzwmT5t4vC)#+u>DAs=f;+y9UCD8Pm@ z+W#5ae{Fc-$@k9x$#`%5i*|h5VeS9C0q^!lN}PW%PWxZu-!$6t68GHW+PxzOgi?Kh zGJS!4Wcdu|A50k#DlO>hJ7*_F``@MgU!eUTZm+=q!`HjO$5q|;zCZ8c(>SqDF_cg@ z*R3BogcJj^G2kXPae#mWPH?~wlG4NhN*pkc5GQdO8fiwO_vrn8&x~gD97!W-q0n1<>cIRKc9O)(s%vWUVFX%Ywfl7dhI5ggJcK?VABUNTJ3_l$#ooimm{UE(+$waG@!bMsU>|cd2WZzw{~fe* z{QEd=MSAex?B+gD#eD(UixX;29kq zzB_k;wf+ly^Wp+){TI{>bx@0Y1F|0c_-k8pfqA?O3aq#w59eCXU*K7IL0;T~yDqTj z>w^05(+izjE~o>#H(pQ|GJNEMhG68`3mQg-j$U9*UVsaV5pU+~N6G@n`H$p;?Ds#C>%)(f^WI0wg}h@QG5-HZ1!q5E{QnViEg#9f zgYn-kt~=yBWpEbC@8J3OFwZ|^)jpno$eQQ>Kc0V_YsOCtvNcIN;5`QKF}{)JX8xNo zzrTs`KeCzz>|^i*@%6$Far;M(ze`!&PMq&>%x45Me}vmzxZMh+xGi$-5BMMA z96u$N`7X!5gYV<^ab$os<;yqooP$<|Z<9Q8!^jZFVHn-W{3|Rl{roS?6*%Bd+=K8r zxD__T)$n&$-lQ`do6Qs#|jhWs* zqhAhh;dYYaQ;;MtN0DpLi&s=KbAj7g=4Bi=ApZvGM}8LBhr9_HMSc!>is$_|DTik8 zKn6Ph0|ekz_;=vbjhTGHG4pcD;*-$N^%LSc8sK^wL=JHchLOV@k02vlgHdFR<2W+G z`J;rJgqtYi4)Qw)FINuf^Q8GKZv4bqrXS|u3*_r*@WL6mpE`O4Zl`XZf?Bs3--eWn z{Y}oFgC%$ej-yxU3C71z4i!+@S#JGld`!jhDo^GhWjIe6W>QxxDCd=sdktlB755sb z06)A0)1P6F8T<~4KTV%!BhNR8K^zj0gcOV(jqv@?kU~3nJ~B==zx6Y84bR6r1}L8a zz7K@r(J1{M)+}&$LN?FLLehR3ax)nJuVwuILB9&#?^oe_{k;F%FZVmFY0O>x{sDcO zGAQT#_2jQIzrw0Izr3bm)>dliBh{T6RQ=gOHDGTnyZHVAvSlyh|0v^s@NOJj-v3gz zyhlHF8#|4rzJEa2FDTls%*{SR{?j)Zys zQ$P0iF!2+|PlDs*5z6Oz$_MEv^I1;FWYc0+lh4YD`m9{H&&mc@*k=_~`K&_D=Y@P$ zex1)MroUg}=RGLSyE7Q`pWt~6rL**d&hosT=6QXL=QUJK@E$w;g$DWwweRsfhi3e? 
zoZ`Lr^h!11-@AqP;5V*R>ovR=4}qP_-#u#E#e4HR`0mjf^KELrgT(W_6M9$jeL>uM zR;^SJ2A-!sa+L2AK1+WDhKX;4_=X-^sW9=y*74n=^?U~jqQsLT>?q+9#Fd2XKk$2_ zn|NMRW)|`T_z=n}S&Qd`nk_%&ow`T)X4U6ErrDQUt(Sl9;@cZ8>wn_^{Ux5GwXf;F zkPZLWYr238-FQ})oaeDp*=h|m{+@3g@*NvKeka<<3Dx>$M;ye7JG7!R85#Z-tjA3w^{}3SF;|sn)SHV z>}lhBdaL-J9^cg~0%yi*Irwf^*3Q-3_&%ZBEvuEYakX4qS2IRft%gIZ>Eo}aueO@L z?rOC>&-eU}@;yJ+Ya5|r&uW$5v6^pWt!A$T^9cvY<9_B7`2JocbVpY+x6K$Yw3=t# zYW3~?NWGn_*^{+eJ>V-_EpHXy(63vq)~wZPcd@qw>36SI8^m#s;V0U}Z!y5=Y@YQl zzt0f%ty>}NX` z0~k_#g73W^3n|H1C&4&lbR%OS@{##riuu@P%kfxB&WV(=%TjVRrIh1ODc7A+Ua-u{ zPp4FHO}SMVPVxP>a?4$nQt{RSl}t2SrRP&B3xyf~ms=I@wOEy>QmVp#^<0Ymf8|!~ zW93%e^W|23G({Qk{kIR9|ES`daevAB|2xcoT*3SY--q)a`IvlsoU?7q$1cu)yry>M zS^j^2AA$4zJTya9CpFM?QiHBvX(<0Bzuj8{SU9R|KWA^KfKO3_*M2l zzRLc`S5-d!sw$vz_El9uHTD{){oqyh%)H9G@2@frepQWgudC@W^FKS7|CvMo6SK^J zoMHasjC^O$sBPkm{KwAl{^uFBe|Uzz?iuzYp5Zs(&M*ghMm_im;?zL^KD5Kr>XFn*FR6j?3EQSP0owY(=xCoJ(W zoeS>2FXH~n$LVKq3wxAH{(JBYaGS_<@zUbYkhx2&d2e7{dMo|%1M~;YImm+=V7e)j zpMuTw!KZmX97FyZs=lyZmACV)?z=w4cOL#qW!Lh}&-)n*e4X`K_O~^C`!Y2?dbwKi zu2l1rYgPBmI@SJw-@#>)s9t|5Yk3MX^DmKC!e;mz;=cv?If&?5McDun>$py5>0^D8 zIqSEOG5Sm+@DjXp3C|PuM*ar=nl8;3;6eBSc%zKB!T!#YpEXR}xZP#8Wv$TXk(pT$ z=09U{Trtdk(irofF}XGlGyfS=?yi{fR>heAjIn1U#?v7a)gmKS1bT^o~U zRh8x48)N)aZMBh}KU{4EcE;46Kg|4Jz12znx*iUx+h1+!7bd016rHPf$a z^2Ax2ZLk_vj;KByXU}$xRqKkYW@C+2osRR}&Xg*XaaFkEDo2(@<0^eWu9B^B6<0M_ z?n5KI8+TlVAB?B~xBO!x?46G*cU4?D>Em)Cv)xhpm~lC_#Fdr7ONqPU8og#jsSo4q zNf=QA8UG-z*cHbWMMj>FY6LkaU8c5BJ`)aRomd;ocBe zH*`&psPjZz9S=vK{D9dUV{kE?Z0y_H$lrmQV_mZP&x&UGE) zb0?XuWSer{=~M0%c~)MN%gXn(sQ`b4Rc$KTm`A@a&niZiywj%A7Xm8V*QW9V9X#vX zn5St|)xi$tf7=-UwyAb+8}q+ys(+|W4F}rPn9-)DU2UxWw5erFn_3SB4o3Vvxf0EGToubx@;>-{$lI<6i??_iG40B`D`2WfNdJRqfMD>^R28E`IZCe zJnQHA*Dlw3ziy`-a@YEmm*J;x)Tsg_)5@8ADkjs7d+{p2N>=(=`!eo+m9O*j{+plm zzjob&pX%8Ys@dUZuES5iw^Q}VhKWx0znsv$oNw-QSuMD=?)9^u+%GTj_%i%zL;BYh zSb>N9YH!N7I(GWeKi}#iz3yly^M5WYNd9{F^|Jph-|Bxp+Y0UR^ZUns<~;lwdLiEm zpUAg{kt5_QaIfBBot^@#3)%gAKt0rZFd0yb_0XqjS7|!e zYT4SZ=AGT*^FNu5LF#a>UG-ZFt-4)>RxL93!*=ByXnSGKF2eq_5Gu3RhYXg34; zlPWpV&0c|axw%G*-fvgovxQax>E&~c=DpLd5g0y@YlS;dBSUW#_lbwWEk}U@P~o9_GI@t$cSY^IzR6 z+)`u}xw9;Hs8z-ID{-}|bbpamwx`G{M^-eos`7-FzI`k6Up>s*wyO5oR@LqAQ9ZI@ zUn}qbwyFvD=Dn@Vf3>py(<;xgR(WSzk~0J(Own;nu&ZmHCfW4eV)U|8uK`kl}|)tl@R7^nZ%1$kA3sCyJ~X>BT?j zVg5VQN|L`6xPj$%p^QTx_^{8=$ zM@@4=pCKe}zW@;%VRLVgI*Bo!1mwU8K_;bXz^w zc+lT%^{(@%uQRCromp1su!s4LF7`vXt)au2R(QXM_n$o)dCbG}&uvBL+*S-3-{n!_ zS)Y<0dX!2BdH>nN{m;v@s8IV}G!uL6fV+5c2*b*y(P7aEuYT$~bj-{Cr(S+b&ddGJ%d@hV{;!w4rCvp*y^3Ow&3f7Y>t*cYRWj*SihPaw z$@3L`D);+Tc1?*@TIJ&#A|+P!T!*ScK2@fjRzsan^|;pseX1>USS@ot`iLb~)2v#bKY^Tzf^h z58^(s@^yuf{nX>g&OQy}9$wSO^WI0F#i!U2rxnfbQ)J8Q>{;|Nk4+u!^eJ(~Cm-j% zvwe&qd}`gtHS?ZN0o?pNzuS=Avp#j5^09X2Q^$mlYsSaD)2AT%^(1|Y&_<$Z+Q)lA z_S6TJpbnCdiuN)->Q(=2P$3xbP`)r!*30@T<$Em1^R>w8nC?|4be->2H}r5V2R%hr zudB%FOBPwJFZ7bPUU{3iztA?uPU+vSwY6QLDe1!s_xk$tNuWd)o{4TYCIHF z6Ev^wRST3p&;8{NbAJgkb_l9+XRoTDdR;Gd)vLT05?21ss8z843KhaZp3#-u*NV3W zRdNmOoBLMQkt<~H?>l;hvOh>zu9ZKs3*^V>lh+{s8p-rt=BJT4-)7A`ley!2*x-iS6nquF1`k6i^DJ}3b;VW8 zjc#H6_BzHsKjz&+@!P=HGS9x1an#KO{X6E7Z(t0wjeQchfBSmw?YQ;e_Fu@WcQDU{ zzZm}i2j`!|-=&=UJba3Mb~hkD20y!z=LEcopT5==x(|Qt+nIyrcsF71!*2;X-Uly# zCSo1CJ;Lu(Apd|Y&E|J3nB(eqS;y|r<#!?+*0FmX*2}HPBIFHO{9Z*Cziolc&axgR zj%T3`{uzAmIJn?Z^53~8zONs%%s$?e9QPmTW8SXV8aPmF4IVDG zh7R>948v=Ccz?5p?>F=)3g_^9oN?TBl*1R{%kVXD>^UuGu$Kj@9%UhOI(wD-JohoA zYcKb$W9-}8InI6TG<|aJTOQt9fFj)8FE9rY?Nf8OPfhMVHF95RSm(6r=jgMB`c#`P zv1)esRPE|h)hee|iNA^rr&a!bAM-w$R%x(LB_H;w_*kFZ(|ybf_3_QkJ{7o1to)<2 z_fuz-JI(!ozNhO$pR$q8b=)uBKcg&UCT(Jr4F;*sPb%s8q!KTrta$j7iseVGXjasU 
zFi$gbxJScDe$yfUlN!SBAbtmsp*yZre~>x6*{IcfjD1mTxa&c7vk|XreGku!E7h@v z-$uxzp6<%B{41|yZ^@O+wf4#Tj??md=(JjqnX4jJ)~>a3FlXxgFllAK-yzo(DJy5o zTIIe+AB9EHd?;WZweUpJD$0mh?p-OXIJ8zJP|DnEStx0hQ;!vA*Yf;h-j~JB>irR` zCYZEp52mcTvZPgiC}K4r8$Fk>&U>kvL)~i0zmz#W=6#>PR9>X7DrL1H{f919plL00 zeQVW`Tuc9VE#K#79{6CldKicYgDI=mpR)Su)~eqfwLyn60^+vu1s2~^&J}hFlA-#x>Q;8PaWjN3E3Mjk?VZI%2^lX z_b*1Ryv|FN4+U#4QQ>UDDq7PkxBpW1zOfGi_tL#lt1O(b%J+_1714xMxj$-EbtbIp zeCEM}YkB^4sjh6T>JMF_1{T>G3DPknfZv6J(H;C*l^RxbUDeM22um>(-4a{+Wh9Rzp@H>}k_`ORs zvNd5vR(2|y9<^epE>)ayPU6{1m4wvvB^q6kurgP~`2CNB<;c2RPRPE7cRA)pssAAJ zf0xs4*!K~-Tm>6rR^i!E>OWz*Ul_HDcgCy|WN8`eJK@Vzp4F*}=w+(h&%Te~Wz;|W zK<-Fbwa>EtQFj@0LYMRX-^(3|#J{K91_fubY)E*gFHf-wE$14%3I zS=NoXPIC^$t=xw%Q}&)7xxoI;RUOye{&waaFQe`EXcQ8itjo|=VvI-Pq!Xb`qm<+D zJ27j7d<=z?RyhAM<~iBtMZ4|NwgK_2M@^0iEnU$BT^@F(OS&_0Fp3QoI`{*~I;u_ZP&NJ=}ZPrgX{uii+ z!c|x4SGeatc9n|Vo0;>tN;UX(<6e$i4u0O`d|7z2hOyUtxS2Wd%}Qav5*m(PrC%WH z$~G(dTlP!6uvP!ganm)wB0rn?4j12v;`!iNzgZtgwr6Zso98Nh5_?zvRqWGc9V~j4 zI@4FN2Vk@Mf>)^r_aF@5_7(i~pTCOhfb{om*2wdlHMDgz>wKFPd;co>{a5L0_=&D1 zPkXlNC)g7Qwkijixn_&9sy5L-*d(WC6W9M1xgZC7Zgdml|1I=?x2PJwg)6o&j@m;1 ze~XG;n|S|oi%Jh}QCV`6%JEb2!6sGuH>v8_CRI;tV*YEBYENw9`rpL&|F)>%*)3`& z9xwb4%FD-dc^JFricRufvx$52CVp>{HJ;8b3Se*Fxry=b7S{i^=y&+<&f3Bp=@tc{ zcjYGa`M1#iwCgX;I*pay8%aRcxFXQ=s9hFYdGn1jlYXKx1M zpA7Z|Ww1{uL;mM86nHE{?R)=T9sbX$v-5MT5q(bGp?dY4s8*2U-los#GSctgQ%V27 zn*M)1{r_tE|JB_8>*@bj)Bmrg|6fi2zncDkHU0l;`v2AR|Ep(j`apuKK`{vH{PZqS#I@%>lp zd&sBYYw!)|-*Yu{bsJcpzgoi_54kq*yJ;IVoZg_3QyY|5b~XKyt5x8+T7^&qZYbWe zf${a#$_!qutQ{NVSj9WFD>mp~h{KhCHS@C@l)G*NuNr99rG*KF+X=TdvZ3YoHQyP+Q9zQ4g5aM230?{fwi~|EO_o>{d<=jdw0tT*$?fO z3v#gMLf+Bca*$>Y`~-@j0KD)AuF2B%yLta*w>s{+LMhx{gqPq=SV2G6E7tHHeFZ zguPCeuEUN!rJa7a)d8o{Lgwg`E_kvsT}ONb+54| zyi)m(p+9*jd>;MNzvP`W^oJ5CJ@QM&&ghRW6^GdSf0TCspc;D()IP*J08qb$cL3Ns zVD|nuy~p<|cn_cjT37H60C>R%ZQzFhv_l7ULKpn$_q$iJ_kRt2--AC_KWT+vU={Db z!_YeV%Cl(=^ZvmIej@Lu+4IHw?+}Oo_xtZH{@rSZrmEd)goXnzt3GSD>hgCp-rr3h z=08;(+)bZmw<_lr+5diCb(5CO-zN;kP{Z zuETwC2V<6lzgO0(-CXdy^?A;X;&(siHsdG$*lxujdSo~Ad%HDqa9+cnce$tU*3h%N zHF#*Z1`h642-)xd57zK^t2h5$1?T=#J+628ji}ws|No~t4=?iDF}s;V+N}WX*}s1` zb4$DB+p}BVV{glIU^nkx@76t}e?N?ohwmbP3_pczsDxkQJ_*~&Un9r;Fa+c92D}2l zf?vbC@Sm`nG3Z~yRq%E0ueTt-NcbC(pMfvHUGNWZC+?3TzYh1pangDc`2+YVJOd|j zt3=*|-=8C`4DK(;-{5`*UWYFF6TR@?IQQ?Ii&M_GeU^Iw$JfE9;W5IW!|xZd-vr-+ z%zXBNf}@T-0FaH{1v#vl@o0Kn~>%Wfc9|G9h=UD#@bNw^^>q2(B zdH1D?v0f9`KeCVG{`a~5KjivH4stw%d-(mIYq*T-A0i>X{{S(FL!ylDKR^mbq1Mg2 z9<--=+DZemaXsx5+01bZvh?tI_HOX)xI6fU+{1i3F2pzF!hAc9{SP(BytQjsA6dir zXbtZOtx-`1-+X}L>EkNdv4$Hl?^l2WoRG~q7vw-LM9z~xh z_#OTlkloM@gD`uZc8v7k_c*c}c@Ft!%>DFZzY_T=2xE`I&DigR-xJr}$P36vkdMQ2 z@H6@pn_*=O0v!oC2;r@@5 z`*!2b@&Dxb_aN?nKHN6r)`uK~mAHKzeu@2R-2VdmZ?K<(zvK8XIlh4XdUK98|L1Te z{0;8cBe&!JIpl3{AKc9GXE?VNK8HPX&2N-t*8ABP=|pC;PttXS_WwNXpLUs-@f+qp zexrgtw0|f9Hx%DN`%h1)^r7FVY=ZWGjP}3cMdrV0|4@DQ=law8p9jyX{yo}1G@hXS zgZ-WQQ?&oBwErEn|6Q!pBilIkA7I}+`7!(EI}Y>Bk|XS^CqLcD9*%>!_j0}u*^mFw z4)%W%eh@jtargx714bZ1yNE*U{j-X5k4VrylBZ~2XK7!^X68d$pb?s&9vYw)>Y%!g zc?_sL#vTr2c@xih7teiUSrz?&v-AVf^b3&gQ}h#Fpq~(aQ+X%oH$cuD{RqgWZ{q^T z`U(26Y*e7%la=vv`XS^45|D&gC-(t}JUgW*a@dU?wBsRU*u{M=$bEq0{(WyMROE?Ly!*AeQ_+3Q)79PR=d*t)P@d@Uh{{kMz z?Ni7G?iUW+bKwcxo`ySdFXFfb>4P{-LN#tn+@IKzqmvvThcoaFZi~nU&bPvL%IsFS z6Z{N7_Kz&#xOCN+%AkDD z2dW^R%2l*~sNTu*A8N7JK|OIa5J%%0+W-5s{||ZoZylrmPx}WiVSUhs-46l6wnGQ@ zPUzxXH`wplaNK*8_7DBoLomR(K^VdwhT)y;|Aq+mD8x7yhXnQ{q{_xLS~W(WcRlYT zujhT__3X!Zk@3nIzU{J}HLvw*aI?4Rq4lbQ>dy75fl};cP`-D)DxhfHdby$an)R%= ztXJOp^~ztlUIlB`t8gppEf24kYs-4Z6YE(|T+aotURignmt)_0IgyEc+C8M|xDP<& z@OnjQ-?1ihANV=@cwblez%x3*8aIo*p3E6d$3v#gMLS9tLhk`p^P@!8Y;)OqV6W`iEmT+A9KKp7v 
zWDhQ~g5yfutD1hVYBv4XxPGtNAiq-*=667nyw`+mB*=bH(ifXm@i@GkCToo~4D`xy`e zvcYda^ZOZ)0&UQSm$SeL+2Dd4$b~$^%bB)m{Nx>TU=~0jR2b~D6?P02_Ss5YD{a?G z+qKemZO63(2DQN)A0w4n;+QQm%$8xg4YsWqTd{2`!B%41O0kvNHsiN!wjZ-x8_qfE zj8U(R+Q|Q{+N908N?Y`4UCj?=eJ0}?xWO2F~gn~iacF?M1Y zZ`({Lq%5A1`lz*pg3*biTW``~^! z01v=}@O5|yz5xf}JMaiR3Xj1*!Q=22 zF=jA;xlrRbW@DahniLlrJB-=dWz2pP+hUf9e=$F5%m$PCVq40Xev`LFze#r~*Z5z` z9W>^E$C!hr1kUH09R6#+(d)O4T4NTr8MDdc=eO-9-QV_`eEyrG$Cy>w#;k4ox7Lf6 zcN%O!-mV)K9Z_W{<6320r;Ho;H-eHdW@8?v?ExR=e1?4@!#V*iJiar<;^<6HYhTPB+<3 zH!I_2oNl(AZnm9nQN}Ge-C{f4Vms|r#!j4e+D1}r5@P)Q*w(o6rk!-`OT^!r&g4niQ+w4Nv zW*5S?8@0_YfNgdGY}=u2w({F<(l%S=Z8vM1t?0H}w9Qs;+fHq>mD)zehz*lacKRlJ zb}HMo{aR7p+i&3CoZ6njzirv3?VP<)+joe5;`W9Y3KAt z-FU62FD71Y%+QV7bmMm2h~p03c$03tSvTII8+YnPI|5sy8%vwSe zCfmG4wxT$vE4!eY0-9R#f-RcBbs2xY?Yv>-`qpa;>O%yI^iH z&UW4J)Xr-~<=e%y(>UAp|M&X)zu%?Lu~TA~cA5WO+O1o4>qgyrtG=K;+H*jA-p;E! ztS{<|&+0bacDrtS`a0c)+Y^uLcHRDE-Tn>T{uA9kk%W})(3kY3Jzv+Iy7P;=^RA!h z&U+8)&ZD|ZcipSI9?)Hn3&*|MYyRQO+Iv8IAJE>f>&v=ZcYjfLWA4*Ey64Nf=l;XOY z{m1oB`X}>$2%eM?n1}Va9w*B0>bvOn-Ge%$L*^g8r$bNa&<}O!$NHY0&=Y&~1o}OJ zwcGj3r~ml z=8S_w)Ps8JdwS}y zo_bDC>*)vdjGi$C_skwW^CdmQ@q>Ei5k2#qp4IpD{ezDh?6@wyT&>+)~T^Bjdg3RM`J;a^=hn7WBnQnX>34agBlys zSXg7j8jETyrm?uj5*kZtY*gc!8qdCO5@cUuhDp|#_Kd*uki+rH)^~|ns95PSQ90hDAh!nCMq;hsfj90utr#;iCRt6X`)^e4Vq}w zM3W|(HPNDpR!w*`;njps6K$ICYobFFoto& zgvA>Q6C;|4Xd)`!M3{(cA|Y;4G^<|}>ldZ^MXP?{(J#97i$49LUz2W47HhIZlckz0 z(`30OD>PZD$tq1&YqCa@wVJHcWW6REG})-hW=*zevQ?8FO?oxy(_}!C?V9Y+WTz&( zG}*1m9!&-{*{jJuO_IcrCc~PHXfmqFm?q}Fc{LT#RJ*1+G}Wo8E=_f7sz*~nP4#N3PgDJx3TbLUQ-his(R8V%%QRiC=}Jvk zX}V6+4VrG$bc?1vn)Yehujzo;|3BTS=`KxoYdR>l0ZsR7I;80VO%G~%NYi0WM>HMP zbX+fG=_Q9=a_Xggy;Pu=3iVQvUUKWDQoU5Bm&)~0m0oJnOD%dSs+ST^i9rq{90o`j z6`IM>OrB=)HB+FOBFz+QrbIKPnkmyvxn}TCshKLxRBNV2Gqswj(@ec)8Z^_WnI_FN zYob$Hrb{#3n(5I@P&2)n>C;TVWtK)e(p0DEtI$o&bMLO=*@nRis)bS?0lBrj+^omojbI$5ui z4LaGVlTA9=tdlJ|>D5V}PPXZ!Unc`P*{+jaI@zt0JvtfG$zGl8)5(6F4C&;6P7dni zkWPkmGNO}Fo$|lt(5azQeq(eSBWR3aol0T6UdsP#BgTjrBZ2XHqh2rL-yD>^HmcXl z`8Nj@{2N)xzmZk1MU8>2+8DLQsC#Y57})BK(ZIhgO+1bKn}a63-fU#6F+9fb>UE!y zZOaCJ1IE^Fj1K;d?7V2ao@Lu#cOY!Lv32op&JUo=8y>yk)f+y&(WW>2dLzL98|}vE zF-FiBy~YR`V*q2;WsDqSo&1<8?W8QYq#;*ZGz*g$Ha|c+>9`;v)LPrVHX+xZM86L zpUcT}wCvM_GBzTyb;Pg_O^Rr1Vl~3}ShizBi(Zp-uL)%rgIx%A5%lSee*T$1F}V-D zF*utyn~lYGI8$V@B%{(G|0WI$6AQvbKe%kSb24bs!B}>;BOf$z4w|fC*rAt|;%M0s zWe08lCc|7HL&g(^eQ4qyGTJN)C5TZDgeBFeu>4#PfN&Me2v zM|S40n{Y-DGP-QX7USQ9jTyffe-!vNN5+To8^bo6N!!IJwvE{`BT8&z*W{zx+CWyn>oczDh zZVZxf8lO&+jFU?hgAf>h6vn#==rjR2pUr&`!)FY?F^DJI#Dj5Bn4lcl0ZioC_BCh= zA~HuNDw86H?fIf%dtT10x$)W8m8lq+^DdK;X#~bFK8+v_#PVr7L0fN(Wq;<{#{=Sx zzGaJvYdMr@(Z;^)#Wc5NrO|xZPF`(Wj%IU=UksBh!UQn_A4U)|XZehs9`@xkww^gA zwVXFX#u&h`!xFIxdoh0QbJ)>0XW5nbILI|=<{Ix97wyYO1kE*x=5mkWx$>t5#+_$K zCW?G>0uKcy7{;=lycaB&eSyhSfyq;W$y0%e7UPe?gfw}g^%{c+3+xz7>;$AF2^kgd z{m#ZUo6_v4PG{=0Q>U|aI!C8-bvjR{i*(wp)5SVnrqi`L?a}Eroet=9r%nfTx=*J= zIz6b#eB!t;m4higmvgt6LTxy%l%A6~i$>m}KT#sW8IvR%!%Mv@FV&MfI|1 zSQeqTk_1X3P#%#YE2&aDc&gm?l4@KQo@EhO7TwEY;7?%&mV*xd$zjkq%w+~N=WGv4YkDyD$xRXYNJVi(+LLEj(D?%Mc zNH0PiMo2S49Y(0b2s%edKSG`(=pCURBcvH2%?M>5L5B$S8fgIX5Frl{$|izN5$YsD z8AYg*2z3&nP9o$lLY+jYlSl_pClSgpLY+h?#|Y&Zp-v*yNrdu@P%gBcxd`PMp*$m$ zXN2n@G6E5ZLJZJ3k^ssml7dmqMbSTs{!#RgqJI?qqv#(+|0w!L(Laj*QS^_Ze-!O&=pRG> z82ZQ1KZgD>^pBx`4EHL(dp`#?Ui{o-y={p=S&|W9S(} z&lq~f&@+afG4za~XAC`K=ov%L7jG<=?J!9w@ zL(dp`#?dp5o^kYyqh}mFGmf5d^o*lt z96jUc8As1JddAT+j-GMqIZi#tspmNL9H*Y+)N>sDgETyOQBl|-BRe5 zLbnvUrKr0U`lYD56gsBRkq*jS3O!TQTMAuM=sJpyqv$w_j-%)|ihiT$H;Qhf=rxKi zqv$eCp9|55TkO8!U5>nM30C9k98bCi6JlFw1{IZ8f9$>%8f93_vVHNv(zI`wuA!FkNT&13%U 
z9HPmohX&}?+qrHih7!G<=YVX;g?uOi!sQVzuMGmw&HqL5zuA@#df(0?(mW$oDHK|T~fAryfdilGEb zp$y8Q(s187J_wFvv%?>4v7(E%bYasUZM}ayyO3XT(QTos=8v|4+r@%2l%Dz9puyzFr0IA;)soWJ30*KC=*AYoftL4G08eekDPK{ zpL28>>?l#i*hrBAaF|rP?KF%J0uZrtIl~+T&pC}(&h!{;2a_x|vSd^Z8qPWWfAYw| zMYkZ?x8pPE80T|NkDYGbva-ugDwl0}*o>CuT1vsF(ADtAT$*Zk zI=~6p;DQ{;HP|VdY+uySN!2^^?b0ixGuT1eB0{Tzu|gkvmIpDY1*v#9eBz z6{3E-IiL+MU%>w>yB9BLXWgDgJq@CJL;E> z$uLdAXQyfusj|-)uhoWgx3Zb}>*mvG;*jjV9Vpr1*n#jqUqq61fx?Pl8Ha^0Amaltvc z$w9M7V6Dz^E$YY_ij{oI!*sA6nXS%%(Vu(v*ye5gX0bNuzYq6z#N!khCGE zsS#5@G#h6eOe!>wa!R)x_j24SC@S`po@?N)(ZKm;8cHj%d(ha!u`d_UysZ(4rvrby zPJXVNl3;v&uBT4tf|NxsmD$fF6{4XGq5m+K9pjU8j5*HH7d}UyVxH&Sd}b{~HJ_EE zc}KP8Db;!ACFbb}%;&XhzMxg}MOiSUd3T-WOR4}_TB!N5Zp~LhRTFpsdrb`xUoAf> zT%Xc>L$2lnglP|GzOxsGHQzO$`JO?|_Z4e?&=7=(=EJ1V*nXa|J$nct9}0l97{AXm zCZA`FJ2j^$Ab`BWT6m8Q#( z&g@p22WXndWSTaRre&tN)Y92qfFGuH(k>Ucp%U7_5BOo~Ce73h+aDnaBTDDsKZkg8 z@K0YYol^zXPy?iqL--v0Gc}WD3MQRHnB06Qf<|bDR^VJ7apgH77YLu%1f-Woe)Hl= z=jQ2wik6rq1n z1c<)~{oPrR4ft~tkGl*g2RHuQ9>(~8|BhQp4$k|Eg{`Tp4zy!+S(U_sL@98`ccJ?>VC56r|A87@e_SJ(YKRS z2iff)NGI-{0j0aB2JR&39ZNL{ zr=p}5+{EL?ubcGUaV-=(f&3JAYN3RDmbietl%i9qR|{q6SH`)r5g?xOdLX?D{8o}* zet%$rR<}@94LuOi0xfQ#x(4cixT^bsvZ^8O8q%W0E!6nHuZ7y;g#ofrOLl8HRU0(- z?S(qDpdBvMqj`N9kehmr8*+fy8jPKk8YvyNoGvutpEkPCNC`Dk>WygEM9DT0Z4)_b zqC}fWrkNZx6ZPyW*MbMNy;P@{0A3=ZK`*pbYQgW&LV&Ed z6QBco2MKg>t{d4+;yn~g4|?>Fi6H)is7ec72vVKF5ug})NwAk{>?ISuWTKahv3#@8 z=LW*`5f80yp&xtykQR9MFNE5)Fo2>1qgoiGfClk9NbwDlHgoI?LxdSZ$szm?k^eAx z4^wSn@);)GFongO`@#t6u>7+yg8K-0VQzeZx$FhzbQhS9U5HW8JWCgtmt0^ja)Eir z1?ChNn4eownGMhkQ7vW_KsWTm2qd-0gLRR?*dqO^ zMJIOFRTg=mF1qreSBp7~T4eHKF}GBUTWg_D&;`Vm7lvVo!>AUSg;>nTUp{f= zwLuo6xrje>|iXoAJ|v{}$rsf?xDdz8=cbTLfiV^x@Bk-ACTqx`47_ z!P8v-i#%x-{pji^Uwm$bcPxN3{N&wFUIL^Ua077$N`SlsB0$)7;^nEd*iKpcp)Rf1TvFlXSWW-$j{rlh1C#c2jq>o5f!I^x?mcGVLS%e&XYavlt@(A@Vsuc@5w; zh#VwegOtS}deROS>3c2?5q5|?525oA^&h4z!<0c7f5XHxOxO|Xb%cDd?zBkXbdj~1 zMf#eHti3EU_q)ja?jrNFi_D!aCdn6bri;vtE;1Lo$Q|1xYOx`XQjDBHW8er)XG9 zZu07`0FH}EuNZ$N&X?6_se-gCaH}Bx3i4Q4t);3q zE!DVzGNCiIR7?D|#9NErb-34+0&&(6C)f5;UALCFxi8gYZ}4iVu^7<53HK&+X(ol1G-}mspBl8l=33kld@6!sIK=wZXl4X_#^xrX0C9 zFO5*%Be=5;wM2JjiM5_3)-#q^!&qWHVu^K#CAuq1%&9Lim%hY2_!4XLOU!+r&-6h= z=ehr$XSVQs)&L}Q-cbT&Q2BpzbYF{cooCi=dFf4xu?jio918`&2n4!i%e)?Zb6&(A z|2|j;PO=`gn?%)`k1Ky& zuC6`vPaoG`JpLM9|M%0!t-oe|2Kc!1$Gr2O_dfh%e(CYYnEUgz-1xZP^Z3V4{<=JE zfBNuy_s5IB{=fb4-u!ia|NGO2_tGEUpMLo9_QU(jkH7t~yr=y5+n=NNh9BMuet0MN z;rHYZ@7q57Jrq9tX#3$E+lO~(AAXPi@Z;^rfBd<6H}>IwD<9rVeRvP`;hocm_ctHj z-+Xu%^Wh!Lhj%dJ;QG@zeEc-#@1Mr~+9yKJ^V##o^OxtV=bPud=ZEK~=a*;VnR*sH zi=HLVvS-D!>iK_vtaZdC;d3`Dyq~Gs#<4M0ySmQ~*Pg%pKtnsAZ&w%4e|C9bF z{ZIOz^grqMDQrCHf70*M*m%*8i;kS-+n_$Fu%t{m=TJ_50*Ep7lTLf7bu3-|y4oS^u;CXZ_Fm zpY=cMf7bu3|5^XDe!p&xXZ_FmpY{7SbUf>S*8i;kS^u;CXZ_Fm{rWnd_4_q;JnMhf z|E&L6|FiyQ{m=TJ^*`%>*8i;kS^u;CXZ_FmeeM{~`k(dtnPT|adiVr8{JK5-x; zzvzF_|DxZoKzvzF_|DxZoL*qsNi+(>pjaU7)JL6UVtNvI0 zulis0`~7RY>VMV$s{d90tNvI0uloJ$HD2|<>VMV$s{d8L&nUwVXuRt84Z(QT@AtuB z7c}gGhR-hJRsXC0SN(o|8?X9b_1g@MSN*U0U-iH0f7So0|5g91{#X5e-W#v_U-iH0 z_kHQ`d1$=qf7So0|5g91{#X64`d{_G>i3yxyz2k0-)E=c=f>e%jPYClZ~edZ|JMIo z|8M=~bi3pEwnY#rvFX3`GzrvFXwnk(uK!*CyMCX+hi}4$8Rjs<9A=o~UH`j&-;NF6j*WNy z@A`d9HhfDq-u1uhf7kEx`*_#?uK!*CyM8}A4nI2%-=>Xs{qOpH&Kd9e-}S%if7kDG z|M2t1c-Q}~-#2f=H*e!zzwhmb@9l@rM#Fda!zORo`@V;Q!`_&@zNdBY}e*yIhH zyy5$sVUss(@`g>`@XhJ4$s4{l9e$=9HhIG)Z`kAwo4nzx)M1l1e4RRM@`g>`@N?(z z9ni4J8{W4Jo4jF@H@tTl-n$H&yz#ewo4jF@H*E5TcQL~zZ`kAwo4jF@H*E5TP2RA{ z8{X3lo4jF@H~dUHZ1RRp-uPSp-}-&uH2&8Aw|+nG4)1h^t=_QJ8-MHferNa@c=)<^ z*y;^iy9*y;`MfQGH!@Rjhe)f={Y!&k$@R&Utq4POxtTfO1E*|60c 
zzT+Bxt{%2}!&YzD>J3}H;T_Vj)f={Y!_VBqR&Utq4O_k8JF#J_H+Z`kS$TfO0{>0zrke6Kca^@gq9u+$lY#-enD2yZ`kS$ zTfO1y?_sMqZ1sk%-mujhK0Azm>;Jd@f9v--V)z^}{;l7JZ+K}o{-fW9Z}{4L_~Hy-d_$|zG2HZ{-ggt`h9L0HhshQjKlYg!=`W8^bMbZhfUx3kA9oJ;q&mY=^HkE z!=`W8^bMQ7VbeEk`i4#4u<08%eZ!`2_+Dq&^bMb(hfUwG=^H*D4V%7U(>HwQGi>^X zP2aHT8#aBzXQyG)H*EUGKl=aC|Brs#zTq>~uF(`+xU%t^xOH3fArh>jeqpp`;C9}+x!hLDu-7R!)N?q^EYh%hRxrw`5QKW z!{%>zRWWS-hSwFt=5N^i4d2I(fArh@jeqp}?rM0IF>L?FKl<(e#y|RP0Eh3ihVQh- zKl*I}$3Oc2(f^NrFG+`8;IIoEc7ek#aQN;i{f;IIoE|J837IDBU|>;i|^DZ?&s*aZ&Xp$*?*44*}ZUEuIt#_*YR{J+nn zc7ek#aM%S7yTD-=IP3z4UEr__9A3u^yTD-=IP3z4S2M#4-C-9vd>=CG0*77TunQb^ zfy4JB!}lY@HgMPm4%@(C8#ufG9`=F5cX{K#`n?PufAs&+Z!>>9DnrN4~#$heTP5({P~psam~?pn()G?{dk#E@o+Hn( z=frdBIrE%*EE6=s(#&heLdG0)O&%Nit^XU1P=gITzdGWk@etX_L@1Fnl{GaD< z&%ZtY@%-cYuSfr9{h#%J*8f?*D>nJ8-*>wCtpBtA&-#7G7E`-e^X0RCi@to;Z`GI2 z`YrqNS-*8(KI^ye%V+&oe)+85(l4L&Tl?j+ev7|+*6(}ceAaLIm(Th?>$d>RXZ==y z`K;e}d-<&2JC=Ob?{CMG&-#7e9N#y`LNLDjiU`1fHDMYsi+vN7yagju`A3M{a^Hd z(QhyqgTWXF=8OI>`oHM^qTj?WU-bKaJ$7?3j*D?z?B8Pl7Hg{5R>kBkCT}rsi~Uv1 z+hX39zw{fmrQi3e`Aff%TI|wd$rbCf z{H5QjEaqbQOTX8`@%}3YCb8;@=~fK3;yc-xSH*NHUK__UDmFZ^jEWUhte|3$65FTP zIK}oU22C+&iXm72(r>*Ko2B?}H#SSLS;}Af|I+`Le%qzkF2!~!wo9>HitSQ-4;=fY z{H33(3Re}bD&9|otIAjXTvfQLc%2=tDqr<;RpF|_RfVexR~4=*zEciYm9P5Qs<2i0 zs{gD0uln8U3u_hMJBPOlZx!AuU-dIr@oq3*^)pxbs{gD0ulo6`eAWL|zhO=|tZ-Q2 zu)<-*-YH-8f7Q=p<*WX$`oHRDv+`B{SN&i0f7Sn0|5yEA^?MH!L!TJ>OS66=9_-5D_-Nrcjft}|C@fV^y7Q;c&(pr`oHP_rvID%Z~Bde@=gCY{XAHBu<&5{ zrvID%Z~DLK|EB+&e$#+(V&TNXiN(+;hDPDUVr&$%ffyUb_w+G13MUq$qj-lJPAr^Q zII(bI;l#p;g%is+{f0^TrvICM(}R4||4sil{onL|({H$x@A{3G@?Ae)7QQUs^?%pT zndQ6w@A|*%|E}N2DP|1$uHTd)2I?_-ih+7qv=~3d_$kIu`L5slg&3~Ka6R7p=DU7g zE#LKj*UzoxyZ-O`%_8z$|9Ac0^?%p@T|d_r1F9HMg>B1s{pJ&4+`_oUj3SI%yvK-v zRle&tvdVY;-}Qgj|6Tug{onO}*Z*Dr5B)#%|Iq(K{}25?^m`W)PA;5W47Kt@{}27% zkHqjk-d)Fgl6Zd|?@IDRKS!4z`hV#Ep`WRXcYWdN@|*i}&Mrn@`Jw-Z ze(wbHL;nx`KlK05?;T-&==Yv5KlK05?|or@=>MVrhkjNs<|FYAF+cPhk;RlGKlK05 z|3m)|{Xg`3$Cw}bf9U_A-`phLImT`e(xpoQ~yu> zKlOWeAJ8D6L4NB0ssE?`pZbm0;vHsw>i?i?i?-{qJ7U;1ql@=O0O z{iZqjrT>@yU;2OP|E1q%A;0w7E##Me^PT+C|4aWb{lE0vGvt^4U;2OP|E2$z{$Kik z>Hnqwm;PV+35<9j9PfkkOFw}T?}hVAKZy|%Bi`A?I56H7$GhVE(*H}p_jvKQr_V3_ zzx3Nrgw)7H|3p8r5n>|~{S*DYwVL>YwVL>YwU2^vqQMRR2`J(PxZ4 zGu3Y!lc|0au}t+Z=(iKjg8l{l-tERXG{&JZA>ezUkN=r?rDqJCr7 zEb3p>Z}b|Y*DUH^)W4|T05;xtXHoy6{zd(Z`WN+^*TpzC_7$>i}?XswUQU9WT z`|B9aW>LTKY}_TzqW(qwi~1M!FY33=$fEv5{fqh+^_%U**fxv$7xgddU(~;-e^I{? 
zZWi?~>Nm#CqJF!lEb3p>zo_5e&OCNdG5^b=eq-G%>NnWUlKv(AhPyEhjIC6b^e^eR znaYxWyQwVcx1Gw8{w4iO`j_-C>0i>nq<=~Ol72Urv!s7X|C0VC{Y(0n^e^dO(!ZpC zNxvy#mh>;_U(&y%-##l#`j_-C={Hr(lKv(AOZx3svZQ}W|C0VC{q|g0(!ZpCNxu;_U(&y%e@Xw6e%qHU>0i>ntbbYmvVNoGm_)`TGRyjx^&2tAP9}CTS=MhWlV$zO z`j_=D>tEKttbbYmvVL3pEbCv^zpUTxKFj)-_1oEGS-tEKttlv%}%len~FY8~{ zzpQ^*|BC(<{VV!c^sne&(Qg-*75#?Uv5(7&es&-$`VFx>H{#E^}`d9U@>R;8rs()3#ePLGh zuj*gbzp8&#|Em5~{j2&{^{?t*)o=J7d&I2j=Qgsce^vjg{#E^}`fU}ns()4gs{U2| ztNKlNv#Nhp|Em5~{j2&{^_%u)RsWj)HT`S)*YvOHw|mT*{x$t;`fVVyrhiSpv2ND% zujyaYzovgp|C;_a{cHNy^snh(({DSOHT`D9S<}C!e@(wJf7bM`>0i@t*q=51Yx>vp zujyaYzovgp|C;_a{cHNy^snh()4!&FP5+wyHT`S)*YvOHU(>&)pV7&heoiNA`T>Hh z>0i^ou76$sy8d|GNHl{q~<(*T1fRUH`iNb^Yu5*Y&UKU)R5`e_j8& z{&oH9`q%ZZ>tEMzLz;E{>-yLAuj^mezpj5>|GNHl{dT5V*T1fRUH`iNb^Yu5*Y(@v zWnKTee%rjP>$lI#y8dzx`g;^{?w+*T12ELqBPo4gDMXH}u=M zW<&pm{tf*b`Zx4%=-<%4p?^dFhW-ux8~Qi&Z|LWsVk4Lh{dTh1(7&O7L;r^U4gDMX z?Ps&0e?$L<{tf;1wAs+Vp?^caZEZI6Z|G;1vY~%N|Azhz{dTw6&~JMi+uN9@XG8yn zemmT3=-<%4q2DGq8~Qi&+va9N|EB&;{hRtX^>6Cm)W4~JQ~##^P5qntH}!Ao-_*aU ze^dXa{!RUx`Zx7&>W5Kc9-mG9oBC~lv#Ebm|E7NKDx3N@^>6BDu(GLtQ~#!Zd*N*A z-_*aUe^dXa{!RTR_u16Hsee=drv6R+oBB8PZ|dLFzp39YFPr)|^>6Cm)W4~JQ~##^ zP5pLz+0wtIe@j0=lP&#Q`px>YrGHERmVW!bZ0X<9zomam|CatO{dUdS(!ZsDOaGRB zBp_S*v4Cvp-_pOOAI!;?{w@7m`nU9N>EF_i4`fUKmi{gMTl%;3Z|UFCzop;CFkAY! z^rHsZ(!Zr2^vRa~E&aex>=3i1A4SNP{w@7m`nU9N>EF`7t$$lT7?f@O+xoGEZ0q0F zzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y{%!r+`nUCO>)+PD zt$$m;O?VKAZ0q0FzpZ~;|F-^Z{oDGt^>6Fn*1xTPTmQEHZT;K&xAkx9-`2mae_Q{y z{%!r+`nUCO>)+PDt$$npj{Y6}JNkF@@95vrZ_}F{{X6=1^zZ23(Z8dANB@rg9sPFf z+0k$R8>S;W`gip2=-<)5qkl*Lj{Y6}cA>F*&yIfE_b?#Y(Z8dANB@rg9sN7{cl7V* z-_gILe@Fk0{vG|crPvm{$2gM`r(x9>bGUhu6}#g?CRguZ|5A4EW7%5_3!H6)xWEMSO2d5 zUH!ZIclFyuXIKBO{$2gM`gis3>fhDBtAAJju72C;0BW(H&aQqN>g?*@)xWEMSO2d5 zUH!ZIclARw+11YyWLLkfhCGe;dXiyZU$a@9E#uzo&ms|DOIm{WiMU({Hn# zJ^g$7_w?`S-_yURe^39Oeqbki`uFtj>EF}8r+-iXp8h@kd;0hEgFV^Pzo&ms|DOIm z{d@ZN^zZ54)4!*GPye2NoAK=F-_yURe@{Onkv;u;`uFtj>EF{24P{TiZE)+SEuYX_vzW#mv`}+6w z@9W>!zpsB^|Gxfx{rmd&_3!K7*T1iSU;n=Tef|6T_x0O7$L=}%`uFwk>t{Y<2c3QW z`}+6w@9W>!&w^xMKOB{P{rmd&_46Uw*Ka$Wef`i>_Vw@U-`Bsde_#K;{(b#GRd|vd z=s(bZp#MPsf&K&iuvK8I9OysLf1v+Bzx{p=^dIOy(0`!+K>vaM1N{(X0IeM8KhS@m zAJ)o&{saB|O8Au==m)rRpx@3r2l@~6ALzGL4kM5Q{RjFF^dIOy(0`!+KtG_F1O4{l zInaNg|3LqN{sa97`VaIU=s(bZp#MPsf&K&ihx!loAL>8Uf2jXZ|Dpav{Wj-0)PJb| zQ2(L+L;bK>4)q`Ew@J^Te)ueh`VaNnsOM1sq5ebthx!loAL>8UZ@->H{fGJw_1m)N zQ2(L+L;Z*P5A`4Fw{g#*epVuf`VaLV>bK+0q5ebthx!loAL>8Uf2jXZ|Dk@qB8U2c zIDh{WkN!jbhx(87v+6n0f299NKQtE}BS-p=^dIRz($5&>NdJ-kBmGDEkMtkuKhl4s z|49Fl{v-Y1U5@l0=|9qcr2k0&k^Uq7NBWQSAL&2Rf299N|B?P9{XA2S^dIRz(to7? 
zNdJ-kBmGDEkMtkuKhl4s|49Fl{v-WI`j7M<>1RuFr2k0&k^Uq7NBWQTAL~EXf2{vl z|FQmK{m1%`^&jg$*3TK_SpTtpAUMbRkM$qxKh}S&|5*RA{$u^e`j7P=>p#}dDCAiG zvHoNI$NHItFfU=Ya;*PY|FQmK{m1%`^&jg$)_<)3SpTv9WBteakM$qxKi1DZ1X9ki z{$u^e`j7P=>p#|itp8a5vHoNI$NEq7a}qhxf1>|HKQoaN{U`cQ^q=TI(a%xjL_Y+b z6a6RpPxPPYKhb}p|3v?Z{uBMMWw?u+=s(eaqW?tyiT)G)j89JVpXfi)f1>|H|B3z+ z{U`cQ^m7|I(SM@_<-YpXfi) zf1;l;%BlWS{ipg*^`GiL)qkr0RR5{|Q~jsOa+g zs{d5~seVvAr}|IzpXz5&a;l$g%BlWS{ftvi^`GiL)qkr0RR5{|Q~jsOa+gs{d5~ss2;_r}|IzpXxu=f2#jXKlhq5{b%~m^fRzI(|@M_ zO#hkwGyP}!S;Cy@XJm7x|4jdx{xkh&`p@*A=|9tdrvFU;nf^2VXZp|dpXoo-f2RLT z|C#q^`GlM*MF}6T>rU#b~5Ms&-I_{Ki7Y*|6KpM z{&W53`p@;B>p$1espVY%x&Cwgyiv~epX)!@&mQGmKgSjZDd+mn^`GlM*MF}6T>rWL zbN%P~&-I_{KiAJJ#c_e0>vv!v=laj}pX)!@@8CfAxSZ=}@ z{!9Ir`Y-ig>c7-~sh^jP3k|u{f2rT4hFt1rXmhFmQvap?OZ}JnFZHvvxzvBD|5E>@ ze%Bpxsh?jAbDK;3m-;XDU+TZqf2sdc|E2y*{g?VL^@ z{!9Ir`Y-ig>SrXw_2x=H+nX!>SNgB?yR;U5GFSSq^k3<}($7=oO8=F94mcceuJm8& zztVrD|4RRrets@j`mgkJbh*-hrTH(ztVrD|4RRr{ww`g`mgj~>A%u{rQdPpT%Z22t^Zp8wf<}U z*ZQyZU+cftf35#o|F!;W{fug^^yuk~N+zt(@P|5`tnm}~vl z`mgn0>%Z22t^Zp8wf<}U*ZQyZU+d=>bFH6=&5eGy4RWLZM*ofe8~r!>Z}i{jztMlA z|3?3f{u})_`fv2#=)ckLHbQRn-{`;5f203K|Be0|{Wtpg@Z9LX(SM`=M*ofe8~r!> zZ}i{jztQiyNpAGt=)ci_qyI+#jehr&a-;u7|Be0|{Wtn=^xx>e(SM`=M*ofe8~r!> z-QUiQ{u})_`dRhd>c7>0tN&L2t^Qm6xB74O-|D~Bf2;pi|E>O8{kQsW_225h)qku1 zR{yR3Tm85CZ}s2mztw-M|5pF4{#*UG`fv5$>c7>0tN&L2t$r6>a;yJV|E+%JG`IS1 z_225h)qku1R{yR3Tm85CdHdYzztw-M|5pF4eg;3c`fv5$>c7>0tAD0{rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle?rhle? zrhle?rhle?rhle?rvFa=o&G!hclz)2-|4^8f2aRW|DFCj{dfBB^xx^f(|@P`PXC?$ zJNA%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj z{dfBB^xx^f(|@P`PXC?$JNoqo!0hN>t~=d*FV=k*FV=k*FV?qwpr%- z=lbXR=lbXR=lb1%h}$Wd>!0hN>u0Gm*FV?qno;KZ=lbXR=lbXR=lXf;%=ORpbJua3 zEOY&H{d4_{bLRS4{>=5eSspe!?v}@eq|Ei->%Z53um4{Ez5aXs_xkVk-|N5Ef3M#O z{y4#(d;RzN@Acp7zt_)F=U%@XFuB)%um4`Z8v?o4&x7Y)|GoZu{rCFs_228i*Y9pj z?)Bg6zt`_-Q||TO>%Z53um4{Ez5aXs_xkzr-0Q#Bf3N>u|GoZu{rCFapUJ&`H)wLN z|6c#S{(JrR`tS9-Z;%K55BeYUKj?QUDi8V}^grl-(C-F99`rxxf6)J+-|db(=zq}v zp#MR?zdb=7^grl#Z7L7?AM`)yf6)J+-;KLG=yw+<5BeYUKj?qZ|DgXt|AYPq{SW#d z^grl-(Ep(SLBD%CdC>o$|3UwQe)o0qp#MStgZ>Bo5BeYUySo#2ck-bBLH~pP2mNl3 zVMSl z(pDbzKk9eqCy)9c^*`!=)c>geQU9a5C0$jKm33A|M36e|NT96J^K0o@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe 
z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@% zAO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk z{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$j zKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8 z{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5 zfB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG z`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A z|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW z@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K z|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<# z;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e z|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe z!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0` z|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+` zhyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36e|HJ=> z{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q%{~!K8{D1iW@c-fe!~ci> z5C0$jKm33A|M36e|HJ=>{}2Bk{y+SG`2X<#;s3+`hyM@%AO1i5fB66K|Kb0`|A+q% z{~!K8{D1iW@c-fe!~ci>5C0$jKm33A|M36i|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#S zU;e-RfBFCN|KhYfBFCN|KR;5qsDDxaqW(qw z{D1lXF6v*@zo>svKmT9;zx;ps|MLIk|I7cE|1bYv{=fWx`Tz3&<^Rk7m;W#SU;e-R zfBFCN|KI^`g0jM(o zbq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$i zP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb z0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&} z15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UI zfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g z0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l) z8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}t zodKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW z)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9 zK%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$ z0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@ z3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG z>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4 zpw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H= z0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{ z2B6LW)ER&}15jrG>I^`g0q9KsO#e*(OusV#bq1g_{WJYD{WJYD{WJYD{WJYD{WJYD z{WJYD{WJYD{WJYD{WJZ}0Mr?PIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`o zGXQl4pw0l)8Gt$iP-g(@3_zU$s51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P zIs;H=0O|}todKvb0CfhS&H&UIfI0(EX8`I9K%D`oGXQl4pw0l)8Gt$iP-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhSclz)2 z-|4^8?+iel0jM(obq1h!`tS7L>A%x|r~gj>o&G!hclz)2-|4^8f2aRW|DFCj{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u z{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{d4_u{muZ?8Gz39&-Kss&-FV4P-g(@3_zU$ zs51a{2B6LW)ER&}15jrG>I^`g0jM(obq1i$0Mr?P-s`{Df3N>u|GoZu{rCFs_228i 
z*Y6BKodM{*{(JrR`tS8S15jrG>I^`g0jM(obq1i$0Mr?PIs;H=0O|}todKvb0CfhS z&H&UIfZprB*MG17UjM!Rd;RzN|Gz5kWH+*`fTAeh4x$FddjmiS1|TxiO*T}}6eAGg zgV5R1{REbQf?|+2BP=WIZP^hoZnGZxANn8qANn8qANn8qANn8qANn8qANn8qANn8q zANn8qANn8qANn8qANn8qANn8qF#y8=3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b z127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A z01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j z0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3 zz%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8e zFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~< z3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk| z!vG8eFbu#j0K)(b127E0FaW~<3z%T&A01N{#48Sk|!vG8eFbu#j0K)(b127E0 zFaW~<3z%T&A01N{#48Sk|{{6#g>(Ky40~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y z(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifp zG=R|nMgtfPU^IZy07e5C4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPU^IZy07e5C z4PZ2Y(EvsR7!6=FfYAU(0~ifpG=R|nMgtfPVBhMe0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz}|j8X#k@Ej0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP z8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn z1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y z0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U z0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|o zz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQt zFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)D zj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1( zqXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}H zXaJ)Dj0P|oz-R!Y0gMJP8o+1(qXCQtFdD#U0HXnn1~3}HXaJ)Dj0P|oz-R!Y0gMJP 
z8o+1(qXCQtFdD#U0HXnn1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1pdad|0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180R2cm4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180DY~W1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0rV67G=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFF8!DOOaG;x1`rJ(8bCCFF8!DOOaG<+(tqi{^k4cf{g?hr|E2%Zf9b#UU-~co zm;OutrT@}@>A&<}`Y-*L{!9O*|I&Zyzw}@FFa4MPOaG<+(tqi{^k4cf{g?hr|E2%Z zf9b#UU;1zTxBgrIt^d}4>%aBi`fvTW{#*a8|JHx&zxChxZ~eFaTmP;9)_?22_22q$ z{kQ&G|E>Slf9t>X-}-3)(Ey?WL<5Kh5Dg$2Ks1180MP)t_22qw0MP)V0Yn4n)_?22 z_22q${kQ&G|E>Slf9t>X-}-O;xBgrIt)B)E4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1L$Y^ zX#mjxq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCkSPXmYs z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? 
z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V z0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?W zL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz z1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$ zhz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c z1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh z5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC? z4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1 zAR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ( z8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2 zKs1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4Immo zG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4 zfM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCF zXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks118 z0MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT z(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLawq5(t$hz1Z1AR0h4fM@{G z0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dg$2Ks1180MP)V0Yn3c1`rJ(8bCCFXaLaw zq5(t$hz1Z1AR0h4fM@{G0HOgz1BeC?4ImmoG=OLT(Ey?WL<5Kh5Dnn`)Iy=**Kfn` z4AyxJe=wMRF4SzRyX*axm%$d*gn;-wuzn8Zgzxux$?s%;KeANFQ^M7~# z`29P7`u?47-TCS-KK1P(|9!)qw@3Q#owrB&>aX8@%D--W_bK1q@hR^tah-Rre+?e% zovX?7&dbty=LG(F=XmgW=aAcZ=QPN9?|r~|@5%4H_m_X(JB@hWJ5qDr+k|`G+a+jt z{lWQQ&BpLsgITci!PC$A;6%IOYlA~O=YvxM=gIp0c`_M#p3FL(CsQ>B-^-Ip55q4E z-d3I`&%x)(G3@i?u-AEVdgwfT>qq@$!|r*q)9F0fx^kZE|2WSktk1KV$n$J!<~+Mr zIM4q0&NCC>JllzXp6v@i&o+1%eDN2{>*vK literal 0 HcmV?d00001 diff --git a/keras_nlp/tests/test_data/no_special_token_vocab.spm b/keras_nlp/tests/test_data/no_special_token_vocab.spm new file mode 100644 index 0000000000000000000000000000000000000000..582613aba70500f85977fac17732e99a2391d252 GIT binary patch literal 237677 zcmZU+4Oo=ddZ_)(3_lwFbQ?A?h-eKp)>vbWRjXjEajP}dxD7SdaEvupEpZ!atZ^G; z1_lO#ln^AuAe*o!!{5v>;4m}5Ff*~n8tY7r$8ZeCaCAznajTv98IJK7j^Dj7N%z-} z>$N5=hX_ZwS&^|yK3es(XADBW5Sqe@To z8VG)`m7Zvk%0EE=3O!TAp!Ay${i~I}R3y|d@|*FZZ-S4YIE|Yp6BbsaiGB%efVH5X zhd;Cst{j>9l1`Sc*2#)TM4z=&Cz=wnpwWn~Qbb!W5?w12RV@*xF(S} zyT~IJk%V56c%MiTB%)6|B{JcJ$fS_S&(4S>4~RT^UL<8mnVUg5ZB9C7e zNgENFa$6*QRAlNsk*$lyOCI^$_H3g3%{Sv@`;$rXsyQ4 z-^9z`eGxCaS0~78Qxas)grCW;UQ3WQl;idINwT+flKlP6NwV+B3G&AC6J-Bq6XhQs zogfE3`?_3!S@lkg z{KBY_HBZLK>en=~c2TVS#W%5%{d%lC|6HuB`zA{MG8iT8)awgRMazZ{ACX^9h?X3A zL|%O15!v|JIQgpyak6RkIC-gOoERsH{PogtqNn_Z29ZmoIiGwK@6t-qO05_cXhjdE z-8w1F(TQO(?Xq1fkCVr@HBnMNpp&v&I;k2jV&&ea@IQ?HC&==7I;lnvej&11#R(@L z7yUZwM!%i75{NTXq&Q6{!3kOkQLds{gnvUPW<*k`AT((;KZ}E$?X8cXn8u22XpdZvwH>B~2R(|)~tJ-?}CJ}GsyB%7? 
z#8@f1t&wjYE7g|gP_I`tlFBtxSVKl|Cs5`CA_vHm0jaOmNN|xxLXT?X@Cl6+;vXO{ z=46eWJdz^+bnH<%WlWJzUPzJCH&f)F^*@su(yJicS^CkxT=_Zo{#+JO=jZyT$-f?( zEa!ty$Y)<<$c3+;kgoL_$>LheP^_HfzW>qxgbaQ>u zQ8EMfY{Hq&#E6r8RC8ZHayI^Z39|%Q!SyQD*7ss05vd=D5yOd?vHA!_MN83o%D699 z^mhDvW2AIJte9rSigCF}HF30HcPoge?0T$}hhn7;zp4|la+tIl&~MR~T5@BglRjz2 z-3I36F|w9>HiMotHz2oxJwHbBkpDy;gWu6Fp3=!FbTc;oDe`c(h>QDM=qFxeS7wZu zh_|#>q&1B^m5ZFfzo?D284$5+VkLyTn!Z_dR3m4JBNQJ^ABh$Nex?$Ql=(DrkTlOy z4_)N-Fn!}W^7I#Q0YCi+{ey5t28|RqXh=gNr-`$)O(UjUkuv&0Ir-X49IEdh#;+(r z!Up}J0aNi)|Yw%Nj@iKbHc=|gs_%+v` z)Ji?q%#_<0O+BJt<(d=O>cU>teWdF}2EQl#$MiS!v&z3jCy!DV!&RN!<{r~^o&2k+ zON&k}aD5a%JMK#=>``oaNGFGV)cq6WZ5k+hej3|lEH%*=^f}b+CXu_`S4{a1(+_TN z?=YMv9zy|P^0B9B#LRAwNy@qRXWZja_hpE@pxS}@`YU8A z<(&kxVFt{HIMC0cOz?nvCXq+u0p>QOA)CI_#Q1~1d6fJuqFtD0XQ*q`N9ZjreJMd^ za{XoEZ#|PFro|%vle#Z&N|G4-t0#&yacu~{W4QkYUEiC8T_?#U@?Bh+B=_*2tC}fy)eq@^O`6^DjJxsD9HLE6iEJQjl_5zEmnVs`+xgtL0>5R* zt~15I`rd}z40&MKO?c>v*2qVsxdguo z%CU$0T1l@Sy#}yp33ifiY*`P*A)Pq!_rggC_A^c}PnRi; z=d@CsEpnFdra{_n4rxvxB#8rWArw&vZ?-Ry=pZPp(jsHOimPLtSA8~_GU2_q? zFz>&p6&?NeZTyVb$ven6+>;>qS)v4=qC65Up_ij2LR$Y11m90&p5y)?eR(NiST}@@ zAwN%&(3=naLW>AD1wXJYecFwz$)S|w=jncQh#S+Wf|^`uui4>j7GjAtbU04 zx-RkyHC~RA_Ym%Tst#ji58-w|Ib{q!$$9~OKYT_0)&2hqsm~?scKVHmJZwd$@9Ecw zEF@eSb=5%_wKkcHz5ur1Z-UdTi+)L(MQPOE0p{vX zgE3NuEFWR*$vsuf-G>K(`0f*CF7f?eWOWnqEGHgNc6^RBiiqbT@-mofSs#?didqu{ z-=W>FL`&Co@=iU!M7;kEUWLB{4Oq6v$~Eo__UNQFCsrcpuj79g8OwN{1P{=o@H5-l zGawIV#Kc)S}9d3@AB~^?_X@9`xV~d_Wks z5&FaA?Hl?{GrHjv_K&UG(2LO5agXtgZmf@#lGYa7s*a0EV%5@I~8c7s9DD{g%0Y`8WIxw14oiGzm@9irP2*2Y#U*=6+j@v~bVYD$Fbq54r=)w+R0%;aLj>aGwUl zc=|tb%>Og3v@!;ykS~>n8c&0-knXE7@-1RdZ>?woa&|frDr2K$JVi_l^a+qVb zYUBae4MS7Kken`!q+!TSm&1m1F)dH0{IuIn=7#KaF<8>&Nzy8A<2qpvpPwqTahqJ} z%uQTB3e;1v4|fTAdAi&sKDEyJ4P`Fp`UBi*P90DAVi~*j3AEQJ`*?KoV7izOOcgV2 zm&A4Zf^=!wI#pUo?<4Lv61E*_#{Z8=*G~M#eA;??x^!hul`hhJpX=8sPcrxSL9mmnaF{WL(%zd8n(ui);5%f;9veuWTx znYc1U=Fy&N{#9#!^J1MmgL@&g5T<1l@p48`hAo-b>%@WIE5ud2J3(3+M4IjRBfAm^ z$NcyO*IHcoLr|s3_+c1`m48+`*Z(_m71jT@NPD@+Ye;>FFtlUvIW@me72}9TK33sh z!u>q$iESH3X=mchQ*}ewd}MIkR5^T`F@g0^i1pA$;w{Gp4AC*N4t)n~fjv-6JLso# zjq>ZkfW8d>Dpe2U%fS5l_oQhUB|hrC5&t^yfPOmH>6d2OEr9zpoP!{X$I!D{@l4Ul zo7`(+Z+#j6i!g)w50$CD#J*!Fdd#jSt9rhom1|rtqfQLjj-UxnwjjUcAD}P3~ zvQx3riu-NWEO)t9PyS6;V`V>iHZXtdA(L=L_c;IHJV331^_(H7zEB#Dkwe^Tq%T$x z{%z*`ileL_4lv&}FwRnURhw8l?2eTO#2wT#&OD)&PS!im5~qoKW|IFxq=|U+mDCq) z9HeiBzK)Vuu2tf1UK1-d$RylmU#z4cQ_05f$X*=sBTKB@h$8)H&Vq2aZl!(_upjhK z8P=w;J#nVypJFYkGN|ssp9cF$BasCp-{=3#_WIEYU z=Mm~0=)_Go5*7CKDdZ;g?XU5uCb%>z_@@Ee`PA{W5tq}5d`GMjsy z$m6gCJqvyT%i$T=N7!can}Mt+|1+j@&X%K-HC#(aPlX-RsEgM`j%-hmcSS45Xy*ot zMm{KKoQC(0YUC&~IE{H%?f;)ezKo>OLY&10`#DEb32E4qCM3xo;_Ks{l4z0r=v%;F zLf@FhSwKdTx@P^IXm#MH+Iu^T+j`M?TkTC zOkeC#`)USbBmF=hCf%VVIY=DaAowZ$gpOOnm{^j`oIx3vLGbIz66&5TALVM~dCLFW zY%TlcB>6B$BTd|2M4RSQM_F(Oo7;oDkNh?AF0z~JH3{?&^ao%Zh?kPvT*3m#ZOx&dV48=Qq9s~;obO7=>IMArX_QhS&_QJyGu&oQS^4{bfd;ShFy zi!~GB*Ae^%vSd?|6qj=*L8BU0CbF+dlnDM7_C6(Bsf%wOl^HBKdOzXJ4x4AaMgRPUv7}QYzogE# zacv8K3;uzbX^|StEUywt5 zrK|(YT-Rg6hEeKS#Y;X7Ysf1$ANrCyKw1gY0uBf<{~lpnsUf})`3%NWSH0Xz8pVts zH5PO`aSRYg1G0@dgRn1=o@y)o8F1lmU_8OUcvlqXaI|wbP7N!2`i#@!Vuh zuKWdm(hTujDa4sV=tbh+N!qWmzU2C=*xHe!j4dB85k2;97^E+d*U+nkdy{Z);b*5W zL&DN#Yy;=Y~rjH`GD|L%2+vTl)ck7#_OC>p_g7|f2yw2ma6}n)%7%yb@*A8PW^6F z*K@eO`(gSuD`|JaD%)DdwK~F$*&p#X{&4?V{9Aswe}lUJI`wy(zDs-#<*)J&o4K|^ zUEj@h>QwcWVv9y5sr*x3m3|6(9LywK@KfsJORXHI%okL;i^&&ULLY{Ua9R0N234MG z=(AP)r*IF@CTgF?I!KRhz^{aRi_b8Q5x+~-qw-gIBi;G<>1Z#Lx*tE)r{i$f9Hsod z%&DqPw-YuKdm>CtE@#Bbk8q*a>E~}m%Mr~|spDE9gjj=CsBoa#sUBGjOGsy~LiuMr z%v10?%J~U4i0$=|F9Y+sz7qE*)DiYvvYv2j)VhWK%GmPb`GS5|mKb0i@vVi>qNNgi 