From ca690aa8ad0437b58372087d508395d0a280309f Mon Sep 17 00:00:00 2001 From: GustavBaumgart <98069699+GustavBaumgart@users.noreply.github.com> Date: Thu, 2 Feb 2023 16:06:57 -0800 Subject: [PATCH 01/16] optimizer compatibility with tensorflow and example for medmnist keras/pytorch (#320) Tensorflow compatibility for new optimizers was added, which included fedavg, fedadam, fedadagrad, and fedyogi. A shell script for tesing all 8 possible combinations of optimizers and frameworks is included. This allows the medmnist example to be run with keras (the folder structure was refactored to include a trainer and aggregator for keras). The typo in fedavg.py has now been fixed. --- lib/python/flame/examples/medmnist/README.md | 55 +++++++ .../medmnist/aggregator/fedadagrad.json | 67 +++++++++ .../examples/medmnist/aggregator/fedadam.json | 68 +++++++++ .../examples/medmnist/aggregator/fedavg.json | 63 ++++++++ .../examples/medmnist/aggregator/fedyogi.json | 68 +++++++++ .../medmnist/aggregator/keras/__init__.py | 17 +++ .../medmnist/aggregator/keras/main.py | 90 +++++++++++ .../medmnist/aggregator/pytorch/__init__.py | 17 +++ .../medmnist/aggregator/{ => pytorch}/main.py | 0 lib/python/flame/examples/medmnist/example.sh | 55 +++++++ .../trainer/fedadagrad/fedadagrad1.json | 67 +++++++++ .../trainer/fedadagrad/fedadagrad2.json | 67 +++++++++ .../trainer/fedadagrad/fedadagrad3.json | 67 +++++++++ .../medmnist/trainer/fedadam/fedadam1.json | 68 +++++++++ .../medmnist/trainer/fedadam/fedadam2.json | 68 +++++++++ .../medmnist/trainer/fedadam/fedadam3.json | 68 +++++++++ .../medmnist/trainer/fedavg/fedavg1.json | 63 ++++++++ .../medmnist/trainer/fedavg/fedavg2.json | 63 ++++++++ .../medmnist/trainer/fedavg/fedavg3.json | 63 ++++++++ .../medmnist/trainer/fedyogi/fedyogi1.json | 68 +++++++++ .../medmnist/trainer/fedyogi/fedyogi2.json | 68 +++++++++ .../medmnist/trainer/fedyogi/fedyogi3.json | 68 +++++++++ .../medmnist/trainer/keras/__init__.py | 17 +++ 
.../examples/medmnist/trainer/keras/main.py | 140 ++++++++++++++++++ .../medmnist/trainer/pytorch/__init__.py | 17 +++ .../medmnist/trainer/{ => pytorch}/main.py | 0 lib/python/flame/optimizer/fedadagrad.py | 4 +- lib/python/flame/optimizer/fedadam.py | 4 +- lib/python/flame/optimizer/fedavg.py | 4 +- lib/python/flame/optimizer/fedopt.py | 19 ++- lib/python/flame/optimizer/fedyogi.py | 6 +- 31 files changed, 1497 insertions(+), 12 deletions(-) create mode 100644 lib/python/flame/examples/medmnist/README.md create mode 100644 lib/python/flame/examples/medmnist/aggregator/fedadagrad.json create mode 100644 lib/python/flame/examples/medmnist/aggregator/fedadam.json create mode 100644 lib/python/flame/examples/medmnist/aggregator/fedavg.json create mode 100644 lib/python/flame/examples/medmnist/aggregator/fedyogi.json create mode 100644 lib/python/flame/examples/medmnist/aggregator/keras/__init__.py create mode 100644 lib/python/flame/examples/medmnist/aggregator/keras/main.py create mode 100644 lib/python/flame/examples/medmnist/aggregator/pytorch/__init__.py rename lib/python/flame/examples/medmnist/aggregator/{ => pytorch}/main.py (100%) create mode 100644 lib/python/flame/examples/medmnist/example.sh create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad1.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad2.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad3.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadam/fedadam1.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadam/fedadam2.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedadam/fedadam3.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedavg/fedavg1.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedavg/fedavg2.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedavg/fedavg3.json create mode 
100644 lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi1.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi2.json create mode 100644 lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi3.json create mode 100644 lib/python/flame/examples/medmnist/trainer/keras/__init__.py create mode 100644 lib/python/flame/examples/medmnist/trainer/keras/main.py create mode 100644 lib/python/flame/examples/medmnist/trainer/pytorch/__init__.py rename lib/python/flame/examples/medmnist/trainer/{ => pytorch}/main.py (100%) diff --git a/lib/python/flame/examples/medmnist/README.md b/lib/python/flame/examples/medmnist/README.md new file mode 100644 index 000000000..7cb276fb5 --- /dev/null +++ b/lib/python/flame/examples/medmnist/README.md @@ -0,0 +1,55 @@ +## MedMNIST + +We will use the PathMNIST dataset from [MedMNIST](https://medmnist.com/) to go over an example of using an adaptive aggregator on a data heterogeneity setting. +This example is run using `conda`. +If the environment is not set up, you may follow the instructions for setup [here](../../../../../docs/08-flame-sdk.md). +In the `medmnist` directory, we start by using the command `conda activate flame`. +This needs to be done before the shell script is run in a terminal. + +It is also important to change the job ID to a new random job ID before running this (or any other) example while using the mqtt broker. +If anyone else is running the example with the same job ID (and broker), the program may behave in unexpected ways (for instance, one of the trainers may correspond with a different aggregator that was not intended for it). +The ID should be changed in `aggregator/fedavg.json`, `trainer/fedavg/fedavg1.json`, `trainer/fedavg/fedavg2.json`, and `trainer/fedavg/fedavg3.json`. +Make sure you changed the job IDs to the same (new) job ID in all four files. +The task IDs should remain the same as before. 
+ +Once you are back in the `medmnist` directory, you can run three trainers along with one aggregator with `bash example.sh fedavg pytorch`. + +By using `cat example.sh` we can see how this file sets up the federated learning. + +``` +cd trainer/$1 + +for i in {1..3} +do + rm -rf $1$i + mkdir $1$i + cd $1$i + python ../../$2/main.py ../$1$i.json > log$i.txt & + cd .. +done + +cd ../../aggregator +rm -f $1_log.txt +python $2/main.py $1.json > $1_log.txt & +``` + +The first parameter specifies the optimizer, and the second parameter specifies the framework used. + +Initially, we run three trainers in different working directories (this keeps the downloaded files separate) using three different config files `fedavg1.json`, `fedavg2.json`, and `fedavg3.json`. +These files are located under the `fedavg` folder. +The log file for trainer `i` is located under `fedavg/fedavg$i`. +After this, the script runs the pytorch aggregator along with its configuration within `aggregator`. +The log file for this will be under `aggregator` as well, and is named `fedavg_log.txt`. +In order to check the progress of the program you may run `cat trainer/fedavg/fedavg1/log1.txt` from the `medmnist` folder. +This will show the output of the first trainer so far. + +## Keras + +In order to use keras to run these examples, we need to change the second argument for the script. For the example above, we would run `bash example.sh fedavg keras`. + +## Other Optimizers + +Config files for other optimizers have been created as well. +To test out different optimizers, you may run the script by changing the first parameter to another optimizer. Optimizers currently available for this example are `fedavg`, `fedadagrad`, `fedadam`, and `fedyogi`. + +Keep in mind that the config files you may need to change are in two different locations (as discussed earlier), and please make sure to change the job IDs provided to avoid collisions. 
diff --git a/lib/python/flame/examples/medmnist/aggregator/fedadagrad.json b/lib/python/flame/examples/medmnist/aggregator/fedadagrad.json new file mode 100644 index 000000000..f1e27ce67 --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/fedadagrad.json @@ -0,0 +1,67 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622b358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadagrad", + "kwargs": { + "beta_1" : 0, + "eta" : 0.1, + "tau" : 0.01 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/medmnist/aggregator/fedadam.json b/lib/python/flame/examples/medmnist/aggregator/fedadam.json new file mode 100644 index 000000000..23201ea52 --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/fedadam.json @@ -0,0 +1,68 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + 
"pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "612a358619ab39012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadam", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/medmnist/aggregator/fedavg.json b/lib/python/flame/examples/medmnist/aggregator/fedavg.json new file mode 100644 index 000000000..15d3ad243 --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/fedavg.json @@ -0,0 +1,63 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedavg", + "kwargs": {} + }, + "maxRunTime": 300, + 
"realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/medmnist/aggregator/fedyogi.json b/lib/python/flame/examples/medmnist/aggregator/fedyogi.json new file mode 100644 index 000000000..fa6379d72 --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/fedyogi.json @@ -0,0 +1,68 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "511a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedyogi", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/medmnist/aggregator/keras/__init__.py b/lib/python/flame/examples/medmnist/aggregator/keras/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/keras/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist/aggregator/keras/main.py b/lib/python/flame/examples/medmnist/aggregator/keras/main.py new file mode 100644 index 000000000..b4fbe1683 --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/keras/main.py @@ -0,0 +1,90 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""MedMNIST horizontal FL aggregator for Keras.""" + +import logging + +from flame.config import Config +from flame.dataset import Dataset +from flame.mode.horizontal.top_aggregator import TopAggregator +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistAggregator(TopAggregator): + """Keras MedMNist Aggregator.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.model = None + + self.dataset: Dataset = None + + self.num_classes = 9 + self.input_shape = (28, 28, 3) + + def initialize(self): + """Initialize role.""" + + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(33, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(66, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load a test dataset.""" + # Implement this if loading data is needed in aggregator + pass + + def train(self) -> None: + """Train a model.""" + # Implement this if training is needed in aggregator + pass + + def evaluate(self) -> None: + """Evaluate (test) a model.""" + # Implement this if testing is needed in aggregator + pass + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + a = KerasMnistAggregator(config) + a.compose() + a.run() diff --git a/lib/python/flame/examples/medmnist/aggregator/pytorch/__init__.py 
b/lib/python/flame/examples/medmnist/aggregator/pytorch/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist/aggregator/pytorch/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist/aggregator/main.py b/lib/python/flame/examples/medmnist/aggregator/pytorch/main.py similarity index 100% rename from lib/python/flame/examples/medmnist/aggregator/main.py rename to lib/python/flame/examples/medmnist/aggregator/pytorch/main.py diff --git a/lib/python/flame/examples/medmnist/example.sh b/lib/python/flame/examples/medmnist/example.sh new file mode 100644 index 000000000..9ef6577c5 --- /dev/null +++ b/lib/python/flame/examples/medmnist/example.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +if [[ $# -ne 2 ]]; then + echo 'Expected two arguments' >&2 + exit 1 +fi + +optimizer=$1 +framework=$2 + +case $optimizer in + fedavg|fedadagrad|fedadam|fedyogi) + ;; + *) + echo 'Expected optimizer to be fedavg, fedadagrad, fedadam, or fedyogi' + exit 1 +esac + +case $framework in + pytorch|keras) + ;; + *) + echo 'Expected framework to be pytorch or keras' + exit 1 +esac + +cd trainer/$optimizer + +for i in {1..3} +do + rm -rf $optimizer$i + mkdir $optimizer$i + cd $optimizer$i + python ../../$framework/main.py ../$optimizer$i.json > log$i.txt & + cd .. +done + +cd ../../aggregator +rm -f ${optimizer}_log.txt +python $framework/main.py $optimizer.json > ${optimizer}_log.txt & diff --git a/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad1.json b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad1.json new file mode 100644 index 000000000..5b2fed879 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad1.json @@ -0,0 +1,67 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site1.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622b358619ab59012eabeefb", + "name": "mednist" 
+ }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadagrad", + "kwargs": { + "beta_1" : 0, + "eta" : 0.1, + "tau" : 0.01 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad2.json b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad2.json new file mode 100644 index 000000000..690adf25e --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad2.json @@ -0,0 +1,67 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site2.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622b358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadagrad", + "kwargs": { + "beta_1" : 0, + "eta" : 0.1, + "tau" : 0.01 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad3.json b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad3.json new file mode 100644 index 000000000..3f370e6b8 --- /dev/null +++ 
b/lib/python/flame/examples/medmnist/trainer/fedadagrad/fedadagrad3.json @@ -0,0 +1,67 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580372", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site3.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622b358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadagrad", + "kwargs": { + "beta_1" : 0, + "eta" : 0.1, + "tau" : 0.01 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam1.json b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam1.json new file mode 100644 index 000000000..88bd5eda1 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam1.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": 
["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site1.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "612a358619ab39012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadam", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam2.json b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam2.json new file mode 100644 index 000000000..110714d50 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam2.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site2.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "612a358619ab39012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadam", + "kwargs": { 
+ "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam3.json b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam3.json new file mode 100644 index 000000000..a53a85de8 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedadam/fedadam3.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580372", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site3.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "612a358619ab39012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedadam", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg1.json b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg1.json new file mode 100644 index 000000000..314f3188f --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg1.json @@ -0,0 +1,63 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "mqtt", + "brokers": [ + { + "host": 
"broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site1.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedavg", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg2.json b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg2.json new file mode 100644 index 000000000..8ef6567db --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg2.json @@ -0,0 +1,63 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site2.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + 
"baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedavg", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg3.json b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg3.json new file mode 100644 index 000000000..7c01f8208 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedavg/fedavg3.json @@ -0,0 +1,63 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580372", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site3.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedavg", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi1.json b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi1.json new file mode 100644 index 000000000..c4851ce41 --- /dev/null +++ 
b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi1.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site1.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "511a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedyogi", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi2.json b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi2.json new file mode 100644 index 000000000..4ba657e63 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi2.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + 
"trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site2.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "511a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedyogi", + "kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi3.json b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi3.json new file mode 100644 index 000000000..47a7eb44f --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/fedyogi/fedyogi3.json @@ -0,0 +1,68 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580372", + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site3.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 10, + "epochs": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "511a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedyogi", + 
"kwargs": { + "beta_1" : 0.9, + "beta_2" : 0.99, + "eta" : 0.01, + "tau" : 0.001 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist/trainer/keras/__init__.py b/lib/python/flame/examples/medmnist/trainer/keras/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/keras/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist/trainer/keras/main.py b/lib/python/flame/examples/medmnist/trainer/keras/main.py new file mode 100644 index 000000000..da06cd929 --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/keras/main.py @@ -0,0 +1,140 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""MedMNIST horizontal FL trainer for Keras.""" + +import logging +from random import randrange +from statistics import mean + +import numpy as np +from flame.config import Config +from flame.mode.horizontal.trainer import Trainer +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistTrainer(Trainer): + """Keras MedMNist Trainer.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.dataset_size = 0 + + self.num_classes = 9 + # 3-channel image + self.input_shape = (28, 28, 3) + + self.model = None + self._x_train = None + self._y_train = None + self._x_test = None + self._y_test = None + + self.epochs = self.config.hyperparameters['epochs'] + self.batch_size = 128 + if 'batchSize' in self.config.hyperparameters: + self.batch_size = self.config.hyperparameters['batchSize'] + + def initialize(self) -> None: + """Initialize role.""" + + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(33, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(66, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load data.""" + + self._download() + + npz_file = np.load("pathmnist.npz") + x_train = npz_file['train_images'].astype("float32") / 255 + y_train = npz_file['train_labels'] + x_test = npz_file['val_images'].astype("float32") / 255 + y_test = npz_file['val_labels'] + + + # convert class vectors to binary class matrices + y_train = keras.utils.to_categorical(y_train, self.num_classes) + y_test = keras.utils.to_categorical(y_test, 
self.num_classes) + + self._x_train = x_train + self._y_train = y_train + self._x_test = x_test + self._y_test = y_test + + + def _download(self) -> None: + import requests + r = requests.get(self.config.dataset, allow_redirects=True) + open('pathmnist.npz', 'wb').write(r.content) + + def train(self) -> None: + """Train a model.""" + history = self.model.fit(self._x_train, + self._y_train, + batch_size=self.batch_size, + epochs=self.epochs, + validation_split=0.1) + + # save dataset size so that the info can be shared with aggregator + self.dataset_size = len(self._x_train) + + loss = mean(history.history['loss']) + accuracy = mean(history.history['accuracy']) + self.update_metrics({'loss': loss, 'accuracy': accuracy}) + + def evaluate(self) -> None: + """Evaluate a model.""" + score = self.model.evaluate(self._x_test, self._y_test, verbose=0) + + logger.info(f"Test loss: {score[0]}") + logger.info(f"Test accuracy: {score[1]}") + + # update metrics after each evaluation so that the metrics can be + # logged in a model registry. + self.update_metrics({'test-loss': score[0], 'test-accuracy': score[1]}) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + t = KerasMnistTrainer(config) + t.compose() + t.run() diff --git a/lib/python/flame/examples/medmnist/trainer/pytorch/__init__.py b/lib/python/flame/examples/medmnist/trainer/pytorch/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist/trainer/pytorch/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist/trainer/main.py b/lib/python/flame/examples/medmnist/trainer/pytorch/main.py similarity index 100% rename from lib/python/flame/examples/medmnist/trainer/main.py rename to lib/python/flame/examples/medmnist/trainer/pytorch/main.py diff --git a/lib/python/flame/optimizer/fedadagrad.py b/lib/python/flame/optimizer/fedadagrad.py index e4d12faef..043d300e5 100644 --- a/lib/python/flame/optimizer/fedadagrad.py +++ b/lib/python/flame/optimizer/fedadagrad.py @@ -35,5 +35,5 @@ def _delta_v_pytorch(self): return def _delta_v_tensorflow(self): - # TODO: tensorflow implementation - raise NotImplementedError("Tensorflow implementation not yet implemented") + self.v_t = [self.v_t[idx] + self.d_t[idx]**2 for idx in range(len(self.v_t))] + return diff --git a/lib/python/flame/optimizer/fedadam.py b/lib/python/flame/optimizer/fedadam.py index 639c19c45..07a7786c4 100644 --- a/lib/python/flame/optimizer/fedadam.py +++ b/lib/python/flame/optimizer/fedadam.py @@ -35,5 +35,5 @@ def _delta_v_pytorch(self): return def _delta_v_tensorflow(self): - # TODO: tensorflow implementation - raise NotImplementedError("Tensorflow implementation not yet implemented") + self.v_t = [self.beta_2 * self.v_t[idx] + (1 - self.beta_2) * self.d_t[idx]**2 for idx in range(len(self.v_t))] + return diff --git a/lib/python/flame/optimizer/fedavg.py b/lib/python/flame/optimizer/fedavg.py index 3aa3abea2..e33617e95 100644 --- a/lib/python/flame/optimizer/fedavg.py +++ b/lib/python/flame/optimizer/fedavg.py @@ 
-36,7 +36,7 @@ def __init__(self): if ml_framework_in_use == MLFramework.PYTORCH: self.aggregate_fn = self._aggregate_pytorch elif ml_framework_in_use == MLFramework.TENSORFLOW: - self.aggregate_fn = self._aggregate_tesnorflow + self.aggregate_fn = self._aggregate_tensorflow else: raise NotImplementedError( "supported ml framework not found; " @@ -74,7 +74,7 @@ def _aggregate_pytorch(self, tres, rate): for k, v in tres.weights.items(): self.agg_weights[k] += v * rate - def _aggregate_tesnorflow(self, tres, rate): + def _aggregate_tensorflow(self, tres, rate): logger.debug("calling _aggregate_tensorflow") if self.agg_weights is None: diff --git a/lib/python/flame/optimizer/fedopt.py b/lib/python/flame/optimizer/fedopt.py index 79d744f16..f08737b63 100644 --- a/lib/python/flame/optimizer/fedopt.py +++ b/lib/python/flame/optimizer/fedopt.py @@ -48,7 +48,7 @@ def __init__(self, beta_1, beta_2, eta, tau): if ml_framework_in_use == MLFramework.PYTORCH: self.adapt_fn = self._adapt_pytorch elif ml_framework_in_use == MLFramework.TENSORFLOW: - self.adapt_fn = self._adapt_tesnorflow + self.adapt_fn = self._adapt_tensorflow else: raise NotImplementedError( "supported ml framework not found; " @@ -96,7 +96,18 @@ def _adapt_pytorch(self, average, current): self.current_weights = OrderedDict({k: self.current_weights[k] + self.eta * self.m_t[k] / (torch.sqrt(self.v_t[k]) + self.tau) for k in self.current_weights.keys()}) - def _adapt_tesnorflow(self, average, current): + def _adapt_tensorflow(self, average, current): + import tensorflow as tf logger.debug("calling _adapt_tensorflow") - # TODO: Implement Tensorflow Version - raise NotImplementedError("Tensorflow implementation not yet implemented") + + self.d_t = [average[idx]-current[idx] for idx in range(len(average))] + + if self.m_t is None: + self.m_t = [tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t))] + self.m_t = [self.beta_1 * self.m_t[idx] + (1 - self.beta_1) * self.d_t[idx] for idx in range(len(self.m_t))] + 
+ if self.v_t is None: + self.v_t = [tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t))] + self._delta_v_tensorflow() + + self.current_weights = [self.current_weights[idx] + self.eta * self.m_t[idx] / (tf.sqrt(self.v_t[idx]) + self.tau) for idx in range(len(self.current_weights))] diff --git a/lib/python/flame/optimizer/fedyogi.py b/lib/python/flame/optimizer/fedyogi.py index 3f14a5b18..e3b13ce59 100644 --- a/lib/python/flame/optimizer/fedyogi.py +++ b/lib/python/flame/optimizer/fedyogi.py @@ -24,6 +24,7 @@ class FedYogi(FedOPT): """FedYogi class.""" + logger.debug("Initializing fedyogi") def __init__(self, beta_1=0.9, beta_2=0.99, eta=1e-2, tau=1e-3): """Initialize FedYogi instance.""" @@ -36,5 +37,6 @@ def _delta_v_pytorch(self): return def _delta_v_tensorflow(self): - # TODO: tensorflow implementation - raise NotImplementedError("Tensorflow implementation not yet implemented") + import tensorflow as tf + self.v_t = [self.v_t[idx] - (1 - self.beta_2) * self.d_t[idx]**2 * tf.sign(self.v_t[idx] - self.d_t[idx]**2) for idx in range(len(self.v_t))] + return From 0e0e739e63f68a1817b68fbcb915afab83698795 Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Fri, 3 Feb 2023 09:18:26 -0800 Subject: [PATCH 02/16] feat+fix: grpc support for hierarchical fl (#321) Hierarchical fl didn't work with grpc as backend. This is because groupby field was not considered in metaserver service and p2p backend. In addition, a middle aggregator hangs even after a job is completed. This deadlock occurs because p2p backend cleanup code is called as a part of a channel cleanup. However, in a middle aggregator, p2p backend is responsible for tasks across all channels. The p2p cleanup code couldn't finish cleanup because a broadcast task in the other channel can't finish. This bug is fixed here by getting the p2p backend cleanup code outside of channel cleanup code. 
--- cmd/metaserver/app/metastore.go | 76 +++++++++++++++++-------- cmd/metaserver/app/server.go | 4 +- lib/python/flame/backend/mqtt.py | 13 ++++- lib/python/flame/backend/p2p.py | 1 + lib/python/flame/channel.py | 7 --- lib/python/flame/channel_manager.py | 11 +++- lib/python/flame/proto/meta_pb2.py | 16 +++--- lib/python/flame/proto/meta_pb2_grpc.py | 16 +++--- lib/python/setup.py | 3 +- pkg/proto/meta.proto | 3 +- pkg/proto/meta/meta.pb.go | 70 +++++++++++++---------- scripts/license_header.txt | 2 +- 12 files changed, 135 insertions(+), 87 deletions(-) diff --git a/cmd/metaserver/app/metastore.go b/cmd/metaserver/app/metastore.go index bb2adfd1d..3cb58dfab 100644 --- a/cmd/metaserver/app/metastore.go +++ b/cmd/metaserver/app/metastore.go @@ -33,68 +33,96 @@ type job struct { } type channel struct { - roles map[string]*metaInfo + roles map[string]*role } -type metaInfo struct { +type role struct { + groups map[string]*endInfo +} + +type endInfo struct { endpoints map[string]chan bool } func (j *job) register(mi *pbMeta.MetaInfo) error { ch, ok := j.channels[mi.ChName] if !ok { - ch = &channel{roles: make(map[string]*metaInfo)} + ch = &channel{roles: make(map[string]*role)} j.channels[mi.ChName] = ch } - if err := ch.register(mi.Me, mi.Endpoint); err != nil { + if err := ch.register(mi); err != nil { return err } return nil } -func (j *job) search(chName string, role string) map[string]chan bool { +func (j *job) search(chName string, roleName string, groupName string) map[string]chan bool { ch, ok := j.channels[chName] if !ok { return nil } - mi := ch.search(role) + return ch.search(roleName, groupName) +} - if mi == nil { - return nil +func (ch *channel) register(mi *pbMeta.MetaInfo) error { + myRole, ok := ch.roles[mi.Me] + if !ok { + myRole = &role{groups: make(map[string]*endInfo)} + ch.roles[mi.Me] = myRole } - return mi.endpoints + if err := myRole.register(mi); err != nil { + return err + } + + return nil } -func (ch *channel) register(role string, 
endpoint string) error { - mi, ok := ch.roles[role] +func (ch *channel) search(roleName string, groupName string) map[string]chan bool { + r, ok := ch.roles[roleName] if !ok { - mi = &metaInfo{endpoints: make(map[string]chan bool)} - ch.roles[role] = mi - } - - _, ok = mi.endpoints[endpoint] - if ok { - zap.S().Infof("endpoint %s already registered", endpoint) return nil } - // registering for the first time, set heart beat channel nil - mi.endpoints[endpoint] = nil + return r.search(groupName) +} + +func (r *role) register(mi *pbMeta.MetaInfo) error { + ei, ok := r.groups[mi.Group] + if !ok { + ei = &endInfo{endpoints: make(map[string]chan bool)} + r.groups[mi.Group] = ei + } - zap.S().Infof("done calling ch.register() for endpoint %s", endpoint) + ei.register(mi) return nil } -func (ch *channel) search(role string) *metaInfo { - mi, ok := ch.roles[role] +func (r *role) search(groupName string) map[string]chan bool { + ei, ok := r.groups[groupName] if !ok { return nil } - return mi + return ei.search() +} + +func (ei *endInfo) register(mi *pbMeta.MetaInfo) { + _, ok := ei.endpoints[mi.Endpoint] + if ok { + zap.S().Infof("endpoint %s already registered", mi.Endpoint) + } + + // registering for the first time, set heart beat channel nil + ei.endpoints[mi.Endpoint] = nil + + zap.S().Infof("done calling ch.register() for endpoint %s", mi.Endpoint) +} + +func (ei *endInfo) search() map[string]chan bool { + return ei.endpoints } diff --git a/cmd/metaserver/app/server.go b/cmd/metaserver/app/server.go index 1ebb9020a..9b9c7c3b4 100644 --- a/cmd/metaserver/app/server.go +++ b/cmd/metaserver/app/server.go @@ -86,7 +86,7 @@ func (s *metaServer) RegisterMetaInfo(ctx context.Context, in *pbMeta.MetaInfo) Status: pbMeta.MetaResponse_SUCCESS, Endpoints: make([]string, 0), } - endpoints := j.search(in.ChName, in.Other) + endpoints := j.search(in.ChName, in.Other, in.Group) for endpoint := range endpoints { resp.Endpoints = append(resp.Endpoints, endpoint) } @@ -116,7 +116,7 @@ 
func (s *metaServer) setupEndpointTimeout(in *pbMeta.MetaInfo) error { return fmt.Errorf("job id %s not found", in.JobId) } - endpoints := j.search(in.ChName, in.Me) + endpoints := j.search(in.ChName, in.Me, in.Group) if endpoints == nil { return fmt.Errorf("no endpoint found for role %s", in.Me) } diff --git a/lib/python/flame/backend/mqtt.py b/lib/python/flame/backend/mqtt.py index 0083748f2..01af0e9ba 100644 --- a/lib/python/flame/backend/mqtt.py +++ b/lib/python/flame/backend/mqtt.py @@ -26,8 +26,8 @@ from paho.mqtt.client import MQTTv5 from ..channel import Channel -from ..common.constants import (DEFAULT_RUN_ASYNC_WAIT_TIME, MQTT_TOPIC_PREFIX, - BackendEvent, CommType) +from ..common.constants import (DEFAULT_RUN_ASYNC_WAIT_TIME, EMPTY_PAYLOAD, + MQTT_TOPIC_PREFIX, BackendEvent, CommType) from ..common.util import background_thread_loop, run_async from ..proto import backend_msg_pb2 as msg_pb2 from .abstract import AbstractBackend @@ -418,6 +418,11 @@ async def _tx_task(self, channel, end_id, comm_type: CommType): while True: data = await txq.get() + if data == EMPTY_PAYLOAD: + txq.task_done() + logger.debug("task got an empty msg from queue") + break + self.send_chunks(topic, channel.name(), data) txq.task_done() @@ -458,6 +463,10 @@ def send_chunk(self, topic: str, channel_name: str, data: bytes, logger.debug(f'sending chunk {seqno} to {topic} is done') + async def cleanup(self): + """Clean up resources in backend.""" + pass + class AsyncioHelper: """Asyncio helper class. 
diff --git a/lib/python/flame/backend/p2p.py b/lib/python/flame/backend/p2p.py index fe1c71d61..989eb26d5 100644 --- a/lib/python/flame/backend/p2p.py +++ b/lib/python/flame/backend/p2p.py @@ -212,6 +212,7 @@ async def _register_channel(self, channel) -> None: ch_name=channel.name(), me=channel.my_role(), other=channel.other_role(), + group=channel.groupby(), endpoint=self._backend, ) diff --git a/lib/python/flame/channel.py b/lib/python/flame/channel.py index 5d19053dc..e4d64dd8a 100644 --- a/lib/python/flame/channel.py +++ b/lib/python/flame/channel.py @@ -313,11 +313,6 @@ def cleanup(self): """Clean up resources allocated for the channel.""" async def _inner(): - # we add this check because not all backends implement cleanup() - # TODO: once all backends implement cleanup(), remove this check - if not hasattr(self._backend, 'cleanup'): - return - # we use EMPTY_PAYLOAD as signal to finish tx tasks # put EMPTY_PAYLOAD to broadcast queue @@ -327,8 +322,6 @@ async def _inner(): # put EMPTY_PAYLOAD to unicast queue for each end await end.put(EMPTY_PAYLOAD) - await self._backend.cleanup() - _, _ = run_async(_inner(), self._backend.loop()) """ diff --git a/lib/python/flame/channel_manager.py b/lib/python/flame/channel_manager.py index 61e68069f..dcb60dc7b 100644 --- a/lib/python/flame/channel_manager.py +++ b/lib/python/flame/channel_manager.py @@ -203,7 +203,10 @@ def cleanup(self): for _, ch in self._channels.items(): ch.cleanup() - async def _inner(): + async def _inner(backend): + # clean up backend + await backend.cleanup() + for task in asyncio.all_tasks(): task.cancel() try: @@ -211,8 +214,10 @@ async def _inner(): except asyncio.CancelledError: logger.debug(f"successfully cancelled {task.get_name()}") + logger.debug("done with cleaning up asyncio tasks") + if self._backend: - _ = run_async(_inner(), self._backend.loop()) + _ = run_async(_inner(self._backend), self._backend.loop()) for k, v in self._backends.items(): - _ = run_async(_inner(), v.loop()) + _ = 
run_async(_inner(v), v.loop()) diff --git a/lib/python/flame/proto/meta_pb2.py b/lib/python/flame/proto/meta_pb2.py index 3e9802706..7d720f03d 100644 --- a/lib/python/flame/proto/meta_pb2.py +++ b/lib/python/flame/proto/meta_pb2.py @@ -30,7 +30,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nmeta.proto\x12\x08grpcMeta\"X\n\x08MetaInfo\x12\x0e\n\x06job_id\x18\x01 \x01(\t\x12\x0f\n\x07\x63h_name\x18\x02 \x01(\t\x12\n\n\x02me\x18\x03 \x01(\t\x12\r\n\x05other\x18\x04 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x05 \x01(\t\"r\n\x0cMetaResponse\x12-\n\x06status\x18\x01 \x01(\x0e\x32\x1d.grpcMeta.MetaResponse.Status\x12\x11\n\tendpoints\x18\x02 \x03(\t\" \n\x06Status\x12\t\n\x05\x45RROR\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x32\x88\x01\n\tMetaRoute\x12@\n\x10RegisterMetaInfo\x12\x12.grpcMeta.MetaInfo\x1a\x16.grpcMeta.MetaResponse\"\x00\x12\x39\n\tHeartBeat\x12\x12.grpcMeta.MetaInfo\x1a\x16.grpcMeta.MetaResponse\"\x00\x42,Z*github.com/cisco-open/flame/pkg/proto/metab\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\nmeta.proto\x12\x08grpcMeta\"g\n\x08MetaInfo\x12\x0e\n\x06job_id\x18\x01 \x01(\t\x12\x0f\n\x07\x63h_name\x18\x02 \x01(\t\x12\n\n\x02me\x18\x03 \x01(\t\x12\r\n\x05other\x18\x04 \x01(\t\x12\r\n\x05group\x18\x05 \x01(\t\x12\x10\n\x08\x65ndpoint\x18\x06 \x01(\t\"r\n\x0cMetaResponse\x12-\n\x06status\x18\x01 \x01(\x0e\x32\x1d.grpcMeta.MetaResponse.Status\x12\x11\n\tendpoints\x18\x02 \x03(\t\" \n\x06Status\x12\t\n\x05\x45RROR\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x32\x88\x01\n\tMetaRoute\x12@\n\x10RegisterMetaInfo\x12\x12.grpcMeta.MetaInfo\x1a\x16.grpcMeta.MetaResponse\"\x00\x12\x39\n\tHeartBeat\x12\x12.grpcMeta.MetaInfo\x1a\x16.grpcMeta.MetaResponse\"\x00\x42,Z*github.com/cisco-open/flame/pkg/proto/metab\x06proto3') @@ -57,11 +57,11 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'Z*github.com/cisco-open/flame/pkg/proto/meta' _METAINFO._serialized_start=24 - _METAINFO._serialized_end=112 - 
_METARESPONSE._serialized_start=114 - _METARESPONSE._serialized_end=228 - _METARESPONSE_STATUS._serialized_start=196 - _METARESPONSE_STATUS._serialized_end=228 - _METAROUTE._serialized_start=231 - _METAROUTE._serialized_end=367 + _METAINFO._serialized_end=127 + _METARESPONSE._serialized_start=129 + _METARESPONSE._serialized_end=243 + _METARESPONSE_STATUS._serialized_start=211 + _METARESPONSE_STATUS._serialized_end=243 + _METAROUTE._serialized_start=246 + _METAROUTE._serialized_end=382 # @@protoc_insertion_point(module_scope) diff --git a/lib/python/flame/proto/meta_pb2_grpc.py b/lib/python/flame/proto/meta_pb2_grpc.py index 70947ce93..d335f28f1 100644 --- a/lib/python/flame/proto/meta_pb2_grpc.py +++ b/lib/python/flame/proto/meta_pb2_grpc.py @@ -32,15 +32,15 @@ def __init__(self, channel): channel: A grpc.Channel. """ self.RegisterMetaInfo = channel.unary_unary( - '/grpcMeta.MetaRoute/RegisterMetaInfo', - request_serializer=meta__pb2.MetaInfo.SerializeToString, - response_deserializer=meta__pb2.MetaResponse.FromString, - ) + '/grpcMeta.MetaRoute/RegisterMetaInfo', + request_serializer=meta__pb2.MetaInfo.SerializeToString, + response_deserializer=meta__pb2.MetaResponse.FromString, + ) self.HeartBeat = channel.unary_unary( - '/grpcMeta.MetaRoute/HeartBeat', - request_serializer=meta__pb2.MetaInfo.SerializeToString, - response_deserializer=meta__pb2.MetaResponse.FromString, - ) + '/grpcMeta.MetaRoute/HeartBeat', + request_serializer=meta__pb2.MetaInfo.SerializeToString, + response_deserializer=meta__pb2.MetaResponse.FromString, + ) class MetaRouteServicer(object): diff --git a/lib/python/setup.py b/lib/python/setup.py index 407c5fa65..17933b91f 100644 --- a/lib/python/setup.py +++ b/lib/python/setup.py @@ -19,7 +19,7 @@ setup( name='flame', - version='0.0.13', + version='0.0.14', author='Flame Maintainers', author_email='flame-github-owners@cisco.com', include_package_data=True, @@ -39,5 +39,6 @@ 'mlflow==2.0.1', 'paho-mqtt', 'protobuf==3.19.5', + 'grpcio==1.51.1', ], 
) diff --git a/pkg/proto/meta.proto b/pkg/proto/meta.proto index 5186d328f..bb425c9e2 100644 --- a/pkg/proto/meta.proto +++ b/pkg/proto/meta.proto @@ -30,7 +30,8 @@ message MetaInfo { string ch_name = 2; string me = 3; string other = 4; - string endpoint = 5; + string group = 5; + string endpoint = 6; } message MetaResponse { diff --git a/pkg/proto/meta/meta.pb.go b/pkg/proto/meta/meta.pb.go index 81451d696..f6220ec72 100644 --- a/pkg/proto/meta/meta.pb.go +++ b/pkg/proto/meta/meta.pb.go @@ -91,7 +91,8 @@ type MetaInfo struct { ChName string `protobuf:"bytes,2,opt,name=ch_name,json=chName,proto3" json:"ch_name,omitempty"` Me string `protobuf:"bytes,3,opt,name=me,proto3" json:"me,omitempty"` Other string `protobuf:"bytes,4,opt,name=other,proto3" json:"other,omitempty"` - Endpoint string `protobuf:"bytes,5,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + Group string `protobuf:"bytes,5,opt,name=group,proto3" json:"group,omitempty"` + Endpoint string `protobuf:"bytes,6,opt,name=endpoint,proto3" json:"endpoint,omitempty"` } func (x *MetaInfo) Reset() { @@ -154,6 +155,13 @@ func (x *MetaInfo) GetOther() string { return "" } +func (x *MetaInfo) GetGroup() string { + if x != nil { + return x.Group + } + return "" +} + func (x *MetaInfo) GetEndpoint() string { if x != nil { return x.Endpoint @@ -220,35 +228,37 @@ var File_meta_proto protoreflect.FileDescriptor var file_meta_proto_rawDesc = []byte{ 0x0a, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x67, 0x72, - 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x22, 0x7c, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x68, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x68, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x02, - 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, - 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x85, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x09, - 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x20, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x01, 0x32, 0x88, 0x01, 0x0a, - 0x09, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x52, 0x65, - 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, - 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x49, 0x6e, - 0x66, 0x6f, 0x1a, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, - 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, - 0x48, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, 0x74, 0x12, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x16, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x00, 0x42, 0x2c, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x73, 0x63, 0x6f, 0x2d, 0x6f, 0x70, 0x65, 0x6e, - 0x2f, 0x66, 0x6c, 0x61, 0x6d, 0x65, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x22, 0x92, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x15, 0x0a, 0x06, 0x6a, 0x6f, 0x62, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x63, 0x68, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x68, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, + 0x1a, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x22, 0x85, 0x01, 0x0a, 0x0c, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, 0x06, + 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1d, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x73, 0x22, 0x20, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x09, 0x0a, 
0x05, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x10, 0x01, 0x32, 0x88, 0x01, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x12, 0x40, 0x0a, 0x10, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, + 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x72, 0x74, 0x42, 0x65, 0x61, 0x74, + 0x12, 0x12, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x16, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2c, + 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x69, 0x73, + 0x63, 0x6f, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x2f, 0x66, 0x6c, 0x61, 0x6d, 0x65, 0x2f, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6d, 0x65, 0x74, 0x61, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/scripts/license_header.txt b/scripts/license_header.txt index 5f4424499..d2e35fa5e 100644 --- a/scripts/license_header.txt +++ b/scripts/license_header.txt @@ -1,4 +1,4 @@ -Copyright 2023 Cisco Systems, Inc. and its affiliates +Copyright 2022 Cisco Systems, Inc. and its affiliates Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
From a5c8fb2c7c5fecd5689a6ffeffeb5ecc18f70fbb Mon Sep 17 00:00:00 2001 From: GustavBaumgart <98069699+GustavBaumgart@users.noreply.github.com> Date: Fri, 3 Feb 2023 16:55:33 -0800 Subject: [PATCH 03/16] documenation for metaserver/mqtt local (#322) Documentation for using metaserver will allow users to run examples with a local broker. It also allows for mqtt local brokers. This decreases the chances of any job ID collisions. Modifications to the config.json for the mnist example were made in order to make it easier to switch to a local broker. The readme does indicate how to do this for other examples now. Co-authored-by: vboxuser --- docs/08-flame-sdk.md | 79 ++++++++++++++++++- .../examples/mnist/aggregator/config.json | 8 +- .../flame/examples/mnist/trainer/config.json | 8 +- 3 files changed, 88 insertions(+), 7 deletions(-) diff --git a/docs/08-flame-sdk.md b/docs/08-flame-sdk.md index 88bfc5c0c..a4f6436c9 100644 --- a/docs/08-flame-sdk.md +++ b/docs/08-flame-sdk.md @@ -20,17 +20,90 @@ make install ## Quickstart ### Configuring Brokers -As the flame system uses MQTT brokers to exchange messages during federated learning, to run the python library locally, you could either 1) install a local MQTT broker 2) use a public MQTT broker. Here we'll illustrate the second option. -Go to any examples that you wish to run locally in `examples` directory, change the `host` from `"flame-mosquitto"` to `broker.hivemq.com` in the `config.json` files of both the trainer and aggregator. +The following brokers are all for local testing. +If you wish to run federated learning accross multiple machines, please consider using the MQTT public broker. +This means setting `backend` and the `mqtt` broker in the config file as follows: + +```json + "backend": "mqtt", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + } + ] +``` + +However, this may lead to job ID collisions since it is a public broker. 
+Thus, for local testing, we recommend using either of the two options below. + +#### Local MQTT Broker + +Since the flame system uses MQTT brokers to exchange messages during federated learning, to run the python library locally, you may install a local MQTT broker as shown below. + +```bash +sudo apt update +sudo apt install -y mosquitto +sudo systemctl status mosquitto +``` + +The last command should display something similar to this: + +```bash +mosquitto.service - Mosquitto MQTT v3.1/v3.1.1 Broker + Loaded: loaded (/lib/systemd/system/mosquitto.service; enabled; vendor pre> + Active: active (running) since Fri 2023-02-03 14:05:55 PST; 1h 20min ago + Docs: man:mosquitto.conf(5) + man:mosquitto(8) + Main PID: 75525 (mosquitto) + Tasks: 3 (limit: 9449) + Memory: 1.9M + CGroup: /system.slice/mosquitto.service + └─75525 /usr/sbin/mosquitto -c /etc/mosquitto/mosquitto.conf +``` + +That confirms that the mosquitto service is active. +From now on, you may use `sudo systemctl stop mosquitto` to stop the mosquitto service, `sudo systemctl start mosquitto` to start the service, and `sudo systemctl restart mosquitto` to restart the service. + +Go ahead and change the two config files in `mnist/trainer` and `mnist/aggregator` to make sure `backend` is `mqtt`. + +```json + "backend": "mqtt", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ] +``` + +Note that if you also want to use the local `mqtt` broker for other examples you should make sure that the `mqtt` broker has `host` set to `localhost`. + +#### P2P + +To start a `p2p` broker, go to the top `/flame` directory and run: + +```bash +make install +cd ~ +sudo ./.flame/bin/metaserver +``` + +After changing the two config files in `mnist/trainer` and `mnist/aggregator` so that `backend` is set to `p2p`, continue to the next section. ### Running an Example In order to run this example, you will need to open two terminals. 
-In the first terminal, run the following command: +In the first terminal, run the following commands: ```bash +conda activate flame cd examples/mnist/trainer python keras/main.py config.json diff --git a/lib/python/flame/examples/mnist/aggregator/config.json b/lib/python/flame/examples/mnist/aggregator/config.json index 8ffb6ea74..24d81e9e6 100644 --- a/lib/python/flame/examples/mnist/aggregator/config.json +++ b/lib/python/flame/examples/mnist/aggregator/config.json @@ -3,9 +3,13 @@ "backend": "mqtt", "brokers": [ { - "host": "broker.hivemq.com", + "host": "localhost", "sort": "mqtt" - } + }, + { + "host": "localhost:10104", + "sort": "p2p" + } ], "channels": [ { diff --git a/lib/python/flame/examples/mnist/trainer/config.json b/lib/python/flame/examples/mnist/trainer/config.json index c689269a8..cf049e888 100644 --- a/lib/python/flame/examples/mnist/trainer/config.json +++ b/lib/python/flame/examples/mnist/trainer/config.json @@ -3,9 +3,13 @@ "backend": "mqtt", "brokers": [ { - "host": "broker.hivemq.com", + "host": "localhost", "sort": "mqtt" - } + }, + { + "host": "localhost:10104", + "sort": "p2p" + } ], "channels": [ { From d2115d04247e0ba7dad0cbed9ead5813168378c6 Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Mon, 6 Feb 2023 22:40:31 -0800 Subject: [PATCH 04/16] feat: asynchronous fl (#323) Asynchronous FL is implemented for two-tier topology and three-tier hierarchical topology. The main algorithm is based on the following two papers: - https://arxiv.org/pdf/2111.04877.pdf - https://arxiv.org/pdf/2106.06639.pdf Two examples for asynchronous fl are also added. One is for a two-tier topology and the other for a three-tier hierarchical topology. This implementation includes the core algorithm but doesn't include SecAgg algorithm (presented in the papers), which is not the scope of this change. 
--- lib/python/flame/backend/p2p.py | 4 +- lib/python/flame/channel.py | 166 +++++++++--- lib/python/flame/channel_manager.py | 4 + lib/python/flame/config.py | 4 + lib/python/flame/end.py | 4 + .../examples/async_hier_mnist/__init__.py | 15 ++ .../middle_aggregator/__init__.py | 15 ++ .../middle_aggregator/config_uk.json | 102 +++++++ .../middle_aggregator/config_us.json | 102 +++++++ .../middle_aggregator/main.py | 65 +++++ .../top_aggregator/__init__.py | 15 ++ .../top_aggregator/config.json | 77 ++++++ .../async_hier_mnist/top_aggregator/main.py | 89 +++++++ .../async_hier_mnist/trainer/__init__.py | 15 ++ .../async_hier_mnist/trainer/config_uk1.json | 78 ++++++ .../async_hier_mnist/trainer/config_uk2.json | 78 ++++++ .../async_hier_mnist/trainer/config_us1.json | 78 ++++++ .../async_hier_mnist/trainer/config_us2.json | 78 ++++++ .../examples/async_hier_mnist/trainer/main.py | 140 ++++++++++ .../flame/examples/async_mnist/__init__.py | 15 ++ .../async_mnist/aggregator/__init__.py | 15 ++ .../async_mnist/aggregator/config.json | 73 +++++ .../examples/async_mnist/aggregator/main.py | 89 +++++++ .../examples/async_mnist/trainer/__init__.py | 15 ++ .../examples/async_mnist/trainer/config1.json | 71 +++++ .../examples/async_mnist/trainer/config2.json | 71 +++++ .../examples/async_mnist/trainer/config3.json | 71 +++++ .../examples/async_mnist/trainer/config4.json | 71 +++++ .../examples/async_mnist/trainer/main.py | 140 ++++++++++ lib/python/flame/mode/composer.py | 27 +- .../flame/mode/horizontal/asyncfl/__init__.py | 15 ++ .../horizontal/asyncfl/middle_aggregator.py | 249 ++++++++++++++++++ .../mode/horizontal/asyncfl/top_aggregator.py | 205 ++++++++++++++ .../mode/horizontal/middle_aggregator.py | 8 +- .../flame/mode/horizontal/top_aggregator.py | 4 +- lib/python/flame/mode/horizontal/trainer.py | 9 +- lib/python/flame/mode/message.py | 11 +- lib/python/flame/mode/tasklet.py | 34 ++- lib/python/flame/optimizer/abstract.py | 12 +- 
lib/python/flame/optimizer/fedavg.py | 10 +- lib/python/flame/optimizer/fedbuff.py | 100 +++++++ lib/python/flame/optimizer/fedopt.py | 71 +++-- lib/python/flame/optimizer/train_result.py | 7 +- lib/python/flame/optimizers.py | 5 +- lib/python/flame/selector/__init__.py | 3 +- lib/python/flame/selector/default.py | 2 +- lib/python/flame/selector/fedbuff.py | 139 ++++++++++ lib/python/flame/selector/random.py | 3 +- lib/python/flame/selectors.py | 3 +- lib/python/setup.py | 2 +- 50 files changed, 2579 insertions(+), 100 deletions(-) create mode 100644 lib/python/flame/examples/async_hier_mnist/__init__.py create mode 100644 lib/python/flame/examples/async_hier_mnist/middle_aggregator/__init__.py create mode 100644 lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_uk.json create mode 100644 lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_us.json create mode 100644 lib/python/flame/examples/async_hier_mnist/middle_aggregator/main.py create mode 100644 lib/python/flame/examples/async_hier_mnist/top_aggregator/__init__.py create mode 100644 lib/python/flame/examples/async_hier_mnist/top_aggregator/config.json create mode 100644 lib/python/flame/examples/async_hier_mnist/top_aggregator/main.py create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/__init__.py create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/config_uk1.json create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/config_uk2.json create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/config_us1.json create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/config_us2.json create mode 100644 lib/python/flame/examples/async_hier_mnist/trainer/main.py create mode 100644 lib/python/flame/examples/async_mnist/__init__.py create mode 100644 lib/python/flame/examples/async_mnist/aggregator/__init__.py create mode 100644 lib/python/flame/examples/async_mnist/aggregator/config.json create mode 100644 
lib/python/flame/examples/async_mnist/aggregator/main.py create mode 100644 lib/python/flame/examples/async_mnist/trainer/__init__.py create mode 100644 lib/python/flame/examples/async_mnist/trainer/config1.json create mode 100644 lib/python/flame/examples/async_mnist/trainer/config2.json create mode 100644 lib/python/flame/examples/async_mnist/trainer/config3.json create mode 100644 lib/python/flame/examples/async_mnist/trainer/config4.json create mode 100644 lib/python/flame/examples/async_mnist/trainer/main.py create mode 100644 lib/python/flame/mode/horizontal/asyncfl/__init__.py create mode 100644 lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py create mode 100644 lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py create mode 100644 lib/python/flame/optimizer/fedbuff.py create mode 100644 lib/python/flame/selector/fedbuff.py diff --git a/lib/python/flame/backend/p2p.py b/lib/python/flame/backend/p2p.py index 989eb26d5..3a3162fba 100644 --- a/lib/python/flame/backend/p2p.py +++ b/lib/python/flame/backend/p2p.py @@ -363,7 +363,7 @@ async def _broadcast_task(self, channel): break end_ids = list(channel._ends.keys()) - logger.debug(f"end ids for bcast = {end_ids}") + logger.debug(f"end ids for {channel.name()} bcast = {end_ids}") for end_id in end_ids: try: await self.send_chunks(end_id, channel.name(), data) @@ -374,6 +374,8 @@ async def _broadcast_task(self, channel): await self._cleanup_end(end_id) txq.task_done() + logger.debug(f"broadcast task for {channel.name()} terminated") + async def _unicast_task(self, channel, end_id): txq = channel.get_txq(end_id) diff --git a/lib/python/flame/channel.py b/lib/python/flame/channel.py index e4d64dd8a..4b3f0c937 100644 --- a/lib/python/flame/channel.py +++ b/lib/python/flame/channel.py @@ -26,10 +26,14 @@ from .common.typing import Scalar from .common.util import run_async from .config import GROUPBY_DEFAULT_GROUP -from .end import End +from .end import KEY_END_STATE, VAL_END_STATE_RECVD, End logger 
= logging.getLogger(__name__) +KEY_CH_STATE = 'state' +VAL_CH_STATE_RECV = 'recv' +VAL_CH_STATE_SEND = 'send' + class Channel(object): """Channel class.""" @@ -117,12 +121,14 @@ async def inner() -> bool: return result - def one_end(self) -> str: + def one_end(self, state: Union[None, str] = None) -> str: """Return one end out of all ends.""" - return self.ends()[0] + return self.ends(state)[0] - def ends(self) -> list[str]: + def ends(self, state: Union[None, str] = None) -> list[str]: """Return a list of end ids.""" + if state == VAL_CH_STATE_RECV or state == VAL_CH_STATE_SEND: + self.properties[KEY_CH_STATE] = state async def inner(): selected = self._selector.select(self._ends, self.properties) @@ -198,17 +204,94 @@ async def _get(): payload, status = run_async(_get(), self._backend.loop()) + if self.has(end_id): + # set a property that says a message was received for the end + self._ends[end_id].set_property(KEY_END_STATE, VAL_END_STATE_RECVD) + return cloudpickle.loads(payload) if payload and status else None - def recv_fifo(self, end_ids: list[str]) -> Tuple[str, Any]: + def recv_fifo(self, + end_ids: list[str], + first_k: int = 0) -> Tuple[str, Any]: """Receive a message per end from a list of ends. The message arrival order among ends is not fixed. Messages are yielded in a FIFO manner. This method is not thread-safe. + + Parameters + ---------- + end_ids: a list of ends to receive a message from + first_k: an integer argument to restrict the number of ends + to receive a messagae from. The default value (= 0) + means that we'd like to receive messages from all + ends in the list. If first_k > len(end_ids), + first_k is set to len(end_ids). 
+ + Returns + ------- + The function yields a pair: end id and message """ + logger.debug(f"first_k = {first_k}, len(end_ids) = {len(end_ids)}") + + first_k = min(first_k, len(end_ids)) + if first_k <= 0: + # a negative value in first_k is an error + # we handle it by setting first_k as the length of the array + first_k = len(end_ids) + + # DO NOT CHANGE self.tmqp as a local variable. + # With aiostream, local variable update looks incorrect. + # but with an instance variable , the variable update is + # done correctly. + # + # A temporary aysncio queue to store messages in a FIFO manner + self.tmpq = None + + async def _put_message_to_tmpq_inner(): + # self.tmpq must be created in the _backend loop + self.tmpq = asyncio.Queue() + _ = asyncio.create_task( + self._streamer_for_recv_fifo(end_ids, first_k)) + + async def _get_message_inner(): + return await self.tmpq.get() + + # first, create an asyncio task to fetch messages and put a temp queue + # _put_message_to_tmpq_inner works as if it is a non-blocking call + # because a task is created within it + _, _ = run_async(_put_message_to_tmpq_inner(), self._backend.loop()) + + # the _get_message_inner() coroutine fetches a message from the temp + # queue; we call this coroutine first_k times + for _ in range(first_k): + result, status = run_async(_get_message_inner(), + self._backend.loop()) + (end_id, payload) = result + logger.debug(f"get payload for {end_id}") + + if self.has(end_id): + logger.debug(f"channel got a msg for {end_id}") + # set a property to indicate that a message was received + # for the end + self._ends[end_id].set_property(KEY_END_STATE, + VAL_END_STATE_RECVD) + else: + logger.debug(f"channel has no end id {end_id} for msg") + + msg = cloudpickle.loads(payload) if payload and status else None + yield end_id, msg - async def _get(end_id) -> Tuple[str, Any]: + async def _streamer_for_recv_fifo(self, end_ids: list[str], first_k: int): + """Read messages in a FIFO fashion. 
+ + This method reads messages from queues associated with each end + and puts first_k number of the messages into a queue; + The remaining messages are saved back into a variable (peek_buf) + of their corresponding end so that they can be read later. + """ + + async def _get_inner(end_id) -> Tuple[str, Any]: if not self.has(end_id): # can't receive message from end_id yield end_id, None @@ -221,40 +304,43 @@ async def _get(end_id) -> Tuple[str, Any]: yield end_id, payload - async def _streamer(tmpq): - runs = [_get(end_id) for end_id in end_ids] - - merged = stream.merge(*runs) - async with merged.stream() as streamer: - async for result in streamer: - await tmpq.put(result) - - # a temporary aysncio queue to store messages in a FIFO manner. - # we define this varialbe to make sure it is visiable - # in both _inner1() and _inner2() - tmpq = None - - async def _inner1(): - nonlocal tmpq - # tmpq must be created in the _backend loop - tmpq = asyncio.Queue() - _ = asyncio.create_task(_streamer(tmpq)) - - async def _inner2(): - return await tmpq.get() - - # first, create an asyncio task to fetch messages and put a temp queue - # _inner1 works as if it is a non-blocking call - # because a task is created within it - _, _ = run_async(_inner1(), self._backend.loop()) - - # the _inner2() coroutine fetches a message from the temp queue - # we call this coroutine the number of end_ids by iterating end_ids - for _ in end_ids: - result, status = run_async(_inner2(), self._backend.loop()) - (end_id, payload) = result - msg = cloudpickle.loads(payload) if payload and status else None - yield end_id, msg + runs = [_get_inner(end_id) for end_id in end_ids] + + # DO NOT CHANGE self.count as a local variable + # with aiostream, local variable update looks incorrect. + # but with an instance variable , the variable update is + # done correctly. 
+ self.count = 0 + merged = stream.merge(*runs) + async with merged.stream() as streamer: + logger.debug(f"0) cnt: {self.count}, first_k: {first_k}") + async for result in streamer: + (end_id, payload) = result + logger.debug(f"1) end id: {end_id}, cnt: {self.count}") + + self.count += 1 + logger.debug(f"2) end id: {end_id}, cnt: {self.count}") + if self.count <= first_k: + logger.debug(f"3) end id: {end_id}, cnt: {self.count}") + await self.tmpq.put(result) + + else: + logger.debug(f"4) end id: {end_id}, cnt: {self.count}") + # We already put the first_k number of messages into + # a queue. + # + # Now we need to save the remaining messages which + # were already taken out from each end's rcv queue. + # In order not to lose those messages, we use peek_buf + # in end object. + + # WARNING: peek_buf must be none; if not, we called + # peek() somewhere else and then called recv_fifo() + # before recv() was called. + # To detect this potential issue, assert is given here. + assert self._ends[end_id].peek_buf is None + + self._ends[end_id].peek_buf = payload def peek(self, end_id): """Peek rxq of end_id and return data if queue is not empty.""" diff --git a/lib/python/flame/channel_manager.py b/lib/python/flame/channel_manager.py index dcb60dc7b..e1a156a5a 100644 --- a/lib/python/flame/channel_manager.py +++ b/lib/python/flame/channel_manager.py @@ -204,6 +204,10 @@ def cleanup(self): ch.cleanup() async def _inner(backend): + # TODO: need better mechanism to wait tx completion + # as a temporary measure, sleep 5 seconds + await asyncio.sleep(5) + # clean up backend await backend.cleanup() diff --git a/lib/python/flame/config.py b/lib/python/flame/config.py index 75f45660b..9d9ef62b7 100644 --- a/lib/python/flame/config.py +++ b/lib/python/flame/config.py @@ -90,6 +90,9 @@ class OptimizerType(Enum): FEDADAGRAD = 2 # FedAdaGrad FEDADAM = 3 # FedAdam FEDYOGI = 4 # FedYogi + # FedBuff from https://arxiv.org/pdf/1903.03934.pdf and + # https://arxiv.org/pdf/2111.04877.pdf + 
FEDBUFF = 5 class SelectorType(Enum): @@ -97,6 +100,7 @@ class SelectorType(Enum): DEFAULT = 1 # default RANDOM = 2 # random + FEDBUFF = 3 # fedbuff REALM_SEPARATOR = '/' diff --git a/lib/python/flame/end.py b/lib/python/flame/end.py index 5d10ab76d..6755714f1 100644 --- a/lib/python/flame/end.py +++ b/lib/python/flame/end.py @@ -20,6 +20,10 @@ from .common.typing import Scalar +KEY_END_STATE = 'state' +VAL_END_STATE_RECVD = 'recvd' +VAL_END_STATE_NONE = '' + class End(object): """End class.""" diff --git a/lib/python/flame/examples/async_hier_mnist/__init__.py b/lib/python/flame/examples/async_hier_mnist/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_hier_mnist/middle_aggregator/__init__.py b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_uk.json b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_uk.json new file mode 100644 index 000000000..33bc7dd95 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_uk.json @@ -0,0 +1,102 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to global aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "global-channel", + "pair": [ + "top-aggregator", + "middle-aggregator" + ], + "funcTags": { + "top-aggregator": [ + "distribute", + "aggregate" + ], + "middle-aggregator": [ + "fetch", + "upload" + ] + } + }, + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + 
"rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/uk/london/org2/flame", + "role": "middle-aggregator" +} diff --git a/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_us.json b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_us.json new file mode 100644 index 000000000..d1aedef88 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/config_us.json @@ -0,0 +1,102 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa741", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to global aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "global-channel", + "pair": [ + "top-aggregator", + "middle-aggregator" + ], + "funcTags": { + "top-aggregator": [ + "distribute", + "aggregate" + ], + "middle-aggregator": [ + "fetch", + "upload" + ] + } + }, + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 
1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west/org1/flame", + "role": "middle-aggregator" +} diff --git a/lib/python/flame/examples/async_hier_mnist/middle_aggregator/main.py b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/main.py new file mode 100644 index 000000000..acd5e582d --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/middle_aggregator/main.py @@ -0,0 +1,65 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""HIRE_MNIST horizontal hierarchical FL middle level aggregator for Keras.""" + +import logging + +from flame.config import Config +from flame.mode.horizontal.asyncfl.middle_aggregator import MiddleAggregator +# the following needs to be imported to let the flame know +# this aggregator works on tensorflow model +from tensorflow import keras + +logger = logging.getLogger(__name__) + + +class KerasMnistMiddleAggregator(MiddleAggregator): + """Keras Mnist Middle Level Aggregator.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + + def initialize(self): + """Initialize role.""" + pass + + def load_data(self) -> None: + """Load a test dataset.""" + pass + + def train(self) -> None: + """Train a model.""" + pass + + def evaluate(self) -> None: + """Evaluate (test) a model.""" + pass + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + a = KerasMnistMiddleAggregator(config) + a.compose() + a.run() diff --git a/lib/python/flame/examples/async_hier_mnist/top_aggregator/__init__.py b/lib/python/flame/examples/async_hier_mnist/top_aggregator/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/top_aggregator/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_hier_mnist/top_aggregator/config.json b/lib/python/flame/examples/async_hier_mnist/top_aggregator/config.json new file mode 100644 index 000000000..0bf4d24f4 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/top_aggregator/config.json @@ -0,0 +1,77 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa740", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to global aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "global-channel", + "pair": [ + "top-aggregator", + "middle-aggregator" + ], + "funcTags": { + "top-aggregator": [ + "distribute", + "aggregate" + ], + "middle-aggregator": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 10, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + 
"kwargs": {} + }, + "maxRunTime": 300, + "realm": "", + "role": "top-aggregator" +} diff --git a/lib/python/flame/examples/async_hier_mnist/top_aggregator/main.py b/lib/python/flame/examples/async_hier_mnist/top_aggregator/main.py new file mode 100644 index 000000000..0e7d9914d --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/top_aggregator/main.py @@ -0,0 +1,89 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""HIRE_MNIST horizontal hierarchical FL top level aggregator for Keras.""" + +import logging + +from flame.config import Config +from flame.dataset import Dataset +from flame.mode.horizontal.asyncfl.top_aggregator import TopAggregator +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistTopAggregator(TopAggregator): + """Keras Mnist Top Level Aggregator.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.model = None + + self.dataset: Dataset = None + + self.num_classes = 10 + self.input_shape = (28, 28, 1) + + def initialize(self): + """Initialize role.""" + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + 
layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load a test dataset.""" + # Implement this if loading data is needed in aggregator + pass + + def train(self) -> None: + """Train a model.""" + # Implement this if training is needed in aggregator + pass + + def evaluate(self) -> None: + """Evaluate (test) a model.""" + # Implement this if testing is needed in aggregator + pass + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + a = KerasMnistTopAggregator(config) + a.compose() + a.run() diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/__init__.py b/lib/python/flame/examples/async_hier_mnist/trainer/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/config_uk1.json b/lib/python/flame/examples/async_hier_mnist/trainer/config_uk1.json new file mode 100644 index 000000000..f8fa0ce03 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/config_uk1.json @@ -0,0 +1,78 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa745", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/uk/london/org2/machine1", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/config_uk2.json b/lib/python/flame/examples/async_hier_mnist/trainer/config_uk2.json new file mode 100644 index 000000000..239e6f914 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/config_uk2.json @@ -0,0 +1,78 @@ +{ 
+ "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa746", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/uk/london/org2/machine2", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/config_us1.json b/lib/python/flame/examples/async_hier_mnist/trainer/config_us1.json new file mode 100644 index 000000000..855ccd948 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/config_us1.json @@ -0,0 +1,78 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa743", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + 
"value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west/org1/machine1", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/config_us2.json b/lib/python/flame/examples/async_hier_mnist/trainer/config_us2.json new file mode 100644 index 000000000..4a42bc27d --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/config_us2.json @@ -0,0 +1,78 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa744", + "backend": "p2p", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from mid aggregator to trainer and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default/us/west/org1", + "default/uk/london/org2" + ] + }, + "name": "param-channel", + "pair": [ + "middle-aggregator", + "trainer" + ], + "funcTags": { + "middle-aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": 
"https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 1 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "http://flame-mlflow:5000" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west/org1/machine2", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_hier_mnist/trainer/main.py b/lib/python/flame/examples/async_hier_mnist/trainer/main.py new file mode 100644 index 000000000..1b2a539b7 --- /dev/null +++ b/lib/python/flame/examples/async_hier_mnist/trainer/main.py @@ -0,0 +1,140 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""HIRE_MNIST horizontal hierarchical FL trainer for Keras.""" + +import logging +from random import randrange +from statistics import mean + +import numpy as np +from flame.config import Config +from flame.mode.horizontal.trainer import Trainer +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistTrainer(Trainer): + """Keras Mnist Trainer.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.dataset_size = 0 + + self.num_classes = 10 + self.input_shape = (28, 28, 1) + + self.model = None + self._x_train = None + self._y_train = None + self._x_test = None + self._y_test = None + + self.epochs = self.config.hyperparameters['epochs'] + self.batch_size = 128 + if 'batchSize' in self.config.hyperparameters: + self.batch_size = self.config.hyperparameters['batchSize'] + + def initialize(self) -> None: + """Initialize role.""" + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load data.""" + # the data, split between train and test sets + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + split_n = 10 + index = randrange(split_n) + # reduce train sample size to reduce the runtime + x_train = np.split(x_train, split_n)[index] + y_train = np.split(y_train, split_n)[index] + x_test = np.split(x_test, split_n)[index] + y_test = np.split(y_test, split_n)[index] + + # Scale images to the [0, 1] range + 
x_train = x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + # convert class vectors to binary class matrices + y_train = keras.utils.to_categorical(y_train, self.num_classes) + y_test = keras.utils.to_categorical(y_test, self.num_classes) + + self._x_train = x_train + self._y_train = y_train + self._x_test = x_test + self._y_test = y_test + + def train(self) -> None: + """Train a model.""" + history = self.model.fit(self._x_train, + self._y_train, + batch_size=self.batch_size, + epochs=self.epochs, + validation_split=0.1) + + # save dataset size so that the info can be shared with aggregator + self.dataset_size = len(self._x_train) + + loss = mean(history.history['loss']) + accuracy = mean(history.history['accuracy']) + self.update_metrics({'loss': loss, 'accuracy': accuracy}) + + def evaluate(self) -> None: + """Evaluate a model.""" + score = self.model.evaluate(self._x_test, self._y_test, verbose=0) + + logger.info(f"Test loss: {score[0]}") + logger.info(f"Test accuracy: {score[1]}") + + # update metrics after each evaluation so that the metrics can be + # logged in a model registry. + self.update_metrics({'test-loss': score[0], 'test-accuracy': score[1]}) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + t = KerasMnistTrainer(config) + t.compose() + t.run() diff --git a/lib/python/flame/examples/async_mnist/__init__.py b/lib/python/flame/examples/async_mnist/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_mnist/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_mnist/aggregator/__init__.py b/lib/python/flame/examples/async_mnist/aggregator/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_mnist/aggregator/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_mnist/aggregator/config.json b/lib/python/flame/examples/async_mnist/aggregator/config.json new file mode 100644 index 000000000..a8646f70c --- /dev/null +++ b/lib/python/flame/examples/async_mnist/aggregator/config.json @@ -0,0 +1,73 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "p2p", + "brokers": [ + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": [ + "distribute", + "aggregate" + ], + "trainer": [ + "fetch", + "upload" + ] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 10, + "//": "aggGoal is aggregation goal for fedbuff", + "aggGoal": 2 + }, + "baseModel": { + "name": "", + "version": 2 + }, + "job": { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency level", + "c": 4 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/async_mnist/aggregator/main.py b/lib/python/flame/examples/async_mnist/aggregator/main.py new file mode 100644 index 000000000..6f75e2eb0 --- /dev/null +++ b/lib/python/flame/examples/async_mnist/aggregator/main.py @@ -0,0 +1,89 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""MNIST asynchronous horizontal FL aggregator for Keras.""" + +import logging + +from flame.config import Config +from flame.dataset import Dataset +from flame.mode.horizontal.asyncfl.top_aggregator import TopAggregator +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistAggregator(TopAggregator): + """Keras Mnist Aggregator.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.model = None + + self.dataset: Dataset = None + + self.num_classes = 10 + self.input_shape = (28, 28, 1) + + def initialize(self): + """Initialize role.""" + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load a test dataset.""" + # Implement this if loading data is needed in aggregator + pass + + def train(self) -> None: + """Train a model.""" + # Implement this if training is needed in aggregator + pass + + def evaluate(self) -> None: + """Evaluate (test) a model.""" + # Implement this if testing is needed in aggregator + pass + 
+ +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + a = KerasMnistAggregator(config) + a.compose() + a.run() diff --git a/lib/python/flame/examples/async_mnist/trainer/__init__.py b/lib/python/flame/examples/async_mnist/trainer/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/examples/async_mnist/trainer/config1.json b/lib/python/flame/examples/async_mnist/trainer/config1.json new file mode 100644 index 000000000..4766ce89e --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/config1.json @@ -0,0 +1,71 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "p2p", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is irrelevant since it's a trainer", + "aggGoal": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency (this irrelevant since it's traner)", + "c": 4 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_mnist/trainer/config2.json b/lib/python/flame/examples/async_mnist/trainer/config2.json new file mode 100644 index 000000000..e97b28b92 --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/config2.json @@ -0,0 +1,71 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", + "backend": "p2p", + "brokers": [ + { + "host": 
"broker.hivemq.com", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is irrelevant since it's a trainer", + "aggGoal": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency (this irrelevant since it's traner)", + "c": 4 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_mnist/trainer/config3.json b/lib/python/flame/examples/async_mnist/trainer/config3.json new file mode 100644 index 000000000..f87e0e2be --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/config3.json @@ -0,0 +1,71 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580372", + "backend": "p2p", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } 
+ ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is irrelevant since it's a trainer", + "aggGoal": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency (this irrelevant since it's traner)", + "c": 4 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_mnist/trainer/config4.json b/lib/python/flame/examples/async_mnist/trainer/config4.json new file mode 100644 index 000000000..1374553ca --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/config4.json @@ -0,0 +1,71 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580373", + "backend": "p2p", + "brokers": [ + { + "host": "broker.hivemq.com", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 5, + "//": "aggGoal is irrelevant since it's a trainer", + "aggGoal": 2 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mnist" + }, + "registry": { + "sort": 
"dummy", + "uri": "" + }, + "selector": { + "sort": "fedbuff", + "kwargs": { + "//": "c: concurrency (this irrelevant since it's traner)", + "c": 4 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + }, + "maxRunTime": 300, + "realm": "default/us/west", + "role": "trainer" +} diff --git a/lib/python/flame/examples/async_mnist/trainer/main.py b/lib/python/flame/examples/async_mnist/trainer/main.py new file mode 100644 index 000000000..0f2cbc08a --- /dev/null +++ b/lib/python/flame/examples/async_mnist/trainer/main.py @@ -0,0 +1,140 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""MNIST horizontal FL trainer for Keras.""" + +import logging +from random import randrange +from statistics import mean + +import numpy as np +from flame.config import Config +from flame.mode.horizontal.trainer import Trainer +from tensorflow import keras +from tensorflow.keras import layers + +logger = logging.getLogger(__name__) + + +class KerasMnistTrainer(Trainer): + """Keras Mnist Trainer.""" + + def __init__(self, config: Config) -> None: + """Initialize a class instance.""" + self.config = config + self.dataset_size = 0 + + self.num_classes = 10 + self.input_shape = (28, 28, 1) + + self.model = None + self._x_train = None + self._y_train = None + self._x_test = None + self._y_test = None + + self.epochs = self.config.hyperparameters['epochs'] + self.batch_size = 128 + if 'batchSize' in self.config.hyperparameters: + self.batch_size = self.config.hyperparameters['batchSize'] + + def initialize(self) -> None: + """Initialize role.""" + model = keras.Sequential([ + keras.Input(shape=self.input_shape), + layers.Conv2D(32, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Conv2D(64, kernel_size=(3, 3), activation="relu"), + layers.MaxPooling2D(pool_size=(2, 2)), + layers.Flatten(), + layers.Dropout(0.5), + layers.Dense(self.num_classes, activation="softmax"), + ]) + + model.compile(loss="categorical_crossentropy", + optimizer="adam", + metrics=["accuracy"]) + + self.model = model + + def load_data(self) -> None: + """Load data.""" + # the data, split between train and test sets + (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data() + + split_n = 10 + index = randrange(split_n) + # reduce train sample size to reduce the runtime + x_train = np.split(x_train, split_n)[index] + y_train = np.split(y_train, split_n)[index] + x_test = np.split(x_test, split_n)[index] + y_test = np.split(y_test, split_n)[index] + + # Scale images to the [0, 1] range + x_train = 
x_train.astype("float32") / 255 + x_test = x_test.astype("float32") / 255 + # Make sure images have shape (28, 28, 1) + x_train = np.expand_dims(x_train, -1) + x_test = np.expand_dims(x_test, -1) + + # convert class vectors to binary class matrices + y_train = keras.utils.to_categorical(y_train, self.num_classes) + y_test = keras.utils.to_categorical(y_test, self.num_classes) + + self._x_train = x_train + self._y_train = y_train + self._x_test = x_test + self._y_test = y_test + + def train(self) -> None: + """Train a model.""" + history = self.model.fit(self._x_train, + self._y_train, + batch_size=self.batch_size, + epochs=self.epochs, + validation_split=0.1) + + # save dataset size so that the info can be shared with aggregator + self.dataset_size = len(self._x_train) + + loss = mean(history.history['loss']) + accuracy = mean(history.history['accuracy']) + self.update_metrics({'loss': loss, 'accuracy': accuracy}) + + def evaluate(self) -> None: + """Evaluate a model.""" + score = self.model.evaluate(self._x_test, self._y_test, verbose=0) + + logger.info(f"Test loss: {score[0]}") + logger.info(f"Test accuracy: {score[1]}") + + # update metrics after each evaluation so that the metrics can be + # logged in a model registry. 
+ self.update_metrics({'test-loss': score[0], 'test-accuracy': score[1]}) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + t = KerasMnistTrainer(config) + t.compose() + t.run() diff --git a/lib/python/flame/mode/composer.py b/lib/python/flame/mode/composer.py index 99fdb519a..298c252b5 100644 --- a/lib/python/flame/mode/composer.py +++ b/lib/python/flame/mode/composer.py @@ -26,7 +26,6 @@ class Composer(object): """Composer enables composition of tasklets.""" - # def __init__(self) -> None: """Initialize the class.""" # maintain tasklet chains @@ -133,6 +132,32 @@ def run(self) -> None: visited.add(child) q.put(child) + logger.debug("end of run") + + def print(self): + """Print the chain of tasklets. + + This function is for debugging. + """ + tasklet = next(iter(self.chain)) + # get the first tasklet in the chain + root = tasklet.get_root() + + # traverse tasklets and print tasklet details + q = Queue() + q.put(root) + while not q.empty(): + tasklet = q.get() + + print("-----") + print(tasklet) + + # put unvisited children of a selected tasklet + for child in self.chain[tasklet]: + q.put(child) + print("=====") + print("done with printing chain") + class ComposerContext(object): """ComposerContext maintains a context of composer.""" diff --git a/lib/python/flame/mode/horizontal/asyncfl/__init__.py b/lib/python/flame/mode/horizontal/asyncfl/__init__.py new file mode 100644 index 000000000..00b0536f7 --- /dev/null +++ b/lib/python/flame/mode/horizontal/asyncfl/__init__.py @@ -0,0 +1,15 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 diff --git a/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py b/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py new file mode 100644 index 000000000..19737ec7d --- /dev/null +++ b/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py @@ -0,0 +1,249 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""Asynchronous horizontal FL middle level aggregator.""" + +import logging +import time + +from ....channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND +from ....optimizer.train_result import TrainResult +from ...composer import Composer +from ...message import MessageType +from ...tasklet import Loop, Tasklet +from ..middle_aggregator import (TAG_AGGREGATE, TAG_DISTRIBUTE, TAG_FETCH, + TAG_UPLOAD) +from ..middle_aggregator import MiddleAggregator as SyncMidAgg + +logger = logging.getLogger(__name__) + +# 60 second wait time until a trainer appears in a channel +WAIT_TIME_FOR_TRAINER = 60 + + +class MiddleAggregator(SyncMidAgg): + """Asynchronous middle level aggregator. + + It acts as a proxy between top level aggregator and trainer. + """ + + def internal_init(self) -> None: + """Initialize internal state for role.""" + super().internal_init() + + self._agg_goal_cnt = 0 + self._agg_goal_weights = None + self._agg_goal = 0 + if 'aggGoal' in self.config.hyperparameters: + self._agg_goal = self.config.hyperparameters['aggGoal'] + + def _reset_agg_goal_variables(self): + logger.debug("reset agg goal variables") + # reset agg goal count + self._agg_goal_cnt = 0 + + # reset agg goal weights + self._agg_goal_weights = None + + def _fetch_weights(self, tag: str) -> None: + logger.debug("calling _fetch_weights") + channel = self.cm.get_by_tag(tag) + if not channel: + logger.debug(f"channel not found with tag {tag}") + return + + # this call waits for at least one peer to join this channel + channel.await_join() + + # one aggregator is sufficient + end = channel.one_end(VAL_CH_STATE_RECV) + msg = channel.recv(end) + + if MessageType.WEIGHTS in msg: + self.weights = msg[MessageType.WEIGHTS] + + if MessageType.EOT in msg: + self._work_done = msg[MessageType.EOT] + + if MessageType.ROUND in msg: + self._round = msg[MessageType.ROUND] + + def _distribute_weights(self, tag: str) -> None: + channel = self.cm.get_by_tag(tag) + if not
channel: + logger.debug(f"channel not found for tag {tag}") + return + + # this call waits for at least one peer to join this channel + self.trainer_no_show = channel.await_join(WAIT_TIME_FOR_TRAINER) + if self.trainer_no_show: + logger.debug("channel await join timeouted") + # send dummy weights to unblock top aggregator + self._send_dummy_weights(TAG_UPLOAD) + return + + for end in channel.ends(VAL_CH_STATE_SEND): + logger.debug(f"sending weights to {end}") + channel.send( + end, { + MessageType.WEIGHTS: self.weights, + MessageType.ROUND: self._round, + MessageType.MODEL_VERSION: self._round + }) + + def _aggregate_weights(self, tag: str) -> None: + """Aggregate local model weights asynchronously. + + This method is overridden from one in synchronous middle aggregator + (..middle_aggregator). + """ + channel = self.cm.get_by_tag(tag) + if not channel: + return + + if self._agg_goal_weights is None: + logger.debug(f"type of weights: {type(self.weights)}") + self._agg_goal_weights = self.weights.copy() + + # receive local model parameters from a trainer who arrives first + end, msg = next(channel.recv_fifo(channel.ends(VAL_CH_STATE_RECV), 1)) + if not msg: + logger.debug(f"No data from {end}; skipping it") + return + + logger.debug(f"received data from {end}") + + if MessageType.WEIGHTS in msg: + weights = msg[MessageType.WEIGHTS] + + if MessageType.DATASET_SIZE in msg: + count = msg[MessageType.DATASET_SIZE] + + if MessageType.MODEL_VERSION in msg: + version = msg[MessageType.MODEL_VERSION] + + logger.debug(f"{end}'s parameters trained with {count} samples") + + if weights is not None and count > 0: + tres = TrainResult(weights, count, version) + # save training result from trainer in a disk cache + self.cache[end] = tres + + self._agg_goal_weights = self.optimizer.do( + self.cache, + base_weights=self._agg_goal_weights, + total=count, + version=self._round) + # increment agg goal count + self._agg_goal_cnt += 1 + + if self._agg_goal_cnt < self._agg_goal: + #
didn't reach the aggregation goal; return + logger.debug("didn't reach agg goal") + logger.debug( + f" current: {self._agg_goal_cnt}; agg goal: {self._agg_goal}") + return + + if self._agg_goal_weights is None: + logger.debug("failed model aggregation") + time.sleep(1) + return + + # set global weights + self.weights = self._agg_goal_weights + + self.dataset_size = count + + def _send_weights(self, tag: str) -> None: + logger.debug("calling _send_weights") + channel = self.cm.get_by_tag(tag) + if not channel: + logger.debug(f"channel not found with {tag}") + return + + # this call waits for at least one peer to join this channel + channel.await_join() + + # one aggregator is sufficient + end = channel.one_end(VAL_CH_STATE_SEND) + channel.send( + end, { + MessageType.WEIGHTS: self.weights, + MessageType.DATASET_SIZE: self.dataset_size, + MessageType.MODEL_VERSION: self._round + }) + logger.debug("sending weights done") + + def _send_dummy_weights(self, tag: str) -> None: + channel = self.cm.get_by_tag(tag) + if not channel: + logger.debug(f"channel not found with {tag}") + return + + # this call waits for at least one peer to join this channel + channel.await_join() + + # one aggregator is sufficient + end = channel.one_end(VAL_CH_STATE_SEND) + + dummy_msg = {MessageType.WEIGHTS: None, MessageType.DATASET_SIZE: 0} + channel.send(end, dummy_msg) + logger.debug("sending dummy weights done") + + def compose(self) -> None: + """Compose role with tasklets.""" + with Composer() as composer: + self.composer = composer + + task_internal_init = Tasklet(self.internal_init) + + task_init = Tasklet(self.initialize) + + task_load_data = Tasklet(self.load_data) + + task_reset_agg_goal_vars = Tasklet(self._reset_agg_goal_variables) + + task_put_dist = Tasklet(self.put, TAG_DISTRIBUTE) + task_put_dist.set_continue_fn(cont_fn=lambda: self.trainer_no_show) + + task_put_upload = Tasklet(self.put, TAG_UPLOAD) + + task_get_aggr = Tasklet(self.get, TAG_AGGREGATE) + + task_get_fetch = 
Tasklet(self.get, TAG_FETCH) + + task_eval = Tasklet(self.evaluate) + + task_update_round = Tasklet(self.update_round) + + task_end_of_training = Tasklet(self.inform_end_of_training) + + # create a loop object with loop exit condition function + loop = Loop(loop_check_fn=lambda: self._work_done) + + # create a loop object for asyncfl to manage concurrency as well as + # aggregation goal + asyncfl_loop = Loop( + loop_check_fn=lambda: self._agg_goal_cnt == self._agg_goal) + + task_internal_init >> task_load_data >> task_init >> loop( + task_get_fetch >> task_reset_agg_goal_vars >> asyncfl_loop( + task_put_dist >> task_get_aggr) >> task_put_upload >> task_eval + >> task_update_round) >> task_end_of_training + + @classmethod + def get_func_tags(cls) -> list[str]: + """Return a list of function tags defined in the middle level aggregator role.""" + return [TAG_DISTRIBUTE, TAG_AGGREGATE, TAG_FETCH, TAG_UPLOAD] diff --git a/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py b/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py new file mode 100644 index 000000000..cfdc65a17 --- /dev/null +++ b/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py @@ -0,0 +1,205 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""Asynchronous horizontal FL top level aggregator.""" + +import logging +import time + +from ....channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND +from ....optimizer.train_result import TrainResult +from ...composer import Composer +from ...message import MessageType +from ...tasklet import Loop, Tasklet +from ..top_aggregator import TAG_AGGREGATE, TAG_DISTRIBUTE +from ..top_aggregator import TopAggregator as SyncTopAgg + +logger = logging.getLogger(__name__) + + +class TopAggregator(SyncTopAgg): + """Asynchronous top level Aggregator implements an ML aggregation role.""" + + def internal_init(self) -> None: + """Initialize internal state for role.""" + super().internal_init() + + self._agg_goal_cnt = 0 + self._agg_goal_weights = None + self._agg_goal = 0 + if 'aggGoal' in self.config.hyperparameters: + self._agg_goal = self.config.hyperparameters['aggGoal'] + + def _reset_agg_goal_variables(self): + logger.debug("reset agg goal variables") + # reset agg goal count + self._agg_goal_cnt = 0 + + # reset agg goal weights + self._agg_goal_weights = None + + def _aggregate_weights(self, tag: str) -> None: + """Aggregate local model weights asynchronously. + + This method is overridden from one in synchronous top aggregator + (..top_aggregator). + """ + channel = self.cm.get_by_tag(tag) + if not channel: + return + + if self._agg_goal_weights is None: + logger.debug(f"type of weights: {type(self.weights)}") + self._agg_goal_weights = self.weights.copy() + + # receive local model parameters from a trainer who arrives first + end, msg = next(channel.recv_fifo(channel.ends(VAL_CH_STATE_RECV), 1)) + if not msg: + logger.debug(f"No data from {end}; skipping it") + return + + logger.debug(f"received data from {end}") + + if MessageType.WEIGHTS in msg: + # TODO: client should send delta instead of whole weights; + # in the current implementation without delta transmission, + # fedbuff algorithm's loss function diverges.
+ # This needs code refactoring of the optimizer as well as + # trainer code across all different modes, which involves + # extensive testing of other code. + # The whole change should be done separately to avoid + # too many changes. + weights = msg[MessageType.WEIGHTS] + + if MessageType.DATASET_SIZE in msg: + count = msg[MessageType.DATASET_SIZE] + + if MessageType.MODEL_VERSION in msg: + version = msg[MessageType.MODEL_VERSION] + + logger.debug(f"{end}'s parameters trained with {count} samples") + + if weights is not None and count > 0: + tres = TrainResult(weights, count, version) + # save training result from trainer in a disk cache + self.cache[end] = tres + + self._agg_goal_weights = self.optimizer.do( + self.cache, + base_weights=self._agg_goal_weights, + total=count, + version=self._round) + # increment agg goal count + self._agg_goal_cnt += 1 + + if self._agg_goal_cnt < self._agg_goal: + # didn't reach the aggregation goal; return + logger.debug("didn't reach agg goal") + logger.debug( + f" current: {self._agg_goal_cnt}; agg goal: {self._agg_goal}") + return + + if self._agg_goal_weights is None: + logger.debug("failed model aggregation") + time.sleep(1) + return + + # set global weights + self.weights = self._agg_goal_weights + + # update model with global weights + self._update_model() + + logger.debug(f"aggregation finished for round {self._round}") + + def _distribute_weights(self, tag: str) -> None: + """Distribute a global model in asynchronous FL fashion. + + This method is overridden from one in synchronous top aggregator + (..top_aggregator).
+ """ + channel = self.cm.get_by_tag(tag) + if not channel: + logger.debug(f"channel not found for tag {tag}") + return + + # this call waits for at least one peer to join this channel + channel.await_join() + + # before distributing weights, update it from global model + self._update_weights() + + # send out global model parameters to trainers + for end in channel.ends(VAL_CH_STATE_SEND): + logger.debug(f"sending weights to {end}") + # we use _round to indicate a model version + channel.send( + end, { + MessageType.WEIGHTS: self.weights, + MessageType.ROUND: self._round, + MessageType.MODEL_VERSION: self._round + }) + + def compose(self) -> None: + """Compose role with tasklets.""" + with Composer() as composer: + self.composer = composer + + task_internal_init = Tasklet(self.internal_init) + + task_init = Tasklet(self.initialize) + + task_load_data = Tasklet(self.load_data) + + task_reset_agg_goal_vars = Tasklet(self._reset_agg_goal_variables) + + task_put = Tasklet(self.put, TAG_DISTRIBUTE) + + task_get = Tasklet(self.get, TAG_AGGREGATE) + + task_train = Tasklet(self.train) + + task_eval = Tasklet(self.evaluate) + + task_analysis = Tasklet(self.run_analysis) + + task_save_metrics = Tasklet(self.save_metrics) + + task_increment_round = Tasklet(self.increment_round) + + task_end_of_training = Tasklet(self.inform_end_of_training) + + task_save_params = Tasklet(self.save_params) + + task_save_model = Tasklet(self.save_model) + + # create a loop object with loop exit condition function + loop = Loop(loop_check_fn=lambda: self._work_done) + + # create a loop object for asyncfl to manage concurrency as well as + # aggregation goal + asyncfl_loop = Loop( + loop_check_fn=lambda: self._agg_goal_cnt == self._agg_goal) + + task_internal_init >> task_load_data >> task_init >> loop( + task_reset_agg_goal_vars >> asyncfl_loop( + task_put >> task_get) >> task_train >> task_eval >> + task_analysis >> task_save_metrics >> task_increment_round + ) >> task_end_of_training >> 
task_save_params >> task_save_model + + @classmethod + def get_func_tags(cls) -> list[str]: + """Return a list of function tags defined in the top level aggregator role.""" + return [TAG_DISTRIBUTE, TAG_AGGREGATE] diff --git a/lib/python/flame/mode/horizontal/middle_aggregator.py b/lib/python/flame/mode/horizontal/middle_aggregator.py index 6c6af7cf6..fc7367cd1 100644 --- a/lib/python/flame/mode/horizontal/middle_aggregator.py +++ b/lib/python/flame/mode/horizontal/middle_aggregator.py @@ -69,6 +69,9 @@ def internal_init(self) -> None: self.cache = Cache() self.dataset_size = 0 + # save distribute tag in an instance variable + self.dist_tag = TAG_DISTRIBUTE + def get(self, tag: str) -> None: """Get data from remote role(s).""" if tag == TAG_FETCH: @@ -81,7 +84,6 @@ def put(self, tag: str) -> None: if tag == TAG_UPLOAD: self._send_weights(tag) if tag == TAG_DISTRIBUTE: - self.dist_tag = tag self._distribute_weights(tag) def _fetch_weights(self, tag: str) -> None: @@ -155,7 +157,7 @@ def _aggregate_weights(self, tag: str) -> None: self.cache[end] = tres # optimizer conducts optimization (in this case, aggregation) - global_weights = self.optimizer.do(self.cache, total) + global_weights = self.optimizer.do(self.cache, total=total) if global_weights is None: logger.debug("failed model aggregation") time.sleep(1) @@ -215,6 +217,8 @@ def update_round(self): def inform_end_of_training(self) -> None: """Inform all the trainers that the training is finished.""" + logger.debug("inform end of training") + channel = self.cm.get_by_tag(self.dist_tag) if not channel: logger.debug(f"channel not found for tag {self.dist_tag}") diff --git a/lib/python/flame/mode/horizontal/top_aggregator.py b/lib/python/flame/mode/horizontal/top_aggregator.py index 1d2e3d890..367bc03f6 100644 --- a/lib/python/flame/mode/horizontal/top_aggregator.py +++ b/lib/python/flame/mode/horizontal/top_aggregator.py @@ -129,7 +129,7 @@ def _aggregate_weights(self, tag: str) -> None: self.cache[end] = tres # 
optimizer conducts optimization (in this case, aggregation) - global_weights = self.optimizer.do(self.cache, total) + global_weights = self.optimizer.do(self.cache, total=total) if global_weights is None: logger.debug("failed model aggregation") time.sleep(1) @@ -175,6 +175,7 @@ def inform_end_of_training(self) -> None: return channel.broadcast({MessageType.EOT: self._work_done}) + logger.debug("done broadcasting end-of-training") def run_analysis(self): """Run analysis plugins and update results to metrics.""" @@ -208,6 +209,7 @@ def increment_round(self): logger.debug(f"channel not found for tag {self.dist_tag}") return + logger.debug(f"Incremented round to {self._round}") # set necessary properties to help channel decide how to select ends channel.set_property("round", self._round) diff --git a/lib/python/flame/mode/horizontal/trainer.py b/lib/python/flame/mode/horizontal/trainer.py index 280fa88de..6fd8daf8c 100644 --- a/lib/python/flame/mode/horizontal/trainer.py +++ b/lib/python/flame/mode/horizontal/trainer.py @@ -16,8 +16,8 @@ """horizontal FL trainer.""" import logging -import time +from ...channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND from ...channel_manager import ChannelManager from ...common.custom_abcmeta import ABCMeta, abstract_attribute from ...common.util import (MLFramework, get_ml_framework_in_use, @@ -83,7 +83,7 @@ def _fetch_weights(self, tag: str) -> None: channel.await_join() # one aggregator is sufficient - end = channel.one_end() + end = channel.one_end(VAL_CH_STATE_RECV) msg = channel.recv(end) if MessageType.WEIGHTS in msg: @@ -114,13 +114,14 @@ def _send_weights(self, tag: str) -> None: channel.await_join() # one aggregator is sufficient - end = channel.one_end() + end = channel.one_end(VAL_CH_STATE_SEND) self._update_weights() channel.send( end, { MessageType.WEIGHTS: self.weights, - MessageType.DATASET_SIZE: self.dataset_size + MessageType.DATASET_SIZE: self.dataset_size, + MessageType.MODEL_VERSION: self._round }) 
logger.debug("sending weights done") diff --git a/lib/python/flame/mode/message.py b/lib/python/flame/mode/message.py index e7f79b67b..78afcf48b 100644 --- a/lib/python/flame/mode/message.py +++ b/lib/python/flame/mode/message.py @@ -28,6 +28,11 @@ class MessageType(Enum): # a digest of all the workers in distributed learning MEMBER_DIGEST = 5 - RING_WEIGHTS = 6 # global model weights in distributed learning - NEW_TRAINER = 7 # sending message for the arrival of a new trainer - IS_COMMITTER = 8 # is a trainer responsible to send weights to a new trainer in distributed learning + RING_WEIGHTS = 6 # global model weights in distributed learning + NEW_TRAINER = 7 # sending message for the arrival of a new trainer + + # a variable to indicate that a trainer is responsible to send weights + # to a new trainer joining a distributed learning job + IS_COMMITTER = 8 + + MODEL_VERSION = 9 # model version used; a non-negative integer diff --git a/lib/python/flame/mode/tasklet.py b/lib/python/flame/mode/tasklet.py index c33eb9c79..e5501af99 100644 --- a/lib/python/flame/mode/tasklet.py +++ b/lib/python/flame/mode/tasklet.py @@ -60,6 +60,16 @@ def __init__(self, func: Callable, *args, **kwargs) -> None: self.loop_ender = None self.loop_state = LoopIndicator.NONE + def __str__(self): + """Return tasklet details.""" + starter = self.loop_starter.func.__name__ if self.loop_starter else "" + ender = self.loop_ender.func.__name__ if self.loop_ender else "" + + return f"func: {self.func.__name__}" + \ + f"\nloop_state: {self.loop_state}" + \ + f"\nloop_starter: {starter}" + \ + f"\nloop_ender: {ender}" + def __rshift__(self, other: Tasklet) -> Tasklet: """Set up connection.""" if self not in self.composer.chain: @@ -68,17 +78,17 @@ def __rshift__(self, other: Tasklet) -> Tasklet: if other not in self.composer.chain: self.composer.chain[other] = set() - # case 1: t1 >> loop(t2 >> t3) - # if t1 is self, t3 is other; t3.loop_starter is t2 - if other.loop_starter and other.loop_starter
not in self.composer.chain: - self.composer.chain[other.loop_starter] = set() - if self not in self.composer.reverse_chain: self.composer.reverse_chain[self] = set() if other not in self.composer.reverse_chain: self.composer.reverse_chain[other] = set() + # case 1: t1 >> loop(t2 >> t3) + # if t1 is self, t3 is other; t3.loop_starter is t2 + if other.loop_starter and other.loop_starter not in self.composer.chain: + self.composer.chain[other.loop_starter] = set() + # same as case 1 if other.loop_starter and other.loop_starter not in self.composer.reverse_chain: self.composer.reverse_chain[other.loop_starter] = set() @@ -86,13 +96,9 @@ def __rshift__(self, other: Tasklet) -> Tasklet: if other.loop_state & LoopIndicator.END: # same as case 1 self.composer.chain[self].add(other.loop_starter) - else: - self.composer.chain[self].add(other) - - if other.loop_state & LoopIndicator.END: - # same as case 1 self.composer.reverse_chain[other.loop_starter].add(self) else: + self.composer.chain[self].add(other) self.composer.reverse_chain[other].add(self) return other @@ -193,7 +199,7 @@ def __call__(self, ender: Tasklet) -> Tasklet: ------- ender: last tasklet in a loop """ - # composer is univercially shared across tasklets + # composer is universally shared across tasklets # let's get it from ender composer = ender.get_composer() @@ -225,6 +231,12 @@ def __call__(self, ender: Tasklet) -> Tasklet: tasklets_in_loop = composer.get_tasklets_in_loop(starter, ender) # for each tasklet in loop, loop_check_fn and loop_ender are updated for tasklet in tasklets_in_loop: + if tasklet.loop_starter and tasklet.loop_ender: + # if both loop_starter and loop_ender are already set, + # they are set for an inner loop + # so, don't update loop_starter and loop_ender in that case + continue + tasklet.loop_starter = starter tasklet.loop_check_fn = self.loop_check_fn tasklet.loop_ender = ender diff --git a/lib/python/flame/optimizer/abstract.py b/lib/python/flame/optimizer/abstract.py index 
7e001ce98..6dabfe128 100644 --- a/lib/python/flame/optimizer/abstract.py +++ b/lib/python/flame/optimizer/abstract.py @@ -13,16 +13,22 @@ # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 - - """optimizer abstract class.""" from abc import ABC, abstractmethod +from typing import Union + +from diskcache import Cache class AbstractOptimizer(ABC): """Abstract base class for optimizer implementation.""" @abstractmethod - def do(self) -> None: + def do(self, + cache: Cache, + *, + base_weights=None, + total: int = 0, + version: int = 0) -> Union[list, dict]: """Abstract method to conduct optimization.""" diff --git a/lib/python/flame/optimizer/fedavg.py b/lib/python/flame/optimizer/fedavg.py index e33617e95..d2c078871 100644 --- a/lib/python/flame/optimizer/fedavg.py +++ b/lib/python/flame/optimizer/fedavg.py @@ -15,6 +15,7 @@ # SPDX-License-Identifier: Apache-2.0 """Federated Averaging optimizer.""" import logging +from typing import Union from diskcache import Cache @@ -42,7 +43,12 @@ def __init__(self): "supported ml framework not found; " f"supported frameworks are: {valid_frameworks}") - def do(self, cache: Cache, total: int): + def do(self, + cache: Cache, + *, + base_weights=None, + total: int = 0, + version: int = 0) -> Union[list, dict]: """Do aggregates models of trainers. Return: aggregated model @@ -50,7 +56,7 @@ def do(self, cache: Cache, total: int): logger.debug("calling fedavg") # reset global weights before aggregation - self.agg_weights = None + self.agg_weights = base_weights if len(cache) == 0 or total == 0: return None diff --git a/lib/python/flame/optimizer/fedbuff.py b/lib/python/flame/optimizer/fedbuff.py new file mode 100644 index 000000000..2d0ec86c1 --- /dev/null +++ b/lib/python/flame/optimizer/fedbuff.py @@ -0,0 +1,100 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""FedBuff optimizer. + +The implementation is based on the following paper: +https://arxiv.org/pdf/2106.06639.pdf +https://arxiv.org/pdf/2111.04877.pdf + +SecAgg algorithm is not the scope of this implementation. +""" +import logging +import math +from typing import Union + +from diskcache import Cache + +from ..common.util import (MLFramework, get_ml_framework_in_use, + valid_frameworks) +from .abstract import AbstractOptimizer + +logger = logging.getLogger(__name__) + + +class FedBuff(AbstractOptimizer): + """FedBuff class.""" + + def __init__(self): + """Initialize FedBuff instance.""" + self.agg_weights = None + + ml_framework_in_use = get_ml_framework_in_use() + if ml_framework_in_use == MLFramework.PYTORCH: + self.aggregate_fn = self._aggregate_pytorch + elif ml_framework_in_use == MLFramework.TENSORFLOW: + self.aggregate_fn = self._aggregate_tesnorflow + else: + raise NotImplementedError( + "supported ml framework not found; " + f"supported frameworks are: {valid_frameworks}") + + def do(self, + cache: Cache, + *, + base_weights=None, + total: int = 0, + version: int = 0) -> Union[list, dict]: + """Do aggregates models of trainers. 
+ + Return: aggregated model + """ + logger.debug("calling fedbuff") + + # reset global weights before aggregation + self.agg_weights = base_weights + + if len(cache) == 0 or total == 0: + return None + + for k in list(cache.iterkeys()): + # after popping, the item is removed from the cache + # hence, explicit cache cleanup is not needed + tres = cache.pop(k) + + logger.debug(f"agg ver: {version}, trainer ver: {tres.version}") + # rate determined based on the staleness of local model + rate = 1 / math.sqrt(1 + version - tres.version) + self.aggregate_fn(tres, rate) + + return self.agg_weights + + def _aggregate_pytorch(self, tres, rate): + logger.debug("calling _aggregate_pytorch") + + if self.agg_weights is None: + self.agg_weights = {k: v * rate for k, v in tres.weights.items()} + else: + for k, v in tres.weights.items(): + self.agg_weights[k] += v * rate + + def _aggregate_tesnorflow(self, tres, rate): + logger.debug("calling _aggregate_tensorflow") + + if self.agg_weights is None: + self.agg_weights = [weight * rate for weight in tres.weights] + else: + for idx in range(len(tres.weights)): + self.agg_weights[idx] += tres.weights[idx] * rate diff --git a/lib/python/flame/optimizer/fedopt.py b/lib/python/flame/optimizer/fedopt.py index f08737b63..7156cd109 100644 --- a/lib/python/flame/optimizer/fedopt.py +++ b/lib/python/flame/optimizer/fedopt.py @@ -13,22 +13,23 @@ # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 +"""FedOPT optimizer. 
-"""FedOPT optimizer""" -"""https://arxiv.org/abs/2003.00295""" -from abc import abstractmethod +https://arxiv.org/abs/2003.00295""" import logging +from abc import abstractmethod +from collections import OrderedDict +from typing import Union from diskcache import Cache -from .fedavg import FedAvg from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) - -from collections import OrderedDict +from .fedavg import FedAvg logger = logging.getLogger(__name__) + class FedOPT(FedAvg): """FedOPT class.""" @@ -54,14 +55,22 @@ def __init__(self, beta_1, beta_2, eta, tau): "supported ml framework not found; " f"supported frameworks are: {valid_frameworks}") - def do(self, cache: Cache, total: int): + def do(self, + cache: Cache, + *, + base_weights=None, + total: int = 0, + version: int = 0) -> Union[list, dict]: """Do aggregates models of trainers. Return: aggregated model """ logger.debug("calling fedopt") - self.agg_weights = super().do(cache, total) + self.agg_weights = super().do(cache, + base_weights=base_weights, + total=total, + version=version) if self.agg_weights is None: return self.current_weights @@ -87,27 +96,51 @@ def _adapt_pytorch(self, average, current): self.d_t = {k: average[k] - current[k] for k in average.keys()} if self.m_t is None: - self.m_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()} - self.m_t = {k: self.beta_1 * self.m_t[k] + (1 - self.beta_1) * self.d_t[k] for k in self.m_t.keys()} + self.m_t = { + k: torch.zeros_like(self.d_t[k]) + for k in self.d_t.keys() + } + self.m_t = { + k: self.beta_1 * self.m_t[k] + (1 - self.beta_1) * self.d_t[k] + for k in self.m_t.keys() + } if self.v_t is None: - self.v_t = {k: torch.zeros_like(self.d_t[k]) for k in self.d_t.keys()} + self.v_t = { + k: torch.zeros_like(self.d_t[k]) + for k in self.d_t.keys() + } self._delta_v_pytorch() - self.current_weights = OrderedDict({k: self.current_weights[k] + self.eta * self.m_t[k] / (torch.sqrt(self.v_t[k]) + self.tau) for k in 
self.current_weights.keys()}) + self.current_weights = OrderedDict({ + k: self.current_weights[k] + self.eta * self.m_t[k] / + (torch.sqrt(self.v_t[k]) + self.tau) + for k in self.current_weights.keys() + }) def _adapt_tensorflow(self, average, current): import tensorflow as tf logger.debug("calling _adapt_tensorflow") - - self.d_t = [average[idx]-current[idx] for idx in range(len(average))] + + self.d_t = [average[idx] - current[idx] for idx in range(len(average))] if self.m_t is None: - self.m_t = [tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t))] - self.m_t = [self.beta_1 * self.m_t[idx] + (1 - self.beta_1) * self.d_t[idx] for idx in range(len(self.m_t))] + self.m_t = [ + tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t)) + ] + self.m_t = [ + self.beta_1 * self.m_t[idx] + (1 - self.beta_1) * self.d_t[idx] + for idx in range(len(self.m_t)) + ] if self.v_t is None: - self.v_t = [tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t))] + self.v_t = [ + tf.zeros_like(self.d_t[idx]) for idx in range(len(self.d_t)) + ] self._delta_v_tensorflow() - - self.current_weights = [self.current_weights[idx] + self.eta * self.m_t[idx] / (tf.sqrt(self.v_t[idx]) + self.tau) for idx in range(len(self.current_weights))] + + self.current_weights = [ + self.current_weights[idx] + self.eta * self.m_t[idx] / + (tf.sqrt(self.v_t[idx]) + self.tau) + for idx in range(len(self.current_weights)) + ] diff --git a/lib/python/flame/optimizer/train_result.py b/lib/python/flame/optimizer/train_result.py index c099be7f5..7314c74f8 100644 --- a/lib/python/flame/optimizer/train_result.py +++ b/lib/python/flame/optimizer/train_result.py @@ -13,15 +13,14 @@ # limitations under the License. 
# # SPDX-License-Identifier: Apache-2.0 - - -"""A class that contains train result.""" +"""A class that contains train result and its meta data.""" class TrainResult(object): """TrainResult class.""" - def __init__(self, weights=None, count=0): + def __init__(self, weights=None, count=0, version=0): """Initialize.""" self.weights = weights self.count = count + self.version = version diff --git a/lib/python/flame/optimizers.py b/lib/python/flame/optimizers.py index ef1f37fbb..b3e2c4e9f 100644 --- a/lib/python/flame/optimizers.py +++ b/lib/python/flame/optimizers.py @@ -13,14 +13,14 @@ # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 - """optimizer provider class.""" from .config import OptimizerType from .object_factory import ObjectFactory -from .optimizer.fedavg import FedAvg from .optimizer.fedadagrad import FedAdaGrad from .optimizer.fedadam import FedAdam +from .optimizer.fedavg import FedAvg +from .optimizer.fedbuff import FedBuff from .optimizer.fedyogi import FedYogi @@ -37,3 +37,4 @@ def get(self, optimizer_name, **kwargs): optimizer_provider.register(OptimizerType.FEDADAGRAD, FedAdaGrad) optimizer_provider.register(OptimizerType.FEDADAM, FedAdam) optimizer_provider.register(OptimizerType.FEDYOGI, FedYogi) +optimizer_provider.register(OptimizerType.FEDBUFF, FedBuff) diff --git a/lib/python/flame/selector/__init__.py b/lib/python/flame/selector/__init__.py index eb384bc88..ba8368bdc 100644 --- a/lib/python/flame/selector/__init__.py +++ b/lib/python/flame/selector/__init__.py @@ -13,7 +13,6 @@ # limitations under the License. 
# # SPDX-License-Identifier: Apache-2.0 - """selector abstract class.""" from abc import ABC, abstractmethod @@ -32,7 +31,7 @@ def __init__(self, **kwargs) -> None: """Initialize an instance with keyword-based arguments.""" for key, value in kwargs.items(): setattr(self, key, value) - self.selected_ends = list() + self.selected_ends = set() @abstractmethod def select(self, ends: dict[str, End], diff --git a/lib/python/flame/selector/default.py b/lib/python/flame/selector/default.py index 0ca27bbc2..e462f4d6d 100644 --- a/lib/python/flame/selector/default.py +++ b/lib/python/flame/selector/default.py @@ -41,7 +41,7 @@ def select(self, ends: dict[str, End], if len(self.selected_ends) == 0 or round > self.round: logger.debug(f"let's select the whole ends for new round {round}") - self.selected_ends = list(ends.keys()) + self.selected_ends = set(ends.keys()) self.round = round logger.debug(f"selected ends: {self.selected_ends}") diff --git a/lib/python/flame/selector/fedbuff.py b/lib/python/flame/selector/fedbuff.py new file mode 100644 index 000000000..d3f42d399 --- /dev/null +++ b/lib/python/flame/selector/fedbuff.py @@ -0,0 +1,139 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 +"""FedBuffSelector class.""" + +import logging +import random + +from ..channel import KEY_CH_STATE, VAL_CH_STATE_RECV, VAL_CH_STATE_SEND +from ..common.typing import Scalar +from ..end import KEY_END_STATE, VAL_END_STATE_NONE, VAL_END_STATE_RECVD, End +from . import AbstractSelector, SelectorReturnType + +logger = logging.getLogger(__name__) + + +class FedBuffSelector(AbstractSelector): + """A selector class for fedbuff-based asyncfl.""" + + def __init__(self, **kwargs): + """Initailize instance.""" + super().__init__(**kwargs) + + try: + self.c = kwargs['c'] + except KeyError: + raise KeyError("c (concurrency level) is not specified in config") + + self.round = 0 + + def select(self, ends: dict[str, End], + channel_props: dict[str, Scalar]) -> SelectorReturnType: + """Select ends from the given ends to meet concurrency level. + + This select method chooses ends differently depending on what state + a channel is in. + In 'send' state, it chooses ends that are not in self.selected_ends. + In 'recv' state, it chooses all ends from self.selected_ends. + Essentially, if an end is in self.selected_ends, it means that we sent + some message already to that end. For such an end, we exclude it from + send and include it for recv in return. 
+ """ + logger.debug("calling fedbuff select") + logger.debug(f"len(ends): {len(ends)}, c: {self.c}") + + concurrency = min(len(ends), self.c) + if concurrency == 0: + logger.debug("ends is empty") + return {} + + self.round = channel_props['round'] if 'round' in channel_props else 0 + + if KEY_CH_STATE not in channel_props: + raise KeyError("channel property doesn't have {KEY_CH_STATE}") + + self._cleanup_recvd_ends(ends) + results = {} + if channel_props[KEY_CH_STATE] == VAL_CH_STATE_SEND: + results = self._handle_send_state(ends, concurrency) + + elif channel_props[KEY_CH_STATE] == VAL_CH_STATE_RECV: + results = self._handle_recv_state(ends, concurrency) + + else: + state = channel_props[KEY_CH_STATE] + raise ValueError(f"unkown channel state: {state}") + + logger.debug(f"selected ends: {self.selected_ends}") + logger.debug(f"results: {results}") + + return results + + def _cleanup_recvd_ends(self, ends: dict[str, End]): + """Clean up ends whose a message was received, from selected ends.""" + logger.debug("clean up recvd ends") + logger.debug(f"ends: {ends}, selected ends: {self.selected_ends}") + for end_id in list(self.selected_ends): + if end_id not in ends: + # something happened to end of end_id + # (e.g., connection loss) + # let's remove it from selected_ends + logger.debug(f"no end id {end_id} in ends") + self.selected_ends.remove(end_id) + else: + state = ends[end_id].get_property(KEY_END_STATE) + logger.debug(f"end id {end_id} state: {state}") + if state == VAL_END_STATE_RECVD: + ends[end_id].set_property(KEY_END_STATE, + VAL_END_STATE_NONE) + self.selected_ends.remove(end_id) + + def _handle_send_state(self, ends: dict[str, End], + concurrency: int) -> SelectorReturnType: + extra = max(0, concurrency - len(self.selected_ends)) + logger.debug(f"c: {concurrency}, ends: {ends.keys()}") + candidates = [] + idx = 0 + # reservoir sampling + for end_id in ends.keys(): + if end_id in self.selected_ends: + # skip if an end is already selected + continue + + 
idx += 1 + if len(candidates) < extra: + candidates.append(end_id) + continue + + i = random.randrange(idx) + if i < extra: + candidates[i] = end_id + + logger.debug(f"candidates: {candidates}") + # add candidates to selected ends + self.selected_ends = set(list(self.selected_ends) + candidates) + + return {end_id: None for end_id in candidates} + + def _handle_recv_state(self, ends: dict[str, End], + concurrency: int) -> SelectorReturnType: + if len(self.selected_ends) == 0: + logger.debug(f"let's select {concurrency} ends") + self.selected_ends = set(random.sample(list(ends), concurrency)) + + logger.debug(f"selected ends: {self.selected_ends}") + + return {key: None for key in self.selected_ends} diff --git a/lib/python/flame/selector/random.py b/lib/python/flame/selector/random.py index f50d7fe8b..2c1cbb9df 100644 --- a/lib/python/flame/selector/random.py +++ b/lib/python/flame/selector/random.py @@ -13,7 +13,6 @@ # limitations under the License. # # SPDX-License-Identifier: Apache-2.0 - """RandomSelector class.""" import logging @@ -57,7 +56,7 @@ def select(self, ends: dict[str, End], if len(self.selected_ends) == 0 or round > self.round: logger.debug(f"let's select {k} ends for new round {round}") - self.selected_ends = random.sample(ends.keys(), k) + self.selected_ends = set(random.sample(list(ends), k)) self.round = round logger.debug(f"selected ends: {self.selected_ends}") diff --git a/lib/python/flame/selectors.py b/lib/python/flame/selectors.py index 889a19ba2..fafe66ef5 100644 --- a/lib/python/flame/selectors.py +++ b/lib/python/flame/selectors.py @@ -13,12 +13,12 @@ # limitations under the License. 
# # SPDX-License-Identifier: Apache-2.0 - """selector provider class.""" from .config import SelectorType from .object_factory import ObjectFactory from .selector.default import DefaultSelector +from .selector.fedbuff import FedBuffSelector from .selector.random import RandomSelector @@ -33,3 +33,4 @@ def get(self, selector_name, **kwargs): selector_provider = SelectorProvider() selector_provider.register(SelectorType.DEFAULT, DefaultSelector) selector_provider.register(SelectorType.RANDOM, RandomSelector) +selector_provider.register(SelectorType.FEDBUFF, FedBuffSelector) diff --git a/lib/python/setup.py b/lib/python/setup.py index 17933b91f..b834e86c7 100644 --- a/lib/python/setup.py +++ b/lib/python/setup.py @@ -19,7 +19,7 @@ setup( name='flame', - version='0.0.14', + version='0.0.15', author='Flame Maintainers', author_email='flame-github-owners@cisco.com', include_package_data=True, From 9c5590277a424c849f4d887c96408d394a8665cb Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Fri, 10 Feb 2023 10:02:46 -0800 Subject: [PATCH 05/16] fix+refactor: asyncfl loss divergence (#330) For asyncfl, a client (trainer) should send delta by subtracting local weights from original global weights after training. In the current implementation, the whole local weights were sent to a server (aggregator). This causes loss divergence. Supporting delta update requires refactoring of aggregators of synchronous fl (horizontal/{top_aggregator.py, middle_aggregator.py}) as well as optimizers' do() function. The changes here support delta update universally across all types of modes (horizontal synchronous, asynchronous, and hybrid). 
--- lib/python/flame/common/typing.py | 4 +- lib/python/flame/common/util.py | 21 +++++++++- .../middle_aggregator/config_uk.json | 14 ++++--- .../middle_aggregator/config_us.json | 14 ++++--- .../hier_mnist/top_aggregator/config.json | 10 +++-- .../hier_mnist/trainer/config_uk.json | 14 ++++--- .../hier_mnist/trainer/config_us.json | 14 ++++--- .../examples/mnist/aggregator/config.json | 4 +- .../flame/examples/mnist/trainer/config.json | 2 +- lib/python/flame/mode/distributed/trainer.py | 19 +++++---- .../horizontal/asyncfl/middle_aggregator.py | 41 +++++++++++++++---- .../mode/horizontal/asyncfl/top_aggregator.py | 20 +++------ .../mode/horizontal/middle_aggregator.py | 5 ++- .../flame/mode/horizontal/top_aggregator.py | 5 ++- lib/python/flame/mode/horizontal/trainer.py | 27 ++++++++---- lib/python/flame/mode/hybrid/trainer.py | 21 ++++++---- lib/python/flame/optimizer/abstract.py | 7 ++-- lib/python/flame/optimizer/fedavg.py | 33 ++++++++------- lib/python/flame/optimizer/fedbuff.py | 33 ++++++++------- lib/python/flame/optimizer/fedopt.py | 21 +++++++--- lib/python/setup.py | 2 +- 21 files changed, 219 insertions(+), 112 deletions(-) diff --git a/lib/python/flame/common/typing.py b/lib/python/flame/common/typing.py index 801ff1e42..c342fae79 100644 --- a/lib/python/flame/common/typing.py +++ b/lib/python/flame/common/typing.py @@ -13,7 +13,6 @@ # limitations under the License. 
# # SPDX-License-Identifier: Apache-2.0 - """Definitions on Types.""" from typing import Union @@ -21,3 +20,6 @@ Scalar = Union[bool, bytes, float, int, str] Metrics = dict[str, Scalar] + +# list for tensorflow, dict for pytorach +ModelWeights = Union[list, dict] diff --git a/lib/python/flame/common/util.py b/lib/python/flame/common/util.py index ec759d3c4..9c07869d2 100644 --- a/lib/python/flame/common/util.py +++ b/lib/python/flame/common/util.py @@ -21,11 +21,12 @@ from contextlib import contextmanager from enum import Enum from threading import Thread -from typing import List +from typing import List, Union from pip._internal.cli.main import main as pipmain from ..config import Config +from .typing import ModelWeights PYTORCH = 'torch' TENSORFLOW = 'tensorflow' @@ -115,3 +116,21 @@ def mlflow_runname(config: Config) -> str: groupby_value = groupby_value + val + "-" return config.role + '-' + groupby_value + config.task_id[:8] + + +def delta_weights_pytorch(a: ModelWeights, + b: ModelWeights) -> Union[ModelWeights, None]: + """Return delta weights for pytorch model weights.""" + if a is None or b is None: + return None + + return {x: a[x] - b[y] for (x, y) in zip(a, b)} + + +def delta_weights_tensorflow(a: ModelWeights, + b: ModelWeights) -> Union[ModelWeights, None]: + """Return delta weights for tensorflow model weights.""" + if a is None or b is None: + return None + + return [x - y for (x, y) in zip(a, b)] diff --git a/lib/python/flame/examples/hier_mnist/middle_aggregator/config_uk.json b/lib/python/flame/examples/hier_mnist/middle_aggregator/config_uk.json index aec3af91e..db3d31e6b 100644 --- a/lib/python/flame/examples/hier_mnist/middle_aggregator/config_uk.json +++ b/lib/python/flame/examples/hier_mnist/middle_aggregator/config_uk.json @@ -1,10 +1,14 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa743", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": 
"localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -64,12 +68,12 @@ "name": "mnist" }, "registry": { - "sort": "mlflow", + "sort": "dummy", "uri": "http://flame-mlflow:5000" }, "selector": { - "sort": "random", - "kwargs": {"k": 1} + "sort": "default", + "kwargs": {} }, "maxRunTime": 300, "realm": "default/uk/london/org2/flame", diff --git a/lib/python/flame/examples/hier_mnist/middle_aggregator/config_us.json b/lib/python/flame/examples/hier_mnist/middle_aggregator/config_us.json index 8f4b09575..02e17163e 100644 --- a/lib/python/flame/examples/hier_mnist/middle_aggregator/config_us.json +++ b/lib/python/flame/examples/hier_mnist/middle_aggregator/config_us.json @@ -1,10 +1,14 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa744", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -64,12 +68,12 @@ "name": "mnist" }, "registry": { - "sort": "mlflow", + "sort": "dummy", "uri": "http://flame-mlflow:5000" }, "selector": { - "sort": "random", - "kwargs": {"k": 1} + "sort": "default", + "kwargs": {} }, "maxRunTime": 300, "realm": "default/us/west/org1/flame", diff --git a/lib/python/flame/examples/hier_mnist/top_aggregator/config.json b/lib/python/flame/examples/hier_mnist/top_aggregator/config.json index 33889a398..3dcf24010 100644 --- a/lib/python/flame/examples/hier_mnist/top_aggregator/config.json +++ b/lib/python/flame/examples/hier_mnist/top_aggregator/config.json @@ -1,10 +1,14 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -45,7 +49,7 @@ "name": "mnist" }, "registry": { - "sort": "mlflow", + "sort": "dummy", "uri": "http://flame-mlflow:5000" }, "selector": { diff --git 
a/lib/python/flame/examples/hier_mnist/trainer/config_uk.json b/lib/python/flame/examples/hier_mnist/trainer/config_uk.json index c3d74d707..b52b3aa6c 100644 --- a/lib/python/flame/examples/hier_mnist/trainer/config_uk.json +++ b/lib/python/flame/examples/hier_mnist/trainer/config_uk.json @@ -1,10 +1,14 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa745", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -46,12 +50,12 @@ "name": "mnist" }, "registry": { - "sort": "mlflow", + "sort": "dummy", "uri": "http://flame-mlflow:5000" }, "selector": { - "sort": "random", - "kwargs": {"k": 1} + "sort": "default", + "kwargs": {} }, "maxRunTime": 300, "realm": "default/uk/london/org2/machine1", diff --git a/lib/python/flame/examples/hier_mnist/trainer/config_us.json b/lib/python/flame/examples/hier_mnist/trainer/config_us.json index 59afad3b2..9fb50e04e 100644 --- a/lib/python/flame/examples/hier_mnist/trainer/config_us.json +++ b/lib/python/flame/examples/hier_mnist/trainer/config_us.json @@ -1,10 +1,14 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa746", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -46,12 +50,12 @@ "name": "mnist" }, "registry": { - "sort": "mlflow", + "sort": "dummy", "uri": "http://flame-mlflow:5000" }, "selector": { - "sort": "random", - "kwargs": {"k": 1} + "sort": "default", + "kwargs": {} }, "maxRunTime": 300, "realm": "default/us/west/org1/machine1", diff --git a/lib/python/flame/examples/mnist/aggregator/config.json b/lib/python/flame/examples/mnist/aggregator/config.json index 24d81e9e6..ac8efe28f 100644 --- a/lib/python/flame/examples/mnist/aggregator/config.json +++ b/lib/python/flame/examples/mnist/aggregator/config.json @@ -1,6 +1,6 @@ { 
"taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { "host": "localhost", @@ -38,7 +38,7 @@ "hyperparameters": { "batchSize": 32, "learningRate": 0.01, - "rounds": 5 + "rounds": 20 }, "baseModel": { "name": "", diff --git a/lib/python/flame/examples/mnist/trainer/config.json b/lib/python/flame/examples/mnist/trainer/config.json index cf049e888..c640681cd 100644 --- a/lib/python/flame/examples/mnist/trainer/config.json +++ b/lib/python/flame/examples/mnist/trainer/config.json @@ -1,6 +1,6 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { "host": "localhost", diff --git a/lib/python/flame/mode/distributed/trainer.py b/lib/python/flame/mode/distributed/trainer.py index 577207d10..c39c6e3e2 100644 --- a/lib/python/flame/mode/distributed/trainer.py +++ b/lib/python/flame/mode/distributed/trainer.py @@ -18,10 +18,12 @@ # import hashlib import logging from collections import OrderedDict +from copy import deepcopy from ...channel_manager import ChannelManager from ...common.custom_abcmeta import ABCMeta, abstract_attribute -from ...common.util import (MLFramework, get_ml_framework_in_use, +from ...common.util import (MLFramework, delta_weights_pytorch, + delta_weights_tensorflow, get_ml_framework_in_use, mlflow_runname, valid_frameworks) from ...registries import registry_provider from ..composer import Composer @@ -53,7 +55,6 @@ def init_cm(self) -> None: def internal_init(self) -> None: """Initialize internal state for role.""" - self.registry_client = registry_provider.get(self.config.registry.sort) # initialize registry client self.registry_client(self.config.registry.uri, self.config.job.job_id) @@ -61,8 +62,7 @@ def internal_init(self) -> None: base_model = self.config.base_model if base_model and base_model.name != "" and base_model.version > 0: self.model = self.registry_client.load_model( - base_model.name, base_model.version - ) + 
base_model.name, base_model.version) self.ring_weights = None # latest model weights from ring all-reduce self.registry_client.setup_run(mlflow_runname(self.config)) @@ -80,20 +80,21 @@ def internal_init(self) -> None: if self.framework == MLFramework.UNKNOWN: raise NotImplementedError( "supported ml framework not found; " - f"supported frameworks are: {valid_frameworks}" - ) + f"supported frameworks are: {valid_frameworks}") if self.framework == MLFramework.PYTORCH: self._scale_down_weights_fn = self._scale_down_weights_pytorch self._get_send_chunk_fn = self._get_send_chunk_pytorch self._allreduce_fn = self._allreduce_pytorch self._allgather_fn = self._allgather_pytorch + self._delta_weights_fn = delta_weights_pytorch elif self.framework == MLFramework.TENSORFLOW: self._scale_down_weights_fn = self._scale_down_weights_tensorflow self._get_send_chunk_fn = self._get_send_chunk_tensorflow self._allreduce_fn = self._allreduce_tensorflow self._allgather_fn = self._allgather_tensorflow + self._delta_weights_fn = delta_weights_tensorflow def _ring_allreduce(self, tag: str) -> None: if tag != TAG_RING_ALLREDUCE: @@ -420,6 +421,9 @@ def _update_model(self): def _update_weights(self): """Save weights from model.""" + # save weights before updating it + self.prev_weights = deepcopy(self.weights) + if self.framework == MLFramework.PYTORCH: self.weights = self.model.state_dict() elif self.framework == MLFramework.TENSORFLOW: @@ -484,8 +488,7 @@ def compose(self) -> None: loop = Loop(loop_check_fn=lambda: self._work_done) task_init_cm >> task_internal_init >> task_load_data >> task_init >> loop( task_train >> task_allreduce >> task_eval >> task_save_metrics - >> task_increment_round - ) >> task_save_params >> task_save_model + >> task_increment_round) >> task_save_params >> task_save_model def run(self) -> None: """Run role.""" diff --git a/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py b/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py index 
19737ec7d..9f4ca7104 100644 --- a/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py +++ b/lib/python/flame/mode/horizontal/asyncfl/middle_aggregator.py @@ -17,8 +17,12 @@ import logging import time +from copy import deepcopy from ....channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND +from ....common.util import (MLFramework, delta_weights_pytorch, + delta_weights_tensorflow, get_ml_framework_in_use, + valid_frameworks) from ....optimizer.train_result import TrainResult from ...composer import Composer from ...message import MessageType @@ -49,6 +53,18 @@ def internal_init(self) -> None: if 'aggGoal' in self.config.hyperparameters: self._agg_goal = self.config.hyperparameters['aggGoal'] + self.framework = get_ml_framework_in_use() + if self.framework == MLFramework.UNKNOWN: + raise NotImplementedError( + "supported ml framework not found; " + f"supported frameworks are: {valid_frameworks}") + + if self.framework == MLFramework.PYTORCH: + self._delta_weights_fn = delta_weights_pytorch + + elif self.framework == MLFramework.TENSORFLOW: + self._delta_weights_fn = delta_weights_tensorflow + def _reset_agg_goal_variables(self): logger.debug("reset agg goal variables") # reset agg goal count @@ -115,7 +131,7 @@ def _aggregate_weights(self, tag: str) -> None: if self._agg_goal_weights is None: logger.debug(f"type of weights: {type(self.weights)}") - self._agg_goal_weights = self.weights.copy() + self._agg_goal_weights = deepcopy(self.weights) # receive local model parameters from a trainer who arrives first end, msg = next(channel.recv_fifo(channel.ends(VAL_CH_STATE_RECV), 1)) @@ -141,11 +157,10 @@ def _aggregate_weights(self, tag: str) -> None: # save training result from trainer in a disk cache self.cache[end] = tres - self._agg_goal_weights = self.optimizer.do( - self.cache, - base_weights=self._agg_goal_weights, - total=count, - version=self._round) + self._agg_goal_weights = self.optimizer.do(self._agg_goal_weights, + self.cache, + total=count, + 
version=self._round) # increment agg goal count self._agg_goal_cnt += 1 @@ -161,6 +176,9 @@ def _aggregate_weights(self, tag: str) -> None: time.sleep(1) return + # save global weights before updating it + self.prev_weights = self.weights + # set global weights self.weights = self._agg_goal_weights @@ -178,9 +196,12 @@ def _send_weights(self, tag: str) -> None: # one aggregator is sufficient end = channel.one_end(VAL_CH_STATE_SEND) + + delta_weights = self._delta_weights_fn(self.weights, self.prev_weights) + channel.send( end, { - MessageType.WEIGHTS: self.weights, + MessageType.WEIGHTS: delta_weights, MessageType.DATASET_SIZE: self.dataset_size, MessageType.MODEL_VERSION: self._round }) @@ -198,7 +219,11 @@ def _send_dummy_weights(self, tag: str) -> None: # one aggregator is sufficient end = channel.one_end(VAL_CH_STATE_SEND) - dummy_msg = {MessageType.WEIGHTS: None, MessageType.DATASET_SIZE: 0} + dummy_msg = { + MessageType.WEIGHTS: None, + MessageType.DATASET_SIZE: 0, + MessageType.MODEL_VERSION: self._round + } channel.send(end, dummy_msg) logger.debug("sending dummy weights done") diff --git a/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py b/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py index cfdc65a17..55f550ad7 100644 --- a/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py +++ b/lib/python/flame/mode/horizontal/asyncfl/top_aggregator.py @@ -17,6 +17,7 @@ import logging import time +from copy import deepcopy from ....channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND from ....optimizer.train_result import TrainResult @@ -62,7 +63,7 @@ def _aggregate_weights(self, tag: str) -> None: if self._agg_goal_weights is None: logger.debug(f"type of weights: {type(self.weights)}") - self._agg_goal_weights = self.weights.copy() + self._agg_goal_weights = deepcopy(self.weights) # receive local model parameters from a trainer who arrives first end, msg = next(channel.recv_fifo(channel.ends(VAL_CH_STATE_RECV), 1)) @@ -73,14 +74,6 @@ def 
_aggregate_weights(self, tag: str) -> None: logger.debug(f"received data from {end}") if MessageType.WEIGHTS in msg: - # TODO: client should send delta instead of whole weights; - # in the current implementation without detla transmission, - # fedbuff algorithm's loss function diverages. - # This needs code refactoring optimizer as well as - # trainer code across all different mode, which involves - # extensive testing of other code. - # The whole change should be done separately to avoid - # too many changes. weights = msg[MessageType.WEIGHTS] if MessageType.DATASET_SIZE in msg: @@ -96,11 +89,10 @@ def _aggregate_weights(self, tag: str) -> None: # save training result from trainer in a disk cache self.cache[end] = tres - self._agg_goal_weights = self.optimizer.do( - self.cache, - base_weights=self._agg_goal_weights, - total=count, - version=self._round) + self._agg_goal_weights = self.optimizer.do(self._agg_goal_weights, + self.cache, + total=count, + version=self._round) # increment agg goal count self._agg_goal_cnt += 1 diff --git a/lib/python/flame/mode/horizontal/middle_aggregator.py b/lib/python/flame/mode/horizontal/middle_aggregator.py index fc7367cd1..4d669bb1c 100644 --- a/lib/python/flame/mode/horizontal/middle_aggregator.py +++ b/lib/python/flame/mode/horizontal/middle_aggregator.py @@ -17,6 +17,7 @@ import logging import time +from copy import deepcopy from diskcache import Cache @@ -157,7 +158,9 @@ def _aggregate_weights(self, tag: str) -> None: self.cache[end] = tres # optimizer conducts optimization (in this case, aggregation) - global_weights = self.optimizer.do(self.cache, total=total) + global_weights = self.optimizer.do(deepcopy(self.weights), + self.cache, + total=total) if global_weights is None: logger.debug("failed model aggregation") time.sleep(1) diff --git a/lib/python/flame/mode/horizontal/top_aggregator.py b/lib/python/flame/mode/horizontal/top_aggregator.py index 367bc03f6..bce576b5e 100644 --- 
a/lib/python/flame/mode/horizontal/top_aggregator.py +++ b/lib/python/flame/mode/horizontal/top_aggregator.py @@ -17,6 +17,7 @@ import logging import time +from copy import deepcopy from diskcache import Cache @@ -129,7 +130,9 @@ def _aggregate_weights(self, tag: str) -> None: self.cache[end] = tres # optimizer conducts optimization (in this case, aggregation) - global_weights = self.optimizer.do(self.cache, total=total) + global_weights = self.optimizer.do(deepcopy(self.weights), + self.cache, + total=total) if global_weights is None: logger.debug("failed model aggregation") time.sleep(1) diff --git a/lib/python/flame/mode/horizontal/trainer.py b/lib/python/flame/mode/horizontal/trainer.py index 6fd8daf8c..51bffc5d9 100644 --- a/lib/python/flame/mode/horizontal/trainer.py +++ b/lib/python/flame/mode/horizontal/trainer.py @@ -20,7 +20,8 @@ from ...channel import VAL_CH_STATE_RECV, VAL_CH_STATE_SEND from ...channel_manager import ChannelManager from ...common.custom_abcmeta import ABCMeta, abstract_attribute -from ...common.util import (MLFramework, get_ml_framework_in_use, +from ...common.util import (MLFramework, delta_weights_pytorch, + delta_weights_tensorflow, get_ml_framework_in_use, mlflow_runname, valid_frameworks) from ...registries import registry_provider from ..composer import Composer @@ -67,6 +68,12 @@ def internal_init(self) -> None: "supported ml framework not found; " f"supported frameworks are: {valid_frameworks}") + if self.framework == MLFramework.PYTORCH: + self._delta_weights_fn = delta_weights_pytorch + + elif self.framework == MLFramework.TENSORFLOW: + self._delta_weights_fn = delta_weights_tensorflow + def get(self, tag: str) -> None: """Get data from remote role(s).""" if tag == TAG_FETCH: @@ -117,12 +124,15 @@ def _send_weights(self, tag: str) -> None: end = channel.one_end(VAL_CH_STATE_SEND) self._update_weights() - channel.send( - end, { - MessageType.WEIGHTS: self.weights, - MessageType.DATASET_SIZE: self.dataset_size, - 
MessageType.MODEL_VERSION: self._round - }) + + delta_weights = self._delta_weights_fn(self.weights, self.prev_weights) + + msg = { + MessageType.WEIGHTS: delta_weights, + MessageType.DATASET_SIZE: self.dataset_size, + MessageType.MODEL_VERSION: self._round + } + channel.send(end, msg) logger.debug("sending weights done") def save_metrics(self): @@ -143,6 +153,9 @@ def _update_model(self): self.model.set_weights(self.weights) def _update_weights(self): + # save weights before updating it + self.prev_weights = self.weights + if self.framework == MLFramework.PYTORCH: self.weights = self.model.state_dict() elif self.framework == MLFramework.TENSORFLOW: diff --git a/lib/python/flame/mode/hybrid/trainer.py b/lib/python/flame/mode/hybrid/trainer.py index 567ff22c0..2d28e0a32 100644 --- a/lib/python/flame/mode/hybrid/trainer.py +++ b/lib/python/flame/mode/hybrid/trainer.py @@ -67,18 +67,20 @@ def _fetch_weights(self, tag: str) -> None: logger.debug(f"channel not found with tag {tag}") return - while channel.empty(): - time.sleep(1) - logger.debug("waiting for channel ends") + # this call waits for at least one peer joins this channel + channel.await_join() # one aggregator is sufficient end = channel.one_end() msg = channel.recv(end) + if MessageType.WEIGHTS in msg: self.weights = msg[MessageType.WEIGHTS] self._update_model() + if MessageType.EOT in msg: self._work_done = msg[MessageType.EOT] + if MessageType.ROUND in msg: self._round = msg[MessageType.ROUND] @@ -99,9 +101,8 @@ def _upload_weights(self, tag: str) -> None: logger.debug(f"channel not found with {tag}") return - while channel.empty(): - time.sleep(1) - logger.debug("waiting for channel ends") + # this call waits for at least one peer to join this channel + channel.await_join() # one aggregator is sufficient end = channel.one_end() @@ -131,7 +132,13 @@ def _upload_weights(self, tag: str) -> None: logger.debug("sending dummy weights") weights, size = None, 0 - msg = {MessageType.WEIGHTS: weights, 
MessageType.DATASET_SIZE: size} + delta_weights = self._delta_weights_fn(weights, self.prev_weights) + + msg = { + MessageType.WEIGHTS: delta_weights, + MessageType.DATASET_SIZE: size, + MessageType.MODEL_VERSION: self._round + } channel.send(end, msg) def compose(self) -> None: diff --git a/lib/python/flame/optimizer/abstract.py b/lib/python/flame/optimizer/abstract.py index 6dabfe128..4ea3317c6 100644 --- a/lib/python/flame/optimizer/abstract.py +++ b/lib/python/flame/optimizer/abstract.py @@ -16,19 +16,20 @@ """optimizer abstract class.""" from abc import ABC, abstractmethod -from typing import Union from diskcache import Cache +from ..common.typing import ModelWeights + class AbstractOptimizer(ABC): """Abstract base class for optimizer implementation.""" @abstractmethod def do(self, + base_weights: ModelWeights, cache: Cache, *, - base_weights=None, total: int = 0, - version: int = 0) -> Union[list, dict]: + version: int = 0) -> ModelWeights: """Abstract method to conduct optimization.""" diff --git a/lib/python/flame/optimizer/fedavg.py b/lib/python/flame/optimizer/fedavg.py index d2c078871..4ec43acda 100644 --- a/lib/python/flame/optimizer/fedavg.py +++ b/lib/python/flame/optimizer/fedavg.py @@ -15,10 +15,10 @@ # SPDX-License-Identifier: Apache-2.0 """Federated Averaging optimizer.""" import logging -from typing import Union from diskcache import Cache +from ..common.typing import ModelWeights from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) from .abstract import AbstractOptimizer @@ -44,17 +44,28 @@ def __init__(self): f"supported frameworks are: {valid_frameworks}") def do(self, + base_weights: ModelWeights, cache: Cache, *, - base_weights=None, total: int = 0, - version: int = 0) -> Union[list, dict]: + version: int = 0) -> ModelWeights: """Do aggregates models of trainers. 
- Return: aggregated model + Parameters + ---------- + base_weights: weights to be used as base + cache: a container that includes a list of weights for aggregation + total: a number of data samples used to train weights in cache + version: a version number of base weights + + Returns + ------- + aggregated model: type is either list (tensorflow) or dict (pytorch) """ logger.debug("calling fedavg") + assert (base_weights is not None) + # reset global weights before aggregation self.agg_weights = base_weights @@ -74,17 +85,11 @@ def do(self, def _aggregate_pytorch(self, tres, rate): logger.debug("calling _aggregate_pytorch") - if self.agg_weights is None: - self.agg_weights = {k: v * rate for k, v in tres.weights.items()} - else: - for k, v in tres.weights.items(): - self.agg_weights[k] += v * rate + for k, v in tres.weights.items(): + self.agg_weights[k] += v * rate def _aggregate_tensorflow(self, tres, rate): logger.debug("calling _aggregate_tensorflow") - if self.agg_weights is None: - self.agg_weights = [weight * rate for weight in tres.weights] - else: - for idx in range(len(tres.weights)): - self.agg_weights[idx] += tres.weights[idx] * rate + for idx in range(len(tres.weights)): + self.agg_weights[idx] += tres.weights[idx] * rate diff --git a/lib/python/flame/optimizer/fedbuff.py b/lib/python/flame/optimizer/fedbuff.py index 2d0ec86c1..98bcb0ce4 100644 --- a/lib/python/flame/optimizer/fedbuff.py +++ b/lib/python/flame/optimizer/fedbuff.py @@ -23,10 +23,10 @@ """ import logging import math -from typing import Union from diskcache import Cache +from ..common.typing import ModelWeights from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) from .abstract import AbstractOptimizer @@ -52,17 +52,28 @@ def __init__(self): f"supported frameworks are: {valid_frameworks}") def do(self, + base_weights: ModelWeights, cache: Cache, *, - base_weights=None, total: int = 0, - version: int = 0) -> Union[list, dict]: + version: int = 0) -> 
ModelWeights: """Do aggregates models of trainers. - Return: aggregated model + Parameters + ---------- + base_weights: weights to be used as base + cache: a container that includes a list of weights for aggregation + total: a number of data samples used to train weights in cache + version: a version number of base weights + + Returns + ------- + aggregated model: type is either list (tensorflow) or dict (pytorch) """ logger.debug("calling fedbuff") + assert (base_weights is not None) + # reset global weights before aggregation self.agg_weights = base_weights @@ -84,17 +95,11 @@ def do(self, def _aggregate_pytorch(self, tres, rate): logger.debug("calling _aggregate_pytorch") - if self.agg_weights is None: - self.agg_weights = {k: v * rate for k, v in tres.weights.items()} - else: - for k, v in tres.weights.items(): - self.agg_weights[k] += v * rate + for k, v in tres.weights.items(): + self.agg_weights[k] += v * rate def _aggregate_tesnorflow(self, tres, rate): logger.debug("calling _aggregate_tensorflow") - if self.agg_weights is None: - self.agg_weights = [weight * rate for weight in tres.weights] - else: - for idx in range(len(tres.weights)): - self.agg_weights[idx] += tres.weights[idx] * rate + for idx in range(len(tres.weights)): + self.agg_weights[idx] += tres.weights[idx] * rate diff --git a/lib/python/flame/optimizer/fedopt.py b/lib/python/flame/optimizer/fedopt.py index 7156cd109..cd5413fc2 100644 --- a/lib/python/flame/optimizer/fedopt.py +++ b/lib/python/flame/optimizer/fedopt.py @@ -19,10 +19,10 @@ import logging from abc import abstractmethod from collections import OrderedDict -from typing import Union from diskcache import Cache +from ..common.typing import ModelWeights from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) from .fedavg import FedAvg @@ -56,19 +56,28 @@ def __init__(self, beta_1, beta_2, eta, tau): f"supported frameworks are: {valid_frameworks}") def do(self, + base_weights: ModelWeights, cache: Cache, *, 
- base_weights=None, total: int = 0, - version: int = 0) -> Union[list, dict]: + version: int = 0) -> ModelWeights: """Do aggregates models of trainers. - Return: aggregated model + Parameters + ---------- + base_weights: weights to be used as base + cache: a container that includes a list of weights for aggregation + total: a number of data samples used to train weights in cache + version: a version number of base weights + + Returns + ------- + aggregated model: type is either list (tensorflow) or dict (pytorch) """ logger.debug("calling fedopt") - self.agg_weights = super().do(cache, - base_weights=base_weights, + self.agg_weights = super().do(base_weights, + cache, total=total, version=version) if self.agg_weights is None: diff --git a/lib/python/setup.py b/lib/python/setup.py index b834e86c7..1c7232190 100644 --- a/lib/python/setup.py +++ b/lib/python/setup.py @@ -19,7 +19,7 @@ setup( name='flame', - version='0.0.15', + version='0.0.16', author='Flame Maintainers', author_email='flame-github-owners@cisco.com', include_package_data=True, From 8b3b938728018f28dc3452968c7cd2bb5565d096 Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Tue, 14 Feb 2023 18:05:18 -0800 Subject: [PATCH 06/16] fix: conflict bewtween integer tensor and float tensor (#335) Model architectures can have integer tensors. Applying aggregation on those tensors results in type mistmatch and throws a runtime error: "RuntimeError: result type Float can't be cast to the desired output type Long" Integer tensors don't matter in back propagation. So, as a workaround to the issue, we typecast to the original dtype when the original type is different from the dtype of weighted tensors for aggregation. In this way, we can keep the model architecture as is. 
--- lib/python/flame/optimizer/fedavg.py | 13 ++++++++++++- lib/python/flame/optimizer/fedbuff.py | 13 ++++++++++++- 2 files changed, 24 insertions(+), 2 deletions(-) diff --git a/lib/python/flame/optimizer/fedavg.py b/lib/python/flame/optimizer/fedavg.py index 4ec43acda..c405b92cc 100644 --- a/lib/python/flame/optimizer/fedavg.py +++ b/lib/python/flame/optimizer/fedavg.py @@ -86,7 +86,18 @@ def _aggregate_pytorch(self, tres, rate): logger.debug("calling _aggregate_pytorch") for k, v in tres.weights.items(): - self.agg_weights[k] += v * rate + tmp = v * rate + # tmp.dtype is always float32 or double as rate is float + # if v.dtype is integer (int32 or int64), there is type mismatch + # this leads to the following error when self.agg_weights[k] += tmp: + # RuntimeError: result type Float can't be cast to the desired + # output type Long + # To handle this issue, we typecast tmp to the original type of v + # + # TODO: this may need to be revisited + tmp = tmp.to(dtype=v.dtype) if tmp.dtype != v.dtype else tmp + + self.agg_weights[k] += tmp def _aggregate_tensorflow(self, tres, rate): logger.debug("calling _aggregate_tensorflow") diff --git a/lib/python/flame/optimizer/fedbuff.py b/lib/python/flame/optimizer/fedbuff.py index 98bcb0ce4..8895a5ca5 100644 --- a/lib/python/flame/optimizer/fedbuff.py +++ b/lib/python/flame/optimizer/fedbuff.py @@ -96,7 +96,18 @@ def _aggregate_pytorch(self, tres, rate): logger.debug("calling _aggregate_pytorch") for k, v in tres.weights.items(): - self.agg_weights[k] += v * rate + tmp = v * rate + # tmp.dtype is always float32 or double as rate is float + # if v.dtype is integer (int32 or int64), there is type mismatch + # this leads to the following error when self.agg_weights[k] += tmp: + # RuntimeError: result type Float can't be cast to the desired + # output type Long + # To handle this issue, we typecast tmp to the original type of v + # + # TODO: this may need to be revisited + tmp = tmp.to(dtype=v.dtype) if tmp.dtype != v.dtype 
else tmp + + self.agg_weights[k] += tmp def _aggregate_tesnorflow(self, tres, rate): logger.debug("calling _aggregate_tensorflow") From caaac4bb186a74e65613a60f308771970caf6499 Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Wed, 15 Feb 2023 09:50:06 -0800 Subject: [PATCH 07/16] refactor: config for hybrid example in library (#334) To enable library-only execution for hybrid example, its configuration files are updated accordingly. The revised configuration has local mqtt and p2p broker config and p2p broker is selected. --- .../examples/hybrid/aggregator/config.json | 8 +++++-- .../hybrid/trainer/config_eu_org1.json | 24 ++++++++++++++----- .../hybrid/trainer/config_eu_org2.json | 24 ++++++++++++++----- .../hybrid/trainer/config_us_org1.json | 24 ++++++++++++++----- .../hybrid/trainer/config_us_org2.json | 24 ++++++++++++++----- 5 files changed, 78 insertions(+), 26 deletions(-) diff --git a/lib/python/flame/examples/hybrid/aggregator/config.json b/lib/python/flame/examples/hybrid/aggregator/config.json index 70808223b..f65417310 100644 --- a/lib/python/flame/examples/hybrid/aggregator/config.json +++ b/lib/python/flame/examples/hybrid/aggregator/config.json @@ -1,10 +1,14 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ diff --git a/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json b/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json index 37da8a3c5..6a6501d19 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json +++ b/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json @@ -1,10 +1,14 @@ { "taskid": "205f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": 
"p2p" } ], "channels": [ @@ -79,20 +83,28 @@ "role": "trainer", "channelConfigs": { "param-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto2", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] }, "global-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] } diff --git a/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json b/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json index b39ceca01..406c95e2b 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json +++ b/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json @@ -1,10 +1,14 @@ { "taskid": "305f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -79,20 +83,28 @@ "role": "trainer", "channelConfigs": { "param-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto2", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] }, "global-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] } diff --git a/lib/python/flame/examples/hybrid/trainer/config_us_org1.json b/lib/python/flame/examples/hybrid/trainer/config_us_org1.json index ea6e86e21..9b200a821 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_us_org1.json +++ b/lib/python/flame/examples/hybrid/trainer/config_us_org1.json @@ -1,10 +1,14 @@ { "taskid": "405f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + 
"host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -79,20 +83,28 @@ "role": "trainer", "channelConfigs": { "param-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto2", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] }, "global-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] } diff --git a/lib/python/flame/examples/hybrid/trainer/config_us_org2.json b/lib/python/flame/examples/hybrid/trainer/config_us_org2.json index 187cf5c57..7a61c87b6 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_us_org2.json +++ b/lib/python/flame/examples/hybrid/trainer/config_us_org2.json @@ -1,10 +1,14 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -79,20 +83,28 @@ "role": "trainer", "channelConfigs": { "param-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto2", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] }, "global-channel": { - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ] } From c3c2426bf549e73bdfe8970dc824b6fc1e5aa428 Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Fri, 17 Feb 2023 08:56:06 -0800 Subject: [PATCH 08/16] misc: asynchronous hierarchical fl example (#340) Since the Flame SDK supports asynchronous FL, we add an example of an asynchronous hierarchical FL for control plane. 
--- examples/asyncfl_hier_mnist/README.md | 93 ++++++++++++++++++ .../asyncfl_hier_mnist/async_hier_mnist.zip | Bin 0 -> 4816 bytes .../dataset_eu_germany.json | 15 +++ .../asyncfl_hier_mnist/dataset_eu_uk.json | 15 +++ .../asyncfl_hier_mnist/dataset_na_canada.json | 15 +++ .../asyncfl_hier_mnist/dataset_na_us.json | 15 +++ examples/asyncfl_hier_mnist/job.json | 43 ++++++++ examples/asyncfl_hier_mnist/schema.json | 58 +++++++++++ 8 files changed, 254 insertions(+) create mode 100644 examples/asyncfl_hier_mnist/README.md create mode 100644 examples/asyncfl_hier_mnist/async_hier_mnist.zip create mode 100644 examples/asyncfl_hier_mnist/dataset_eu_germany.json create mode 100644 examples/asyncfl_hier_mnist/dataset_eu_uk.json create mode 100644 examples/asyncfl_hier_mnist/dataset_na_canada.json create mode 100644 examples/asyncfl_hier_mnist/dataset_na_us.json create mode 100644 examples/asyncfl_hier_mnist/job.json create mode 100644 examples/asyncfl_hier_mnist/schema.json diff --git a/examples/asyncfl_hier_mnist/README.md b/examples/asyncfl_hier_mnist/README.md new file mode 100644 index 000000000..b62963b31 --- /dev/null +++ b/examples/asyncfl_hier_mnist/README.md @@ -0,0 +1,93 @@ +## Asynchronous Hierarchical Federated Learning with MNIST + +This example is based on asynchronous federated learning implementation in the Flame's SDK. The implemention is based on research papers from [here](https://arxiv.org/pdf/2106.06639.pdf) and [here](https://arxiv.org/pdf/2111.04877.pdf). + +We use the MNIST dataset to walk through an example of asynchronous hierarchical federated learning with Flame. + +We assume that a fiab environment is set up properly. To set it up, refer to [this document](../../docs/03-fiab.md). + + +**Note**: You need to add `--insecure` to all the `flamectl` command when the example is used in a fiab environment. 
+ +### Step 1: create a design + +```bash +flamectl create design async_hier_mnist -d "asynchronous hierarchical FL mnist example" +``` + +### Step 2: create a schema + +```bash +flamectl create schema schema.json --design async_hier_mnist +``` + +The schema defines the topology of this FL job. For more info, please refer to [05-flame-basics](05-flame-basics.md). + +### Step 3: add code to the design + +```bash +flamectl create code async_hier_mnist.zip --design async_hier_mnist +``` + +### Step 4: create datasets + +```bash +$ flamectl create dataset dataset_na_us.json +New dataset created successfully + dataset ID: "629e3095741b82c266a41478" +``` + +Copy the Dataset ID into `job.json`, and repeat for other datasets. + +```bash +flamectl create dataset dataset_na_canada.json +flamectl create dataset dataset_eu_germany.json +flamectl create dataset dataset_eu_uk.json +``` + +Replace the dataset IDs generated with the ones existing in `job.json`. + +Flame will assign a trainer to each dataset. As each dataset has a realm specified, the middle aggreagator will be created based on the corresponding groupBy tag. In this case, there will be one middle aggregator for Europe (eu) and one for North America (na). + +### Step 5: create a job + +```bash +$ flamectl create job job.json +New job created successfully + ID: 629e3185741b82c266a4147b + state: ready +``` + +If the job is successful created, it returns a job ID. 
+ +```bash +$ flamectl get tasks 629e3185741b82c266a4147b ++--------------------------+------------------------------------------+--------+-------+--------------------------------+ +| JOB ID | TASK ID | TYPE | STATE | TIMESTAMP | ++--------------------------+------------------------------------------+--------+-------+--------------------------------+ +| 629e3185741b82c266a4147b | 56b04d963015d199988d4f348f73df1630cd4bf4 | system | ready | 2022-06-06 16:55:33.197 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | 63ce1443fbacf064a938ce17ac7e2279547a2a13 | system | ready | 2022-06-06 16:55:33.203 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | 8b24540425e6ea17d6169e473c45faf06cc807e3 | system | ready | 2022-06-06 16:55:33.21 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | 9af2682ba448be82dd16bd60683858c17bd48998 | system | ready | 2022-06-06 16:55:33.206 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | a0222d56b53bddd7996b135c68c5b37834e12d6c | system | ready | 2022-06-06 16:55:33.213 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | a296e108066d23369bbaa593b8280a387a25f0b4 | system | ready | 2022-06-06 16:55:33.215 +0000 | +| | | | | UTC | +| 629e3185741b82c266a4147b | ae150e449f08c138f95f646ab480a8162ed702fb | system | ready | 2022-06-06 16:55:33.218 +0000 | +| | | | | UTC | ++--------------------------+------------------------------------------+--------+-------+--------------------------------+ +``` +There should be 7 tasks for this particular job according the schema defined. There will be 1 top aggregator, 2 middle aggregator (1 for na, 1 for eu), and 4 trainers (2 for na, 2 for eu). + +### Step 6: start running + +```bash +flamectl start job 629e3185741b82c266a4147b +``` + +During running, you can check the status of job by going to [http://mlflow.flame.test](http://mlflow.flame.test) or running `flamectl get tasks ${JOB_ID}` on the command line. 
+ diff --git a/examples/asyncfl_hier_mnist/async_hier_mnist.zip b/examples/asyncfl_hier_mnist/async_hier_mnist.zip new file mode 100644 index 0000000000000000000000000000000000000000..cfe27e7bd89706b9524dd16521bed09bb342cdf7 GIT binary patch literal 4816 zcmb7|cT^MG+QtJRbTE`49hA^}FCtZt-aAN#&?HDlq$5q5bOAAhB29V`q!)n`6(JHq zst`b>geD+TE}nDNz2{uL-@V_=+B19p*uVA6thJwc-^V~3NI(ksao9i9Gy8q==Y<-; z47ly??Cj+t=m3NHy1*R#;l9GACL{o$gyM#i8Cih0ATnf>$Zctw~$ZB@5sv+IQpNFVWxe)Gg9ELJ^sK?F%=AtetRwr zdzZ?`Pow87>%^z)x1@y4P@%vV0OT8Xcawn|SDLehpbeMc3*EPr%g#)%&oDFh!gP~Pi-e63 zqKF+_k!Mvv1OetDB4bmt+#bk7Q8TU~T3xuP?1yp@2bOqT6G<5~y5l6daCnen%^hO0 zFB^H{NWJ7jX~{Z(L&>9HgFBKlVfT(2W9m@D!z+Qg&-{XeV`g`uU?7oj@myWHVE zs|0?qDA@MuI6DH8efMy8EGWV@K6|}J#XXVr>uq{koG_$s>P$2u4bs& zq;ZvkVDPty$HUg94#!pOE~`8!t5T=cB>@=X`C-%wN-ldtz9Jj!;Zv)bPqs|xl;$ud z!;Qb0;2-i)v#JkXrYWN>#&haCD`T;%|2bc#Pm{J}wP})lZL}S%R_p)e&LVEt?Ay20qR$ydi3L^XDEG7jP18RD@ihfJ6tyIU5y+}ZRqt`nZ(|U;( zZ5uFL?OkO{9fMvu6T7@z$|*z_w=$^IM+RGr{rdLZDFbiJ=x6K0jQMlCA6xnld+_)- zd$@G5rTyVPKY7E=i%tAv{GZ+s%0OJ3KmY)AUa;wY2K^7l&}HVJkCg(S7TJlCvT2lp zpO-!3t7`txh6MWJ05yWO2ue1IkWdUQ;2dE}5H}G9*v( zzPqaL$rlc)6ZLFYqh6>&R4@LC0ybXp%79s`L8c(FJ7SWSH;-^O20v`X-6@h}#AR-= z{OZ$s2iMKk&zg>i;q;RX>e}_S#^noP-ZCC&8PpTy~;k2VQY0dTJg1jswJTBZC(h@LEl&G*_ zODWP|G_;DK97tg*i9V2M=>m`RTfQb2^(Pe`H&&~4*z{TP9q`=0!i-6P^4XQVXBJv>g98iajZ?!*K0-u!X0*;DWmq`U)*Ld7 z7#16=44T)nwJ-+0MrfH_q%&kc!SAq{O*r97emSAeR^~t_LPvfyO$W2_AaL)b0k)(wT9&o(2_i$$$5QG#_dv+(<|?fDXOO=MHH|@ zF|Fo+Ra%3T$RO@z9vWRwu`Vu(Avck4qWIqrg=WjfihS=7#@2CGLD{&aHG+njgd57H zqS@t-b_p7u2o+jgiK5AnH>XoCx!l1-U2=YyY3U=EC~zMkkLKYZ(c_Sz;9zfeIIOjy zu``@M7H+b67|+BjYb%2F*ft-FuF2efof2`hbv}QTK@;Ob)5-jpcUohA8+vvIiM*}d zx2G*i=T#7_%X%PrPIY&gBV{wWzY^k88L+e_bA^9{daAY2<|B2HnAY^M6V;0}FY?xj zn^n|LXiLK(=Ej9o?Rwou&@pSZ-Df>@E+(v7p+T^AsG93~$%{r|%qM11i^nqa6#Hco z9oABxWeo&gm+;(JtTL$ z%tj++35YbP6E{u&H|NLo5ET=m(!t(Fv}DMmq!bUpX2ciO;xkD zLi4}7-|Vkv);FEBf=CNd)0(U$NpA76vBrf850&zWO_an8)S_9{*?n}m<9inM_v4#q zq2c#t?=LI1(1u4W3s44aP0!}8pq1GoJeKAxh`nlX8$ckTU4b$1?MIt1O@YaemMs>0 
z^9%nnNWXtGNa7#9=6g}%xcpf)SzWlI^?!55dWc^YmJk4VP7MJ3Zn1w7{ufK6pYfiN z+Mlz#gOIl%WF88@6ptV%EHfO{Gn*^Wx4;DWf`($!iSIP{QmytEf;0?su!G)DT-nnH z{XVwa`4U`4)^MUQjH^xGcx^-=WEL1Lw2H1n-!gz$Khhlp)wO(+Q10O`As6jQXI$%s z57Dn6=qmTI&7fza6uZArX`oZAzwUMhcwy3Jhw|400vdEt!891|pyJ9<*UPeXJpY~F^dcFLNXxVI#0N(wz~bEFQqHL0XU%frP_E6&XXrqy6iP)o|BaeX`%?IIJE0t7?9TAgR@>o|t;Mvx zG`2A~j?P>=_?${;e!*5~iw+0hSr;r^I0ND6+H#E%Zv35b3fU7QX)0n)H}mOBE_{(V zJxQJ2bRw@zK)oy2>|wGB7i%T9f-=j5PKN=`M*|+=gn`gEW-?(~Ak1CVrQ8VC7n#dy zD(&hACPnQ^4Q2=OxVE9;>QAeI4V^J9=^IUp`Ph1OovX`|ALQcO-Xl2g(!)h3x>eIj z)BPn>x?iTU%Un3`}6m@k& zEK@5Z5yRyBgI8%+Q;|2uoh2BDWR-iYug&?GyeCZp0IGGwfYzHj;{y+>R{= zt0jxGsC6SlOW*b#v-#1RS*IKr%~gq?yfXZ)EJE%HBVFOD+2mWr$Nit3NuijHakGxMnG$uK zx$U#*PcNxp^D;-O2c?HW?Qgh=gwJ;3&%ZCV$&OpNpX3(j8fD?%mLapnDT_en7IgcH zd&|!u*h=9&RpGrFZ6JES;p6vq1HOGnW+AbfVvC&;izTYN3~H5K#o#&pEXEgaVSK7- z(9)>~@#Bd$rop&}GPTe|Pn?4tq<%~&&`&vIB&!%$9GW6y??b2@>db_cedz!wsEdP4 zGHHdNf7`HeSqIR`w3O}M8hsSaFHBDqZmhFGrNBL->lZK>|Mj86 zKzM)2ZTLRRhf;0@N_oheTPe*+i-0Kc=u!Pbe=B+q9{yKD$m%<{USnPCrvv3$+Gefs zI(AJJpH}v$H9f08^{jbBIoqgeh%5eN>5w>{plTJhmoApil#PQ;sk0&;@fEFa&f_B` zFgF(ly!G}!&ldEOyt3;FuT|dL)ji>Oe&rl-iLV{vrpvn1+xk2_CUpF=3iLAl*|Tgpd|h6Qf@xTFQ|^Q#r6nD6ga2h>&0L7G&X(cV=(+;-S_zaXLhYUQ zdC_4|(kpDc^ra^Elkdr>d3D_U!NKvd+EFV1q2$SGS+!8kWYllI?@dU}1;0;%f;c6D z<_?w#uMDNLerl>?KL`h&b_06~K>d%W7q2B0v~zCg$}K4GCm1U`zR+^slPClYD4B4P z*a&*@Wr>AoHS3zaG{zI%GT|@abxmoihK~BcQN&g|i?1FTD>S>~JkYH=s9sQi5GBd% zUbt7}a=ei345ld;q4tSpx}cic1^IB@@hms)>!Yr4QkJ>tgVefAp_e=#>Qs(_4uLBM zw#gC@GR^oqQufn?=lq# zKEWY=?QycWmln5CL>V4U#LL~&jQuA!k!);&t~6J#>m3{ubPx>I=04KxqU`?2<`n6) zR%#~dpLKmJ#IrhTdMeMdB_BlD5ANjQ6PZGec*?I}l&*?1w`I>TZd6+m&MYt5xSB>e zp^3pwDT?Q1JZ*08oML-+om)n)6p55fi@mKEAT%B z7W98c7hu4q@{j4S;tS9p@x@Q=$q(*-3NQZPUc?-L(kPu@a{u{zzQF&8GJc8wPmR=H zWwTv$ML)Gw7tmj}RDaj}_onKvn%#be{yzl&T-f|ouoB6I;D44m7fOGWIlp}Ue|_D* f7dd}b8vPgAf0YQjsFMHy!i$p(9{{KW|9JWb_DUGC literal 0 HcmV?d00001 diff --git a/examples/asyncfl_hier_mnist/dataset_eu_germany.json b/examples/asyncfl_hier_mnist/dataset_eu_germany.json new file mode 
100644 index 000000000..4c83639ce --- /dev/null +++ b/examples/asyncfl_hier_mnist/dataset_eu_germany.json @@ -0,0 +1,15 @@ +{ + "name": "MNIST dataset", + + "description": "This dataset contains handwritten digits", + + "url": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist_1.npz", + + "//": "the url is a fake url, serving as a placeholder. Data-loading are hard-coded by users in trainer's python script for simplicity and demonstration purpose.", + + "dataFormat": "npy", + + "realm": "default/eu/germany", + + "isPublic": true +} diff --git a/examples/asyncfl_hier_mnist/dataset_eu_uk.json b/examples/asyncfl_hier_mnist/dataset_eu_uk.json new file mode 100644 index 000000000..6c0ca4125 --- /dev/null +++ b/examples/asyncfl_hier_mnist/dataset_eu_uk.json @@ -0,0 +1,15 @@ +{ + "name": "MNIST dataset", + + "description": "This dataset contains handwritten digits", + + "url": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist_2.npz", + + "//": "the url is a fake url, serving as a placeholder. Data-loading are hard-coded by users in trainer's python script for simplicity and demonstration purpose.", + + "dataFormat": "npy", + + "realm": "default/eu/uk", + + "isPublic": true +} diff --git a/examples/asyncfl_hier_mnist/dataset_na_canada.json b/examples/asyncfl_hier_mnist/dataset_na_canada.json new file mode 100644 index 000000000..492b8a86c --- /dev/null +++ b/examples/asyncfl_hier_mnist/dataset_na_canada.json @@ -0,0 +1,15 @@ +{ + "name": "MNIST dataset", + + "description": "This dataset contains handwritten digits", + + "url": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist_3.npz", + + "//": "the url is a fake url, serving as a placeholder. 
Data-loading are hard-coded by users in trainer's python script for simplicity and demonstration purpose.", + + "dataFormat": "npy", + + "realm": "default/na/canada", + + "isPublic": true +} diff --git a/examples/asyncfl_hier_mnist/dataset_na_us.json b/examples/asyncfl_hier_mnist/dataset_na_us.json new file mode 100644 index 000000000..5481d3b65 --- /dev/null +++ b/examples/asyncfl_hier_mnist/dataset_na_us.json @@ -0,0 +1,15 @@ +{ + "name": "MNIST dataset", + + "description": "This dataset contains handwritten digits", + + "url": "https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist_4.npz", + + "//": "the url is a fake url, serving as a placeholder. Data-loading are hard-coded by users in trainer's python script for simplicity and demonstration purpose.", + + "dataFormat": "npy", + + "realm": "default/na/us", + + "isPublic": true +} diff --git a/examples/asyncfl_hier_mnist/job.json b/examples/asyncfl_hier_mnist/job.json new file mode 100644 index 000000000..9a42d0ac6 --- /dev/null +++ b/examples/asyncfl_hier_mnist/job.json @@ -0,0 +1,43 @@ +{ + "designId": "async_hier_mnist", + "schemaVersion": "1", + "codeVersion": "1", + "dataSpec": { + "fromUser": { + "default": 0 + }, + "fromSystem": [ + "624b4417dad565fc66f8a0ee", + "624b4428dad565fc66f8a0ef", + "624b4430dad565fc66f8a0f0", + "624b4437dad565fc66f8a0f1" + ] + }, + "priority": "low", + "backend": "mqtt", + "maxRunTime": 1800, + + "baseModel": { + "name": "", + "version": 0 + }, + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.01, + "rounds": 10, + "aggGoal": 1 + }, + "dependencies" : [ + "numpy >= 1.2.0" + ], + "selector": { + "sort": "fedbuff", + "kwargs": { + "c": 2 + } + }, + "optimizer": { + "sort": "fedbuff", + "kwargs": {} + } +} diff --git a/examples/asyncfl_hier_mnist/schema.json b/examples/asyncfl_hier_mnist/schema.json new file mode 100644 index 000000000..7dd68a100 --- /dev/null +++ b/examples/asyncfl_hier_mnist/schema.json @@ -0,0 +1,58 @@ +{ + "name": "A simple hierarchical 
FL MNIST example schema", + "description": "a sample schema to demostrate the hierarchical FL setting", + "roles": [ + { + "name": "trainer", + "description": "It consumes the data and trains local model", + "isDataConsumer": true + }, + { + "name": "middle-aggregator", + "description": "It aggregates the updates from trainers" + }, + { + "name": "top-aggregator", + "description": "It aggregates the updates from middle-aggregator" + } + ], + "channels": [ + { + "name": "param-channel", + "description": "Model update is sent from trainer to middle-aggregator and vice-versa", + "pair": [ + "trainer", + "middle-aggregator" + ], + "groupBy": { + "type": "tag", + "value": [ + "default/eu", + "default/na" + ] + }, + "funcTags": { + "trainer": ["fetch", "upload"], + "middle-aggregator": ["distribute", "aggregate"] + } + }, + { + "name": "global-channel", + "description": "Model update is sent from middle-aggregator to top-aggregator and vice-versa", + "pair": [ + "top-aggregator", + "middle-aggregator" + ], + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "funcTags": { + "top-aggregator": ["distribute", "aggregate"], + "middle-aggregator": ["fetch", "upload"] + } + } + ] +} From af56dae379b2b2192ae4e4b662de9ec08f58c64b Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Fri, 17 Feb 2023 08:56:57 -0800 Subject: [PATCH 09/16] chore: clean up examples folder (#336) The examples folder at the top level directory has some outdated and irrelevant files. Those are now removed from the folder. 
--- examples/README.md | 3 -- examples/config.yaml | 22 ------------- examples/flame-role-and-rolebinding.yaml | 41 ------------------------ examples/flame-service-account.yaml | 22 ------------- 4 files changed, 88 deletions(-) delete mode 100644 examples/config.yaml delete mode 100644 examples/flame-role-and-rolebinding.yaml delete mode 100644 examples/flame-service-account.yaml diff --git a/examples/README.md b/examples/README.md index 491df28f2..98bdc66b4 100644 --- a/examples/README.md +++ b/examples/README.md @@ -4,9 +4,6 @@ This directory contains a few sample examples to explore functionality and workf The examples here can be executed within [fiab](../fiab/README.md) environment. -## Setup -`config.yaml` is a configuration file for local fiab environment. Place it in `$HOME/.flame` folder. - ## CLI tool: flamectl To interact with the flame system, there is a command line (CLI) tool called `flamectl`. diff --git a/examples/config.yaml b/examples/config.yaml deleted file mode 100644 index d53407b8d..000000000 --- a/examples/config.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2022 Cisco Systems, Inc. and its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# SPDX-License-Identifier: Apache-2.0 - -# configuration for local fiab environment -# place it in $HOME/.flame folder ---- -apiserver: - endpoint: http://127.0.0.1:10100 -user: john diff --git a/examples/flame-role-and-rolebinding.yaml b/examples/flame-role-and-rolebinding.yaml deleted file mode 100644 index 11204e477..000000000 --- a/examples/flame-role-and-rolebinding.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2022 Cisco Systems, Inc. and its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - -# source: https://docs.armory.io/docs/armory-admin/manual-service-account/ ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: controller-role - namespace: flame -rules: -- apiGroups: ["*"] - resources: ["*"] - verbs: ["*"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: controller-rolebinding - namespace: flame -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: controller-role -subjects: -- namespace: flame - kind: ServiceAccount - name: controller diff --git a/examples/flame-service-account.yaml b/examples/flame-service-account.yaml deleted file mode 100644 index fcc9a0b96..000000000 --- a/examples/flame-service-account.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2022 Cisco Systems, Inc. 
and its affiliates -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: Apache-2.0 - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: controller - namespace: flame From d729786167929a71d9241d1eb0364fa45918795c Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Thu, 23 Feb 2023 09:29:07 -0800 Subject: [PATCH 10/16] fix: workaround for hybrid mode with two p2p backends (#345) Due to https://github.com/grpc/grpc/issues/25364, when two p2p backends (which rely on grpc and asyncio) are defined, the hybrid mode example throws an execption: 'BlockingIOError: [Errno 35] Resource temporarily unavailable'. The issue still appears unresolved. As a temporary workaround, we use two different types of backends: mqtt for one and p2p for the other. This means that when this example is executed, both metaserver and a mqtt broker (e.g., mosquitto) must be running in the local machine. 
--- lib/python/flame/examples/hybrid/aggregator/config.json | 2 +- lib/python/flame/examples/hybrid/trainer/config_eu_org1.json | 5 +++-- lib/python/flame/examples/hybrid/trainer/config_eu_org2.json | 5 +++-- lib/python/flame/examples/hybrid/trainer/config_us_org1.json | 5 +++-- lib/python/flame/examples/hybrid/trainer/config_us_org2.json | 5 +++-- 5 files changed, 13 insertions(+), 9 deletions(-) diff --git a/lib/python/flame/examples/hybrid/aggregator/config.json b/lib/python/flame/examples/hybrid/aggregator/config.json index f65417310..0a0ef510f 100644 --- a/lib/python/flame/examples/hybrid/aggregator/config.json +++ b/lib/python/flame/examples/hybrid/aggregator/config.json @@ -1,6 +1,6 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580371", - "backend": "p2p", + "backend": "mqtt", "brokers": [ { "host": "localhost", diff --git a/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json b/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json index 6a6501d19..8d29caf61 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json +++ b/lib/python/flame/examples/hybrid/trainer/config_eu_org1.json @@ -1,6 +1,6 @@ { "taskid": "205f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "p2p", + "backend": "mqtt", "brokers": [ { "host": "localhost", @@ -96,7 +96,8 @@ ] }, "global-channel": { - "backend": "p2p", + "//": "bug: due to https://github.com/grpc/grpc/issues/25364, more than one p2p (grpc) backend can be used", + "backend": "mqtt", "brokers": [ { "host": "localhost", diff --git a/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json b/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json index 406c95e2b..ccf3c7cec 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json +++ b/lib/python/flame/examples/hybrid/trainer/config_eu_org2.json @@ -1,6 +1,6 @@ { "taskid": "305f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "p2p", + "backend": "mqtt", "brokers": [ { "host": "localhost", @@ -96,7 +96,8 @@ ] }, 
"global-channel": { - "backend": "p2p", + "//": "bug: due to https://github.com/grpc/grpc/issues/25364, more than one p2p (grpc) backend can be used", + "backend": "mqtt", "brokers": [ { "host": "localhost", diff --git a/lib/python/flame/examples/hybrid/trainer/config_us_org1.json b/lib/python/flame/examples/hybrid/trainer/config_us_org1.json index 9b200a821..b80aa1b23 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_us_org1.json +++ b/lib/python/flame/examples/hybrid/trainer/config_us_org1.json @@ -1,6 +1,6 @@ { "taskid": "405f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "p2p", + "backend": "mqtt", "brokers": [ { "host": "localhost", @@ -96,7 +96,8 @@ ] }, "global-channel": { - "backend": "p2p", + "//": "bug: due to https://github.com/grpc/grpc/issues/25364, more than one p2p (grpc) backend can be used", + "backend": "mqtt", "brokers": [ { "host": "localhost", diff --git a/lib/python/flame/examples/hybrid/trainer/config_us_org2.json b/lib/python/flame/examples/hybrid/trainer/config_us_org2.json index 7a61c87b6..44a7c1f03 100644 --- a/lib/python/flame/examples/hybrid/trainer/config_us_org2.json +++ b/lib/python/flame/examples/hybrid/trainer/config_us_org2.json @@ -1,6 +1,6 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "p2p", + "backend": "mqtt", "brokers": [ { "host": "localhost", @@ -96,7 +96,8 @@ ] }, "global-channel": { - "backend": "p2p", + "//": "bug: due to https://github.com/grpc/grpc/issues/25364, more than one p2p (grpc) backend can be used", + "backend": "mqtt", "brokers": [ { "host": "localhost", From 19c39eb69839c3a9785aae4e01650bb5f647043b Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Thu, 23 Feb 2023 09:33:48 -0800 Subject: [PATCH 11/16] fix: distributed mode (#344) Distributed mode has a bug: before 'weights' is not defined as member variable, deepcopy(self.weights) in _update_weights() is called. To address this issue, self.weights is initialized in __init__(). 
Also, to run a distributed example locally, configuration files are revised. --- .../flame/examples/dist_mnist/trainer/config1.json | 13 +++++++++---- .../flame/examples/dist_mnist/trainer/config2.json | 12 ++++++++---- .../flame/examples/dist_mnist/trainer/config3.json | 12 ++++++++---- lib/python/flame/mode/distributed/trainer.py | 1 + 4 files changed, 26 insertions(+), 12 deletions(-) diff --git a/lib/python/flame/examples/dist_mnist/trainer/config1.json b/lib/python/flame/examples/dist_mnist/trainer/config1.json index 73d44acca..dbdcba115 100644 --- a/lib/python/flame/examples/dist_mnist/trainer/config1.json +++ b/lib/python/flame/examples/dist_mnist/trainer/config1.json @@ -1,11 +1,16 @@ { "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa746", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } + ], "channels": [ { @@ -44,8 +49,8 @@ "name": "dist_mnist" }, "registry": { - "sort": "mlflow", - "uri": "http://mlflow.flame.test" + "sort": "dummy", + "uri": "" }, "selector": { "sort": "default", diff --git a/lib/python/flame/examples/dist_mnist/trainer/config2.json b/lib/python/flame/examples/dist_mnist/trainer/config2.json index 8bd49306b..51f9f20ce 100644 --- a/lib/python/flame/examples/dist_mnist/trainer/config2.json +++ b/lib/python/flame/examples/dist_mnist/trainer/config2.json @@ -1,10 +1,14 @@ { "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -44,8 +48,8 @@ "name": "dist_mnist" }, "registry": { - "sort": "mlflow", - "uri": "http://mlflow.flame.test" + "sort": "dummy", + "uri": "" }, "selector": { "sort": "default", diff --git a/lib/python/flame/examples/dist_mnist/trainer/config3.json 
b/lib/python/flame/examples/dist_mnist/trainer/config3.json index b59a5a1f7..fc0803635 100644 --- a/lib/python/flame/examples/dist_mnist/trainer/config3.json +++ b/lib/python/flame/examples/dist_mnist/trainer/config3.json @@ -1,10 +1,14 @@ { "taskid": "f5a0b353dc3ca60d24174cbbbece3597c3287f3f", - "backend": "mqtt", + "backend": "p2p", "brokers": [ { - "host": "flame-mosquitto", + "host": "localhost", "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" } ], "channels": [ @@ -44,8 +48,8 @@ "name": "dist_mnist" }, "registry": { - "sort": "mlflow", - "uri": "http://mlflow.flame.test" + "sort": "dummy", + "uri": "" }, "selector": { "sort": "default", diff --git a/lib/python/flame/mode/distributed/trainer.py b/lib/python/flame/mode/distributed/trainer.py index c39c6e3e2..b80b7a72e 100644 --- a/lib/python/flame/mode/distributed/trainer.py +++ b/lib/python/flame/mode/distributed/trainer.py @@ -64,6 +64,7 @@ def internal_init(self) -> None: self.model = self.registry_client.load_model( base_model.name, base_model.version) self.ring_weights = None # latest model weights from ring all-reduce + self.weights = None self.registry_client.setup_run(mlflow_runname(self.config)) self.metrics = dict() From a7ea2a216c945fabe3eeb3a12be7dc2e994effaa Mon Sep 17 00:00:00 2001 From: GustavBaumgart <98069699+GustavBaumgart@users.noreply.github.com> Date: Thu, 23 Feb 2023 09:46:36 -0800 Subject: [PATCH 12/16] example/implementation for fedprox (#339) This example is similar to the ones seen in the fedprox paper, although it currently does not simulate stragglers and uses another dataset/architecture. A few things were changed in order for there to be a simple process for modifying trainers. This includes a function in util.py and another class variable in the trainer containing information on the client side regularizer.
Additionally, tests are automated (mu=1,0.1,0.01,0.001,0) so running the example generates or modifies existing files in order to provide the proper configuration for an experiment. --- lib/python/flame/common/util.py | 3 + lib/python/flame/config.py | 1 + .../flame/examples/medmnist_fedprox/README.md | 49 ++++ .../examples/medmnist_fedprox/__init__.py | 17 ++ .../medmnist_fedprox/aggregator/__init__.py | 17 ++ .../medmnist_fedprox/aggregator/main.py | 189 +++++++++++++++ .../medmnist_fedprox/config/agg_template.json | 69 ++++++ .../config/train_template.json | 69 ++++++ .../examples/medmnist_fedprox/fedprox.sh | 36 +++ .../examples/medmnist_fedprox/figures.py | 97 ++++++++ .../medmnist_fedprox/images/acc_all.png | Bin 0 -> 68643 bytes .../medmnist_fedprox/images/loss_all.png | Bin 0 -> 36006 bytes .../flame/examples/medmnist_fedprox/run.py | 63 +++++ .../medmnist_fedprox/trainer/__init__.py | 17 ++ .../examples/medmnist_fedprox/trainer/main.py | 227 ++++++++++++++++++ lib/python/flame/mode/horizontal/trainer.py | 6 + lib/python/flame/optimizer/fedavg.py | 3 + lib/python/flame/optimizer/fedbuff.py | 3 + lib/python/flame/optimizer/fedprox.py | 46 ++++ .../flame/optimizer/regularizer/__init__.py | 17 ++ .../flame/optimizer/regularizer/default.py | 31 +++ .../flame/optimizer/regularizer/fedprox.py | 39 +++ lib/python/flame/optimizers.py | 2 + 23 files changed, 1001 insertions(+) create mode 100644 lib/python/flame/examples/medmnist_fedprox/README.md create mode 100644 lib/python/flame/examples/medmnist_fedprox/__init__.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/aggregator/__init__.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/aggregator/main.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/config/agg_template.json create mode 100644 lib/python/flame/examples/medmnist_fedprox/config/train_template.json create mode 100644 lib/python/flame/examples/medmnist_fedprox/fedprox.sh create mode 100644
lib/python/flame/examples/medmnist_fedprox/figures.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/images/acc_all.png create mode 100644 lib/python/flame/examples/medmnist_fedprox/images/loss_all.png create mode 100644 lib/python/flame/examples/medmnist_fedprox/run.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/trainer/__init__.py create mode 100644 lib/python/flame/examples/medmnist_fedprox/trainer/main.py create mode 100644 lib/python/flame/optimizer/fedprox.py create mode 100644 lib/python/flame/optimizer/regularizer/__init__.py create mode 100644 lib/python/flame/optimizer/regularizer/default.py create mode 100644 lib/python/flame/optimizer/regularizer/fedprox.py diff --git a/lib/python/flame/common/util.py b/lib/python/flame/common/util.py index 9c07869d2..77b09c081 100644 --- a/lib/python/flame/common/util.py +++ b/lib/python/flame/common/util.py @@ -72,6 +72,9 @@ def get_ml_framework_in_use(): return ml_framework_in_use +def get_params_detached_pytorch(model): + """Return copy of parameters of pytorch model disconnected from graph.""" + return [param.detach().clone() for param in model.parameters()] @contextmanager def background_thread_loop(): diff --git a/lib/python/flame/config.py b/lib/python/flame/config.py index 9d9ef62b7..522468a59 100644 --- a/lib/python/flame/config.py +++ b/lib/python/flame/config.py @@ -93,6 +93,7 @@ class OptimizerType(Enum): # FedBuff from https://arxiv.org/pdf/1903.03934.pdf and # https://arxiv.org/pdf/2111.04877.pdf FEDBUFF = 5 + FEDPROX = 6 # FedProx class SelectorType(Enum): diff --git a/lib/python/flame/examples/medmnist_fedprox/README.md b/lib/python/flame/examples/medmnist_fedprox/README.md new file mode 100644 index 000000000..076c4a8bc --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/README.md @@ -0,0 +1,49 @@ +## FedProx MedMNIST Example + +We use the PathMNIST dataset from [MedMNIST](https://medmnist.com/) to go over an example of FedProx with different mu values
(mu=1.0, 0.1, 0.01, 0.001, 0.0). +Although FedProx, unlike FedAvg, incorporates stragglers, we will not include stragglers in the following tests. +Thus, we are measuring the effects of the proximal term. + +This example is run within a conda environment, so we activate the environment first. +Once you are in the `medmnist_fedprox` directory, run the following command. + +```bash +conda activate flame +``` + +Once this is complete, and say, you want to run the example (using all 10 trainers), you can select a value from mu=1.0, 0.1, 0.01, 0.001, 0.0 and run `python run.py <mu>`. +If you want to run the example with `mu=1.0`, you can run: + +```bash +python run.py 1.0 +``` + +We recommend running only one trial (or mu value) at a time. +This way you can track the progress by running the following commands: + +```bash +cat aggregator/log.txt | grep -i test +``` + +OR + +```bash +cat aggregator/log.txt | grep -i test | wc -l +``` + +The last command will return 300 when all 100 rounds have finished running. + +Once you have finished running all the mu-values, the files that track accuracy/loss across different rounds should be in the `aggregator` directory. +Without changing their names, run the command below to generate figures. + +```bash +python figures.py +``` + +The figures will be generated in the `medmnist_fedprox` directory. + +We include two of them below. Overall, we found that mu=0.01 was the best value. + +![acc_fedprox](images/acc_all.png) +![loss_fedprox](images/loss_all.png) + diff --git a/lib/python/flame/examples/medmnist_fedprox/__init__.py b/lib/python/flame/examples/medmnist_fedprox/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist_fedprox/aggregator/__init__.py b/lib/python/flame/examples/medmnist_fedprox/aggregator/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/aggregator/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist_fedprox/aggregator/main.py b/lib/python/flame/examples/medmnist_fedprox/aggregator/main.py new file mode 100644 index 000000000..2d187c925 --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/aggregator/main.py @@ -0,0 +1,189 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""MedMNIST FedProx aggregator for PyTorch.""" + +import logging + +from flame.config import Config +from flame.dataset import Dataset # Not sure why we need this. +from flame.mode.horizontal.top_aggregator import TopAggregator +import torch + +from sklearn.metrics import accuracy_score +import numpy as np +from PIL import Image +import torchvision + +logger = logging.getLogger(__name__) + +# keep track of losses/accuracies of global model +fed_acc = [] +fed_loss = [] + +class PathMNISTDataset(torch.utils.data.Dataset): + def __init__(self, transform=None, as_rgb=False): + npz_file = np.load("pathmnist.npz") + + self.transform = transform + self.as_rgb = as_rgb + + self.imgs = npz_file["val_images"] + self.labels = npz_file["val_labels"] + + def __len__(self): + return self.imgs.shape[0] + + def __getitem__(self, index): + img, target = self.imgs[index], self.labels[index].astype(int) + img = Image.fromarray(img) + + if self.as_rgb: + img = img.convert('RGB') + + if self.transform is not None: + img = self.transform(img) + + return img, target + +class CNN(torch.nn.Module): + """CNN Class""" + + def __init__(self, num_classes): + """Initialize.""" + super(CNN, self).__init__() + self.num_classes = num_classes + self.features = torch.nn.Sequential( + torch.nn.Conv2d(3, 6, kernel_size=3, padding=1), + torch.nn.BatchNorm2d(6), + torch.nn.ReLU(), + torch.nn.MaxPool2d(kernel_size=2, stride=2), + torch.nn.Conv2d(6, 16, kernel_size=3, padding=1), + torch.nn.BatchNorm2d(16), + torch.nn.ReLU(), + 
torch.nn.MaxPool2d(kernel_size=2, stride=2) + ) + self.fc = torch.nn.Linear(16 * 7 * 7, num_classes) + + def forward(self, x): + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + +class PyTorchMedMNistAggregator(TopAggregator): + """PyTorch MedMNist Aggregator""" + + def __init__(self, config: Config) -> None: + self.config = config + self.model = None + self.dataset: Dataset = None # Not sure why we need this. + + self.batch_size = self.config.hyperparameters['batchSize'] + + self.device = torch.device("cpu") + + def initialize(self): + """Initialize.""" + self.model = CNN(num_classes=9) + self.criterion = torch.nn.CrossEntropyLoss() + + def load_data(self) -> None: + """Load a test dataset.""" + logger.info('in load_data') + # FIX this. easy to break right now + self._download() + + data_transform = torchvision.transforms.Compose([ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + ]) + + dataset = PathMNISTDataset(transform=data_transform) + + self.loader = torch.utils.data.DataLoader( + dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=4 * torch.cuda.device_count(), + pin_memory=True, + drop_last=True + ) + self.dataset_size = len(dataset) + + def _download(self) -> None: + import requests + r = requests.get(self.config.dataset, allow_redirects=True) + open('pathmnist.npz', 'wb').write(r.content) + + def train(self) -> None: + """Train a model.""" + # Implement this if training is needed in aggregator + pass + + def evaluate(self) -> None: + """Evaluate (test) a model.""" + self.model.eval() + loss_lst = list() + labels = torch.tensor([],device=self.device) + labels_pred = torch.tensor([],device=self.device) + with torch.no_grad(): + for data, label in self.loader: + data, label = data.to(self.device), label.to(self.device) + output = self.model(data) + loss = self.criterion(output, label.squeeze()) + loss_lst.append(loss.item()) + labels_pred = 
torch.cat([labels_pred, output.argmax(dim=1)], dim=0) + labels = torch.cat([labels, label], dim=0) + + labels_pred = labels_pred.cpu().detach().numpy() + labels = labels.cpu().detach().numpy() + val_acc = accuracy_score(labels, labels_pred) + + # loss here not as meaningful + val_loss = sum(loss_lst) / len(loss_lst) + self.update_metrics({"Val Loss": val_loss, "Val Accuracy": val_acc, "Testset Size": self.dataset_size}) + logger.info(f"Test Loss: {val_loss}") + logger.info(f"Test Accuracy: {val_acc}") + logger.info(f"Testset Size: {self.dataset_size}") + + # record losses/accuracies + global fed_acc, fed_loss + fed_acc.append(val_acc) + fed_loss.append(val_loss) + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + a = PyTorchMedMNistAggregator(config) + a.compose() + a.run() + + # write records to files + mu = config.optimizer.kwargs['mu'] + file1 = open(f'acc_mu{mu}.txt','w') + file1.write('\n'.join(map(str,fed_acc))) + file1.close() + file2 = open(f'loss_mu{mu}.txt','w') + file2.write('\n'.join(map(str,fed_loss))) + file2.close() diff --git a/lib/python/flame/examples/medmnist_fedprox/config/agg_template.json b/lib/python/flame/examples/medmnist_fedprox/config/agg_template.json new file mode 100644 index 000000000..0018d0d2b --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/config/agg_template.json @@ -0,0 +1,69 @@ +{ + "taskid": "49d06b7526964db86cf37c70e8e0cdb6bd7aa742", + "backend": "mqtt", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + 
"aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 100, + "epochs": 4 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedprox", + "kwargs": { + "mu": 0 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "aggregator" +} diff --git a/lib/python/flame/examples/medmnist_fedprox/config/train_template.json b/lib/python/flame/examples/medmnist_fedprox/config/train_template.json new file mode 100644 index 000000000..57e032315 --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/config/train_template.json @@ -0,0 +1,69 @@ +{ + "taskid": "505f9fc483cf4df68a2409257b5fad7d3c580370", + "backend": "mqtt", + "brokers": [ + { + "host": "localhost", + "sort": "mqtt" + }, + { + "host": "localhost:10104", + "sort": "p2p" + } + ], + "channels": [ + { + "description": "Model update is sent from trainer to aggregator and vice-versa", + "groupBy": { + "type": "tag", + "value": [ + "default" + ] + }, + "name": "param-channel", + "pair": [ + "trainer", + "aggregator" + ], + "funcTags": { + "aggregator": ["distribute", "aggregate"], + "trainer": ["fetch", "upload"] + } + } + ], + "dataset": "https://raw.github.com/GaoxiangLuo/flame-datasets/main/site1.npz", + "dependencies": [ + "numpy >= 1.2.0" + ], + "hyperparameters": { + "batchSize": 32, + "learningRate": 0.001, + "rounds": 100, + "epochs": 4 + }, + "baseModel": { + "name": "", + "version": 1 + }, + "job" : { + "id": "622a358619ab59012eabeefb", + "name": "mednist" + }, + "registry": { + "sort": "dummy", + "uri": "" + }, + "selector": { + "sort": "default", + "kwargs": {} + }, + "optimizer": { + "sort": "fedprox", + "kwargs": { 
+ "mu": 0 + } + }, + "maxRunTime": 300, + "realm": "default", + "role": "trainer" +} diff --git a/lib/python/flame/examples/medmnist_fedprox/fedprox.sh b/lib/python/flame/examples/medmnist_fedprox/fedprox.sh new file mode 100644 index 000000000..2f859061a --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/fedprox.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +if [[ $# -ne 0 ]]; then + echo 'Did not expect any arguments' >&2 + exit 1 +fi + +cd trainer + +for i in {1..10} +do + rm -rf trainer$i + mkdir trainer$i + cd trainer$i + python ../main.py ../../config/trainer$i.json > log$i.txt 2>&1 & + cd .. +done + +cd ../aggregator +rm -f log.txt +python main.py ../config/aggregator.json > log.txt 2>&1 & diff --git a/lib/python/flame/examples/medmnist_fedprox/figures.py b/lib/python/flame/examples/medmnist_fedprox/figures.py new file mode 100644 index 000000000..01e24be73 --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/figures.py @@ -0,0 +1,97 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +import matplotlib.pyplot as plt + +# get data +mus = [1.0, 0.1, 0.01, 0.001, 0.0] +acc_filenames = [] +loss_filenames = [] + +for mu in mus: + acc_filenames.append(f'aggregator/acc_mu{mu}.txt') + loss_filenames.append(f'aggregator/loss_mu{mu}.txt') + +acc_data = [] +loss_data = [] +for acc_name, loss_name in zip(acc_filenames, loss_filenames): + acc_file = open(acc_name, 'r') + acc_list = [] + for line in acc_file: + acc_list.append(float(line)) + acc_file.close() + acc_data.append(acc_list) + + loss_file = open(loss_name, 'r') + loss_list = [] + for line in loss_file: + loss_list.append(float(line)) + loss_data.append(loss_list) + loss_file.close() + +# save images +def save_figure(acc_data, loss_data, mus): + X = range(1,101) + + # plot mu=0 first + figure = plt.figure() + plt.title("FedProx Test Accuracy vs. Rounds (E=4)") + plt.xlabel("Rounds") + plt.ylabel("Accuracy") + plt.plot(X, acc_data[-1], label=f"mu={mus[-1]}") + + for i in range(len(mus)-1): + plt.plot(X, acc_data[i], label=f'mu={mus[i]}') + + plt.legend() + + # save accuracy figure + if len(acc_data) == 2: + figure.savefig(f'acc_mu{mus[0]}.png') + else: + figure.savefig('acc_all.png') + + plt.clf() + + # plot mu=0 first + figure = plt.figure() + plt.title("FedProx Test Loss vs. 
Rounds (E=4)") + plt.xlabel("Rounds") + plt.ylabel("Loss") + plt.plot(X, loss_data[-1], label=f'mu={mus[-1]}') + + for i in range(len(mus)-1): + plt.plot(X, loss_data[i], label=f'mu={mus[i]}') + + plt.legend() + + # save loss figure + if len(loss_data) == 2: + figure.savefig(f'loss_mu{mus[0]}.png') + else: + figure.savefig('loss_all.png') + + plt.clf() + +# save all +save_figure(acc_data, loss_data, mus) + +# save pairs +for i in range(len(mus)-1): + temp_acc_data = [acc_data[i], acc_data[-1]] + temp_loss_data = [loss_data[i], loss_data[-1]] + temp_mus = [mus[i], mus[-1]] + save_figure(temp_acc_data, temp_loss_data, temp_mus) diff --git a/lib/python/flame/examples/medmnist_fedprox/images/acc_all.png b/lib/python/flame/examples/medmnist_fedprox/images/acc_all.png new file mode 100644 index 0000000000000000000000000000000000000000..362622ad7bbf25f8307d856f41d2c8e89d75cc25 GIT binary patch literal 68643 zcmeFYWmHvB+cvsUKm?KQ7U`1iF6k7cyQJB43nEZ6!41oFZc0)bgZfCs-}ADLeV zFWipepB+_fO&whf984f`299=CwvJX7hNR9W4p0kQ8&*bkMrL|ab4N!zC=U~p_5b|_ zMq39nrfGxIM(`o8?LKNiArKUU=l?Lj1oJH*Fc658n6RpA%6_U-3g*RAclw;if|s_P zBO&3!!a~Bg1mACdSsCBny?~bve({dXFXNjdA(14Zm!yuvLaj?RbF<)gEo~-9=k;U4 z)>;{g$tLUB&Hn!Uq2(eC2CT4<$p2ic1;UTEO#jc1>Eh_V=>Kym;QfHaM*a74{TBtB z@$J7?hsyW&5Q_Kzz0z3T!%&m`_d1CsdoA?(zgKYn8!3qN$Nye@CLdoY{;xaIi2nb# z_&+>Rj~GQ7siPS-8XS1Z=^yxqw++5ENbwB8d<#o|H(u9}(}(wN@qzb2ub zO`i(YR@T-;g@s!&QdLAmZl1BWdgb}_a5et>Q=egSb@kN}&sG$fsO#N9HFKh@n7H`K zNjJ$@wXL4>;k?w*V&l_fE#2Biyk4GUB=M#X6$J&e`7r6C=b2oZ$07HbvxmnW70c4$ zLcKQB}2SCM5_Gc14q{&hVY;OyI{JGw*2kWb}Na713~3NJjiH3h@#PZvyw zL&E;xak)>$Q`#{W4yxc$WxP;5l2R_|J{#)p>N=byjs(%H{371>lc7MZ7(W!7;kP1W z_rl$n1y6co>56K7Z!fg8wDe{v7aK)TQ1HzC38_e@9=lOj0StAf(h{fn{xmc|=z%G7 zZ0FD4zqi%#C+QjP7rUExzpUq~xOjM29M(Thmg+r?BOv>?wzLHxx$We}|0ye@DSt}8 z7MQJbq66bH+8NCaBjo`{zR}dPcGSf z<}U>Y2gmg6>}aEhOIv$;Uzk?8DKfj|q*(TdV!gBVWQopQ4-^&-Ze?>*!pe&NxW%N( zW`SqX`_9_a({tFIZN3)z$`{PyScdS+3QdN=!NE6#Tv0(m2xm6}59ccD4&1yhhZPBC zjpoY0$W&iv*UYGu>V^jfz7o7YeyOIWR;1flH*aq?l$L9|)C5qMG4bU!BBJ4F 
zjuf`hJ92Wj%Neti<71)hk>#ze?jSUBYDPwQ;+oE1S!#u4b~=)O$+{&eUC-R#HHthNbPE9{(`OTJq4D4ieLK3-{e+#ZI(!NDmiDmp$p zGdf;w#l*%|yG?W1pB5GodBN0lZJ4|E$jHLVipOa~2yWClG*sGVKP33XS6f?qe?F%0 zw7mQNv6)=s8QQqv5^v!BJFArG=BUy=+rsczarxcZSWWvga&o=}AmNNwS!=bmwe{O1 z0=Rr|KR3U zT`l@o%sk&7@!uRs2F1Iyyu93^Ao$P$X5$%70Ctdui%Bi8 zS`m?gj|ovxsECM&HO>e4czAda&*Rotn3$M)hbmDipKG0BZ2q z9~Bg~#tT%8&CCQZ*$aZUx6L76#(xeF0tZ2jv2&aIKrf zr-DmeR`yM!$5kFE4^X-Il9H0ZXiV3;Fi}uY#A)^eg$k&z_XuEIIBklQUZeC8ltBF! z*ko3#t-E<=*7of+SO6C9d)EjeUbU67w6rv!AfY<-&L^WXO#F_FuY)B;qN1)*jscvI zgld@@b(uOkJ6}uZDnC;?LquO+9|j*ke}F;z_t$=;2ol*!pb)Xa+`Lbar97)iNWc}& zRet^W_=w&8vTeG|2o8KeY-}t78XB6&P#Qm+(C0eG0#XD&RD66|hzQ^b`iOyl|Ik6D zeba4p*RYBV4aH!?4?hMAnpmBipO4hk)Fc9`;9DpT6XN9LWE&Dw!|~@SY91a<$aC=| zRf9s7L3sTd?cBYwb6UXb7C~26*Kfoe6IK{?ccPH$4ep1G(X&VbPI=e)(r@v)?oyC% zfK`Q2iM0z+1g={QwG|bkP%$yzZ#&i@4owuOP_>SXD1DTZE4J(JPh{=WG1_^+AR@9} z+&nsBqQ*pA0re5r^eTIVdi%oN0IZdtNG2*^0ytsRBL)u-k4ykw>?>tAOXb1|5ozfV zw>bc3SRpP@*SXMUP(gliadAQ4_`u`c^_3dp^Y2TVR zqh_&MP*>@gnYDpJ#TRRJAnD{J-%C*&Soty%KElT$2EZ3Ef_C+vD$dw+j6v{7<# zvz3m+$Hzx}$1nX+_1pT3pVtay&Ig)+-_1^xY2)_is!@Ql0=f8r`(`s32?;&WA%3%!AE#GW`M}z24Az3`0KEUDG6~csba$L)vp=yNpogIgnV4^SbtzvV zT_S%hvDhkCwLm!lI0}uLOKolK=?V)B946fy@EO-PHz$C#?gQxHTn^{SSXh1l1|@D; ztvlbH2+hfPZ?o9&S_t51y4(z9EKhE&Cz>2AcrzQPkhix0c&I$^8Jj{kmj`ngn3x?& z?3N!AWg+u*PN9HZSWNp95MM4JAt7*-ZfgF1kKma@5Nvme;+V_e*jgGNX*r@CBkBs`kPnba6 z89BG>*cb~e#_~3Z)ps>%oGjek-GAb(?d)jS$j<#SXiwmAqGd^Z!^_JHhDxu^mwP`# zq{*Z1W}|FtYx|avOTVnx?QCj>StOietsg~(&k5e zJv}`R>)8%qtw+98Ivy>i0$zCB2^#{H7@eLz1nxwy9Sufl1f-FW%gz_r+?by~;q{}3 zPbGm@{eT}H7jeTHdo=@?ZUl(&m&u}Dbv-X}At8un@y|bhJ^(T{!|sm!Yl9jS^c{s{ zFa(P(fx{Yqs#I_5`ed!>bJ=Rzl-BI(_gt}fQkO=PwcOd1F<>jot-12sV zDctm*|M*rfc@u!eP4jtl+Z;>{O-)@`)5V5t4y7vq#iRyG3skT_nKKgRCA{6`z6KjU zGSGblKgE>?WRypEVgRyumN~$Xo&Hi15)uJE-OW_g)It8QP@WYDJgB6wFbu%531E8J z6t1#SCjfJ4)=9csfm?n+l^(`8b~I!83X-|X%s>UsmgE8cNd?}Rn3(}zrhl@WIq?Mz z_G?zwJ0xL7vJjV|l_{;2Va*dHF5)X5)^pU<)XVXD-eooMj|qw=)O(C3s~y45t_66G za+f1rLL#DppEH5XfRDQ(h-AdXV3m}V-h(^@ei6zHgllupYj2W)!*;PBq(N#Lnwzno 
z>%Tu0CLP@YX*Do0=>!JubAv0hN}g$TPrz z!{fahVEBG8jN{|ugqr+p3d=Mcw7$q0rQgZ*Z^bp96o(5r%KQ;FjPuf z7zSVm|NUdpG7^@>1(fjPI>y!EzK_xT^=lwq9rG!?E+z91z)$%p7hnSP=pJ(uU&<+M-YHZZE%}=e7J+F)n@d(UI`R2 zHO&Loo{G(4R1A3g634?O72P-bAYt@%kP6VoP|Bf0GBYxSP+^&gkx>zmkVxFEEK+8H zr3)Ay9%d;8l#leKQWIDq99ZEnm?E{}Bt=zK)fH}Dr``8l+}y7%EG)1hW@_xSAqWNr z2H10Rm6qx@MH*l&`%>2SN}I7EWHdCr4Fkq3uK^-3AY^Q8s^Dhu!>z5t=2Im)xy-(~ z6<;Q#%_oZz^{uTLWRuu-GS_5!fp4hLEIuCOJwg!u{ss-?sQ$9#xRE~wqM3knBKZ_( zuoi}ZD-x;vG64$;hm4a`9Tip>WVPJoogE8%1>npxL3+Yw_ElJgS}Zj+4mWvuoHdiM z!~fy?TV4()1Q_Z|?o^gazC4IG9}^Ey?m!hqM@P2-TTOg>`{3RVlHUD*=gmg^3sTS% zkU>C3X26&N1DtZ0qJ1tOdSCP>3|Ls$55UakDWuD*s$%lHpSSe(1_QJ|yMAoOTJ6E- zVA;pi+~4x`rQ1fFde7kCb30)}!1@Ub;PqzNAH@}9eLQAE*dc*Cdf>Q0xeyeq^#Owd zGa_;gOwsd`n+~P~YcrYwd8@FTigYPBn7)_zjNDAlkdGvI@+z%GXB<145t<=;p zT~xLl=RIl=1#0?lf2gl-4KzlI^qSLxF{u8690pvj3<&HAd~Ph>-rj&x%WRhfKpz3p zf}`gt3}nuHv4I;rV>Sr*fHM9Vb-z_B`usIDmFU@7&&(t;8+AR;R;)CI;h%wQ<#wM$hbnPP}(O+M_`U@;QSmQ(%laPS=V~~QvNCeX|MFW)^-JQn2 zum_@$Y=U0`cRs*p)1ScnERFMx9@WZpZsNaGX$5Z9F}@=cbOD)o=7R)ITVmj$bB($q zWD{B7Dm4fGn)E$~BLYqvHm%R;9b2c8z(lkH)4e@jfI;GYO$NLyy-r<)tYjF7ao^hm zQO4?BY(b)+rln;ub8rEzhf!`G$L*FBZYWR%dQPotV2w7q=Hq^b_`OoQ?9K!Z9P1FM zj!ojB0A)<@vrL8IolQffKC1RK7Yk9mhg}&-uc@}4qjcP#R{B$2jRRC;8JO5Ncr0RE zuE!ajHF0$6h``q20BfL_ErDzRO1=#kqx4^J$5&9eSFfJS+Rn}n6{r}KcC)t*D(r~i z2mo3ufPLHQdj!8fb}J$ZxqL_&qzG*reewTE2f(BHN#~+JhXr8RUN3o` zg}e1vSd4uGZ5{-WZT%#&(SRt>%BJznSY?84D&QV$UGE!{)`0=Js9s|%O$}*jX;>E* zm;Vg4WJ3P~!gCmSxV;#dlTR05INzU9d44)DtY`oJ{5h*NV8RhZ1K(l+cL-al$pCEO z>uibezkdk72L!w>)~rOgva*6DAt5mU3e^S{m}F-3c^uF!`ApEd^HVFt0yOdsq*g@G zpDOzU`T5u5$|e(dSHJM^@MrP?X)j+}#?jusEMvs{-vF%_?%mZ9r&;J?ol~Eo9x(hL zzKm06f|lNY{=m|b4i?O#p7%|z{s-LtHHiqq8^yi^Qoeg{;7Dti*Vl~)Wd*$NYlfeL zM^~4`vrpHoFelreuPyJl_ql-HWWy<_<9TJx+3;ICJH^cblYz2|kS_HH>j45RY@#fs)ndbpp@)e^i|_G*%M$Bkgk=T8Rda9JE~iH+$--LXR&%0d;hU?eFjB z^b7u{^B(+!1}jX;#DoH@Xy_oX0>%%>J<@whO66SXI3EwZ|LN`f_yJM%^zwQ-FuAAu z_~MvJ-^}b)5NA|l<3n=;{{OV_C4uh5(|-D$;M1Kh@gC+HhuZ0Gc5Y7Qe|q_=K4JOX 
zo_jlvo-+Td&)@m~tv-K;5h=n8yh61YpVjr|M$G5c5ez?F()9&v+3|!PkgpI^XGex; zX7)Rs@2f23U3L6>vXiGDzf!0VO_y?u6|^d(51JcOo?QMrtW{=eE-!e9ktrnwGqd@N zmrom554MIqJ*3trhD8MIZJBz_J|5iY3Q|Z zZ{lH>(}i3fc&@sdgUM)u_Xh;CPmBGWvZH)Fi72|zRUIaLCPOS~X~>?om5M|~6zl39 z*UpeAF#-$>%z<@Yzd^RJ$mvfGRMgfsO;^fqPl$%8woR@!DbWm{T_OpDc_=%YL#Zr9 zfl#(~JSIFq98dO6VHd+YNPvreo-1<0{-ONsS*NpRr}#E|LcVQ1?U5&9?t15FHZOQE zK$bW};ZmxT1($-l1NC(dj;9`cWkls``ytPJ^?iy;QJpT?JG3~FPlwYv_wV=FFQFFptm~N}F~$2x%KBYjb01((_l;4$HD(@XYt|mCY6|86h1r z|A2+5-{0R66&=jX`f}l7Z=Hziqm&iBg`St(th!$I`MFrhNQO#MLWWF7je&}B74KWhu!UR#q& zY=#WYlvA@{8{}5pYE4F|q_qDoES$!uN<(R$T1k54om|RXydoxWa1s}Q!1kK*r;cV< z2pt`QGm@#-(8Y!(BAox%*c&A3;Ba9ZlkoX}h_#hih-Ns1fym+;hq{NFr!uG9X?7{j zO$YQHCL;sMIi4($ZRIHR1GgSmHJr|b-j+?0C0?%dDWnJq4i40RenRgT?M2o^xufm; zUU&&7mXV}B2IUdU*^l#RS`FQvNGjdKB*@W{0mjJ+R9^Y^+sbRBr8>DPP%8n#yR zzHhE`4v8dcH%2q11Er_F&{C7SJi$0r+2$YbYrN_0g;4wAuWFzvj}hOdQ4o_P0?KY~ z40`(H0EODy*^yFFy#kvv&jo>w?!!uRh7)!3a5e3GDEpWx3K{3}fDiC*U*ahxMR*EI z9*v3bNlq*yEA^sEf=wJ>E2S{nY~ImQ_CYs#We zlhePg%G5NW6bFcT^V+7RoU8GV%h#2u>)YR*O$_b=ey!6YUIg@War?h4d)05G=AtL3 zGL&bsDgR^&XAz1WSI?N6PxYxuBce);A@pF1wS@IzTXpqpb!TfP;EL#bcD*OuLaxj@ z2|pqw21_OW)<|~Q-e^D#v(bl*O~a}3R|4ri5wf~&Fp+yl4Z&7h!rnH8#xis;hfBQ9 zUwC)xCpnw3b+kKD_j%tGaYS(!ZzV)BaY4V=Ff;j&1l^6(OgkM&AEq^~A1=zB3?xHh z-|WjiC^ShhVoVu+z~kb2pt$$pmyD&7CCF`&+UuY6*0L{mE_7_n-BwXy&dD(x#`rv{X(-;c}>mP7r!HN?|ou{WQ1f z`E-4kqQE^gn`U`ZadJPm$!uzd!D;xuv)Wb#9V?*QV~p56On^Ds1A|i8@gS+D)wpA8 zue2{{Vn5vnyE9!m_^QE;>-@lI(QGUWZjf9u>ZV=uMf^eQt_e3(en_}*d^G5VNrWdG!zqr{`&uSB$V(_TU{xtMCgu2m*Y#c`XTS#Sg{tGlDTJpkF=~OHa@iH=0aUt*9ZAZ zD(3MgH_}Bw#q2a6xcQ9l#uVQxHiodX;sH<@4};@dkFYFfU$<#P-4 zMRx)tHEhIJufhTXGN#r}7THoHBiJJ8n!J*?$wbjYLhU^*U+9)LDQ#-!WxZp(Jp3jn zf^ATV>B#}Xbf8FgxlS4MPNmcF7zGS&J8S${P3mvtWb*Cu*if^1Xc*5Ca~Fw*`d_=x ziYXfvBUyEMyU&CBILzbx&ODrvJ5S$T0YWidMl2080(?s4ApLF)f>s=Zzw;XrGL=6r z$GYVRoLD#*tj5#;l}1y=1Oj>te^HydQV0*_Qp8pgSxMv9jt{jwcd#Se&8WTFWVs#( z3AFY%eJx{tex5+-$`glHIPF5BN@TWT%F`96av5LR@J8J}zJj=V-J?Z_{?q&z4$n{J 
zUeWQO3)7p1&ll4v_9h=kuVbyzVng$-S$f*QPc1D3M+9SNKFLQH_l5eytJeOIxf-(1K$ zIu%L9p?x6q87fjKpLX}4^Djg(RV&np^qgNK1@#(`0UW}kZ$S=htN!)#E^+qpy&Ed* z_@}}w2bXTz95P7Vv5e2Irypt5<%OX#Hoe$~#D7Fk{xxvpFz$Jx#$-AR^~Z7TI0IVR z)y3&DOr2W4wh^@2VutS(Y=}d8zw8!(aYEzVGzrttFy9*|)8A9$TlB*5jSWc8tap^+ ztCsI>?2Q3y9{j5$H+@2SkSd)CI*3j|zxxuNn=O>>RL$^%L~GA@whDKg>+iP8_>Ru# zMkhMRA`*`Bd@VN%%&Z<$5 zE|WODEaz9g|I2NgFkfW|{t2_jv=BY*Yro_~PP%(SpUB0WE&UYp)_t_;wdjcGYv_Oc zXImIEQ{=C-#vo$8oOzPa{6xQU0|@XyZ+)GDK39pkY#{pSK22}+eXjj+ZqnDI-I&zi z|EiA&%6;&+vl%B%tLG?{!>?6nF;ldo@e*I@<*Q5uY4;*_1b6<|F_bE}*x%>7zSGjF z8-mh-)Ot9_d&pk?WXNH>e-U@sT=v1mzSOUBlo`hImG{~N6bzom~Q6! zpw3^b<+C*R5Lt?TK6Z>$VKe(dA{Q6C8ScnN9YPrMn=z-fTs-%fwBSH0^q-h&hnk~5 z>0$g~lAW!U9FJPYBVIRfDhsGjj4w5T3-Gp-Svl1p& zvj5wLo#c`;G|Ubz+N*tQt7UtZu=R!iRST#*Y7T4_*GIm@!sb^kmV+q{P89^UC7|RyZ>B1<=(x8!(^QeHzKyXCEnn>LMfLSQx*;hw-l3lD zLpvgf>g>#r*_EeZ;RF;{o>fZWyyGwi&0HoiK! zAzUOo^B;~LY5CuG$7;j`&gz|Y!$CYBQ0osD1}yQ_ptuKKbJ?6biO!z*;@{s1c?Lbx zBthTL5bhY)?E80~A1AGi_CA zI6vnHpCz`lPW>vEc}8bb8>=CV`JQPOMD-OMS#@=HY_iPAfY#@U!n>X`;9#U@arBa& zQk$%}optJB?MLDeTcVvL{@zTQGUnh%loSQ11%oMHd1w&{%eVMyF;FbKtL;)u;l zle1W!gj9;?tl)znYCr3-PQvXc3r5M=cOz~e?4PvI@N)e?(03~guO}A8n-VrVIxLP3 z#*XL?zQnf4qRO+^n59ABF=#aiLE6xWn?D01%>rJwWtL37VfK-Ia`Cf_wk}~G6T?<9 zr7es;8{W;zEegc7nd7b21I?+ZC|(_9Qbq6pR{};?!h@0d>d3bh_Q0{3QdZi4kSwk0 zo`o9%hz$l6%hSEtdz|-cTG@!@OF z$9T!iY%)n#T}3&o&~9->m?b(toX7aG43=bSv{cEMJ`2QH)9N(qh>bDT;*jpUqv-qA zKwO&;rw46akBN6yLX;?)|l11PRrut&`I({?TAn9@R)?W5sZ8d zVGy=W9MYJ+f7*Mizs%9|WZ{Hcnr!t>QAqN*Sbp5M2bS%fW-J$zBACY8;yf`<|FZt< zY=I|ukfhM^+X^<-T#n|ClEc8%$Ft-Ym4lRL?qeqO>OI4>6%FY3h^6rHKKy?)f;FFF+H0j0IQ(F>WRutXNrTxID1uCW{BU*8{(2MTUzWF8lWNF~!?^{Ij;I zVaueOSkwtNOTv|i;;CT|&0_?H7hRvE<+^?CW~B?o5oue3f6Dz^S0H*kPGI9~+E;e? 
z%*3V0?M%^;?k;A7dO@{p_i|omrY)60sK5pvqbAO+`kulb1yvaa?3;sR2StL;$+IlF z>+DGqgY2rWyAvck0uVc5?jQXZ1k`*`_=U~PU*6JMM3ak^e1?R@$SffA#a)$9jgFi1#= zF2!0h7WKkH*}*VOt8vb{P3*N%Y%-FsSE+7I^IgQufocddRbbesLK;3F;>fGT_TS!( zEN56CTIYCzzshx(ts{k7rjO;z^Kaud8CpjMP|=wVxob}P^T&=?OukuXHbsFcFX#2*B&zFkGpc@Voh{** zP)V4mjQRGUup%F12xrNo&2|-%zXey6liGSOTL9w9)HM7NJ)3o9`&ow+9dF;n_~{w7 z6?)77Ppf5XO(7TGHS$onZ9f|%1+QYM{eI=!=g5_4-?y#O{#SlCCej=CxF_UiZJRL1vIAzWr8gcZ1K!q~ zGjD7OY|)X>xiMXW7C(7}P720+HBB2Z-ygw6Ew)&ASvS^J2nUdky4yrD@q=|dl6hn; zpBi#*O|UkSsdaNS()}tfQXydPm!V!jcq1ozB9=wjg0z7+!4ii#W)e$xtftol49v9C z;Wyb&-c>f1>681ofBf%%cs}-0m({KA58-^42#ymNaX=8jOk-2GGD&?xgaVvR||F zfHrjA7{Q}tZg(M%;QzHDN|06Mb^J3*5`H?F$mhW^BWV4imT-=lJgSrO2jER`5@+V+ zx&taKDlF(g)UVaOHGsBiIAE&>bQZ`w~$bJL{NXyPS@-XdX2WJW%sJswrOtaqLk_V-Dame==f zTad8dydl;2%@;jB<-iS@&1{&kU40qsE*8w&V&Z2`c!M=bOiw{h_jQ}u=ua(Ls8nvw zZ5sdPDDx9tQGHni+!i*(u*j%MARXsRb4&fe;)B*!oOd$?QpUHAC5e9x3ccW@XRtZvVxQJ{k2 zlWH!&#zg<%bqOZnjqpX@8(?T3#bH7#$4*UseQiN*pRFg79wl^|!hKEuS#d?vr{?ix zFkGArWvbudM0aYYo9kpH{NG(!q1t8P1$Q{L^v;%{jxsuW$$ z6xKV+R=iFklrf2GPxv+LABLtVC1jMDb9jUjB#A3iEX#||SmuissPR}5!m#M5xttKa zMBdqcZ|H5E_9s-}8tjY(A=>=CZn>?RqT>avgNMhyncP+`x$TbE z7gLizOjLqYY#49y4lNmAbMWU>Pv@oKUZ7Acmy1$-@ zXZuOyMayKfrq543-NBiha$s?B{CkBkA}QlS1R*$7B!p8oKzTPlv)kOKWBK@zqPp?m zsq1OY-_w(P*DE@E7lAJ+z;{D-bL$Clhrhh%{CP9}{1ubnW*8NF=9}&an2dV6uN?(P z%jMgBfojLEhIJaHH&HBmd0kO6fAuu*KP21Q5ptZD-F?mTYD{QjHH*^FT5I+u0(%ZW ze9oz*NCrE67Br0=xD%H`OZWr7z68o_FjYqWj8hZ_J;>(aQski_7owyjro22gArTFq z$Il`XAK1jOoY(%-`i(?y{_Kyk2pk=ASAioob0s>7Pz%ecE}wRHhbn8Ti}E4r;0So3 zjibQ1WQ;#Y6BwcnEFKO&NDt8B#H81gvKNCzq@vPfnrWewBa11bw0r1c*zLlCUW6-smZb8P z{qBSzV?bVACahIXDb8t=QSbOVAQTCd&Wo0vn=N~Mfj9Xp?Vs=W>JgG%Mn7nF%|_U9 zm|^_ZV#m`oqJ|7+OGejv^LKK->p?;8EK#9?-Y*@&=^r{X)16!W`C0Wzlezi8(`q6s zS4Afmk)v2s*>0C@Myq;R(Ei=aLKNe^Vg~xUnjjMQd!dmyx*VS~WQYxk>{g>zx;^h6 z2k@;woi#&>XR)=ccJz!$&AvQ*971~G=5d)(pcdZnaCcPdRN=-y!aWi;Blx{POZE%* zl%Z5Uh3oX^x|6BAwtkTKy4vtD*8J#3jQCUPPcKgr~pJzQ7 ziFipolsY1$EmqIBVEM7|&oCj%E;4NQD=5VS;*#!7RsRVnGYx}&6BE}t_h$o;A0rQ4 
zUqpi}nk!c68oE(iS=%++(mYVrK*{3T<_&kNuf!>R*rGkQL}&x2Br+)MF8m5j#jpODl477QKHDAA*T?Jd4R8YFdGiDCD4*cHZ@t%0 zCcyfBdbR#S#p@f%2(Z?D4=!^EJwAE#yiJXfjtX#c$ZH<*=EyFoOONcX8UY<+vi4bq8 z_`O%J^15{rlx`3ESO|hYI!et&_VWSvYSGl{_fI(oJ3^jVKb8F6n+^5(uJE;s<*6Ar z7pj$l6~bmHYPyTSt8BD6UH+S4cR{MdRH4G*zd2{<4OWf)TX-5~?`F_iu7WUVG9YZdzb5i_( zO%e+a>i-gGK!d0uUxHU@+GkD+k<~es9?kl<011pt;fzK| z;Z@#V;2TUyyAx8b&=(p69Lp@5xhxNAasdH33>qPds>W6SdabQKCNeok*ts8aL%*ki zaCsGs@iFm=rXbzRw8YYEt^i{fe0#ENx-6XpwD=I^(^-({QIVDnSuzV$2&@lgam)XL zT_ti%oHf2x@yWX-@&gC_rmCjr?VOeCcKYUq6mcRP{_{E-LP9$N&Ot*hdHc6pKZZt_ zYshueJw;YyX^a)88ME0<4mTz$%`QcF;5-lVw6rXQ)HveJ%<^Il%yH^o@ zKvbo{OSeE2BiSKH%UWu|X3bD(dvljQ%kXSHJ41u!@O(M9MyG~VZDoK(cfLVsS$=>z zQi^0GCA2*fbD^enV2MK?GYoi-1YU*p{aH8(ay4Xp#f)PijbD;R))rD_2lIFV+2r42 zF;62&y0eWW(~2+X3EG>f)>3zBIyW#gs@uk*|NfcZ#2{5I_Up9*UW{mq1F>zgzoO>PG__NhkhDoI{_|-yJtR8W9dzb%! z7iWBZDW9=4#V+lqf3fy%n159eXPT%9MWto}v5Uh)@CW#Rjm;9^qz7IJmh39V70Bfa zT;dZ;NH#}sS)}yP4z6CnmpA6>wXP-#vN9dDqLQmRZlii#i|{AmRL| z(}$6EaUs0@{8I)M_G=CvNe1Yt>}&)KsfW`4l{?^Zkx*&G@2ozGx>$>>X?m>NC}TB? 
zu7{e{?v22|S`!#x$(Y>;?p0-YhiZe|%$_O>{B$F_NS4>O-wo>PdU{~C?H#+dy zSK*XAR7v*8-6!tpQ_tIoUOUQ=hD~oPY?sYSx;R&#s;F(4mm@lLV&@sH*PbWbySEqD zY_dg>9Ax!0r$c5&^fba+rNqQXje4dwz%WHx$kaMCG41x?a?~u6U3F>V=Q`z)=6~Dn z+Yw8XZ{z&zsqoz+%S3Fh;`OE1-3vqH%0_07Xfh**wcgFML306r5F*}o5bG;%zxO7% zCMX#!;S#)}$D>mxd%EY{*F`m7erkD0F86u`(O?KHw_J4d{KCVm|JNihN4vPopasG* zRYZ2T#W3x(-}5m)ZCtN9iMGHkAr_GX1;kBfTs3JrY!^*gC{tfem$Jl^Pq0qq)i9@Q zh4Ys$<1OPA0=!3u#-O2~jHvPpVC1OIE3kZ$g?{D!&>VI7uv0Z8y)d|Icj>Nx8DEa@ z`Vsy+I>z842Um@TlD2uqFChhCd1i4q_Y%UU?zr|VyQ^yQKVRwK8-rIxUUtH{ONWH1 zBHL%n?=`Fa-Zn7D#BVydWGDzIs5rOLIK=fTi>AvwSvSQTaaUv?SGca*J(cMhqVM`A z9}riJA0RDnsti;AQ-_RGE>Jo*mb*lqIyr{hS!N{`!;qU|(Q|>%SxhzJW4d zNzO&%?Fz~sthi9g?8R@|WFE%q(6 z!yzjICL!D^?3NGkN}mf;7#x(3ckYMdUSi}GIj zUwO(lIH+dtTj-pZe^6Di+!g7(JzDQvG27~n$LWwAX%hBg&|XHY^;4MhmNxW$2M$h@ zubv|2RVPIB?Uly39HVx0d@!gF`!Q)quL@4K04JnBJZ(qQiZvW##~PZ$JLCEZTJgDj z4fNtmG(;aqve)mcLi-~H^Q5bS=?Si_KFn7&2Um_JndtmXlnINK)S0TYvQi;w>-gNU zl+=6e4fS|KS=OZI>jj~!FQ$)ah<2n(UZ*Z=i0q|C1+Fk!JHQ8L%cVOCKPI$GQ!4!g z`44n5=_*n7h5Zz35MP|wd7PuN(b>oF0Xk^Jv?bp5skHe#`LeB!C1@d-NMHI=8tIsw=uBel9bO@^ zO1}QYAA{Ph%A=W>ic*~H$Ve*7GgEDe@GNSUA0UapFyw@tQrBaSWE#i$yLR~YQah=u zb3AnO05s%55EVP)8L%hf{CS?+XP10ziWeW_lr7O?ZOv?I{2Wh3q+Y{yNBS!VUsguX z_cutfcYoP@QT@EoYbEBnNwdbkZ#+vmT~oc7@3_!?zU|>nG1L*Mx+2S*KBIH~UtK_9 z99rszvL1-Ti>yTSfihQJsyA>0zG7jcDAMDQ`{UPJ9a0Z}S6FeOSxrKD{ zInF$C&~tlR)lB+M8T%}@7mqX3MN4%txXSmitU{JON_lZ*SwIUYH`E6wt@ObYua89N z8fIiQdz|_MCP6Tw!rc_CzJuyp9OrJx@MCQfi0VR&@o0)malW@D0EIPf9Es=Gp3L&Y z62X(2;x^wS=mL-IO|>vtYQrQJ?w`solEWyo7;~Y{l9)3zhIkYNuh`+ZDYW@LBi*gN z?9QpR#vg|4?z9mDYAJfKSWw06wX-lfo7CA~xi_hi(D9z!6{*Q6FcO7O(G~cH-WghO z|Iw`7YCv~K5%#66Ozl6sz<%k+b-O0l<}9XET*k}s7E4_+s6OrMp^KmDYVW(}97HAi z_;4)`jx$#_EqX5iv4FGDl*;e&s}Imim0^Py9Q4o*?tn7jV2FByz~}KgXR2 zc(DP5E6uM=(iDoj@~jC?FZU^!*v7RHDM~$9CDMq(2Q%7rtrb{@Oe>DRv>uw5xmX?j zZY(&uBn>V7E&9nF?sB#I*V6OWkPI(V@%b&*E)t%H9X%(D&2o;tp-FX|*p?XUO^@;9 z(&r(hO1&1TyB3iotsi~KflKcTGExL;G^hFNI4+|Z$2EN5hh+J|4tB#8ao6QT@6$RV z#GqLm19e~1U_MTka;q)qB%-L4F|esWjLoCx8JuJ>P#{>sDiEOY##SRzCO;oa 
zw($b&CcHe>KG>p1r}tK1Ej6t;wdpMAP@%7{2y?UEir&pN5l7+1a86RZKSpvJ;EgGV zzTzoHazQIDVR!{+e>+v*p)Ey4&P;-1+c(YLO(r_5gK9Z7bF~{VMFn-ET6-ZV@z^sB zZ&a{%Yi_-#d0SyPHU1S0^Q_L+qwdxv_qWl_YXXQyFPxa3)T@j!q{`{$T^_)T!#D&= z7LPMkx)kE@Z;P5DNQDMA(^PGh-JJi%rPhhi#Y{?wPY;^QKRMt4oAYt%U$GKpc5v*r zHeVr+>UL9DWWVuJ^_1QDA4;J4UykA3 zoGD+WEmLRZs=avI*bOh(Ht~y47`4Bbh~f6t$RA6r5F}(!4Ij4U0I$A#TDXg%TdyF* z?Km`1ukpMlzL>rHi8e*S^O94Z^2Kn*ng8CW{BTaR{2w2~dpFj9@?v!b{lvukSd)tE z>%$-4d{0dA@4v|X*xTrVxl{*-C|*mU0I||!%iq1U&Hm<4cr?tS(9_@?Gd z3QErJBqdKRbgiYabdSpJ@8r2CNri}5Uj0X8tYjZYB=64a_g_w%f4A!{0CsF%AO;?zWpP`=#rKXu_n@Wo-y~B*b5BNeOM~5V{liW2hhrkl77mt1=58-ps_mpdcWF^o!?v(>Ycyey18p zoYo)%{ zEvI|Na&;yX=PejePhB4fVB->e2^;_XnLmkRCCAaRL9AFMHZInIq_j?1xiT1yT>o{H z&A%K@lk(k*noV;$EM_oI=4^lg|T`Wi;)xJ_f{{{$WJE;JV|iQ{qX2&HN%a1yi-q=vjSz5g7KTY<*{X53;$NFL z%`^LZ`+I4A!ZI`vx}@h&7~NFcrbDKN3L3>6Xt?f&|J7`4y5rf9t>7wH{3H z450KIU~zmuTk9RiIQTB9s%=6givFdQxxXI&&G`^xGMH{h%Gj)uS|C3*;>84lt=+-4 zr}~tjF(UKjJu%bpg^;hE zDf0Ig5wr=^B|UyT&GSZ}K^%2*NJ8z8&s0}4mz&M!WXHT#w)#{{Yyj2KJU*Df?yTZw zO0`R=IaoxnEmg^L%wg?k^Y@|LI_`37*GjzY-q9%Gg0QldEF=Ryx1fCKuHn z4!o0E0;*;|hYSS@F7Aca2~*5QA1%hW=`5E7b`2$We{btL><$`=b6tdwFGm}N{`yHD zYEri!G*tgsqD(TQYhCONa13Ch^B7ODdk>B%3H;?%GuFUc$pj%w@a~D{-upf{-2}-Ra zj7u^8P;Qb_(EO-qC3)qaOJm##Uw)}CY`5{PC@?g;tD8eEX6sFxdoj=DVW_1~{K?!A zvTJz-S9)M`_qbORfmR1uFmRw6s~V&NP-?#>lu9I$%u}=Hc&m7DgvZZm-9WE;B^!F`2uXoqkquc|`b-oqj?lwU4 z{oLFa3P1vpyp8#q_E4pQhbY*nj#UVZ1SChCt;*Jy(i7*|x~LrOlXwe63+bWVZ$CHY zvH%yP6AwGvA3TJji)SVB`C3IXo(X`y(&~(g|2k{)aL7dHuc#cDA#k7qD$-?uV?Pg) z)h;#j^w4l4qwS3XYWAz(h2VXK4sgL|Rjd3?QVy9f*wzWG)Ahpeqz@~)8;!&5b1INE{)JY+cfS`s&; zsW<1&pjyGdtt-iZaZiiaQdu6Q+kyzNn+8 zV#=x;C)ORU=BPuM@Z`taLK(jO0yL8$4gvXN2~V!pBe)-B|?K_JZ-;pjp+1*=+}{e zhAxY-5S=$bGZv~|fdIgX%xw(mw7-V#n&D(C4PO->shwY8`V;^2*Igx>zBOWi+(bYC z(O_Dx|8sSiK>qId8)r@hPRQ~^OsSQoHg0uvHrJ2D;$sI)xlsXum`wY3=+5)QH9duv z(Ly`0Z{De#_MHgwyS*8syP^IoV!Ehwl`d-=q_?_y_I4#Hw$58pgNhv^mE=@jsmRvp6j;NZ=~NxrwjPF zr9Z((4cGRPfVvsrDprTugX8~RR@PJ;9Cy1Bb zJu;o%zCOCr5h7aado17~Xzg-v_s{KB3o4AFA>k&059-|G-~(}9=+?=}B|6yAtiGNC 
zoy(5(VH+Zh;-Ui^Xx4tuOnHL2C{BQedxIM2g#hH@z9JC~c|lRoaUy*ULvrqiy;(m< zB26x*aWWvdcdP8#m@BG{7*yn?W4N$dP^1x40P;9j^4Bd^Sd!GJ)!eNxxKOH2X zO1hkGPx{x>hk8vJ8*$>Ncv@3~*Ip6u*$s+65QXc7sdi~ttuJT+cHO?BP6H!ejAqKn z1rhJ=9U#`?1Kaq&4;FLpGKG9Ita9UH11E$bw)!pHqi zKt}E}=Y#F|emnTaASveKzsDC!`lfJzH^S3!S)>dDSJgs7`mFAotd=n-tkR;zeI}(%$+-%2*f}7#DBO z)a9fCkL88%IP2Kns6)S*6;e==XJ&%=q-Q^~8H~>K$6MG4A&Fq2RRd*41>#A&k5=8; zx`CNcmo9z?{)n2IQjgC$=6BA25Axe*jF|S}^Bw&{P>lCWyq1?0|{XZ;Bo6 z=d%doO)*0E-{lKGrUJ9}Z1 zuYQ3~D{!Q~7}QvL8$SxD@H_aKIZv>@Y#^Wu_dX{4h?Z% zM7J>mmn;Hs$@YFA2X&D(8&0GbUUtI(vg*nCieR@H>P1eQAHy3TVbDki#KMuxf<%DE zEY_wDkfO5R%*s`7Sbg`0PDho$v)1CW3UZq%fS2scW7Y~_lXt~fc02E#Xz<%Nco(Wg zWCDOWzvd^b0jy|_+&<*`jivGR!TMoaxF2R1coT}?%8TJVFA?9)F7G6TL z0+eoIr984nebF#5>0Q-W+d{{|B<%yl6w#6l*G~SDBW}zk7(%xXti9lQLUrG8C z_iB=wR&Sc?)1!_@QKp|Dfu8P|*bQcX~|z)a<2&g*ZhjcqK1PYtFZ}*&=92 zV)_;zAeveC4|cd*s4e2hAsrcRsI3V5mhBaQWUqH;7f&lrYJKvfh@`CB%T6tUU!PWb zRcKXH74pONKbfchl@#-X1ZG!py;8ZM*Y7t+IAq+l4eBfGm1lxFo(;z1M|^1t{A2mG zvU}gHc{80!ARzut!O*wgPR`2nPNs)whdTjTeXo?+NR^rd&_o!zKNAE1ZNI|1Pil*% zM1FU%xS@EC2MW}q;o^kA#&Ope&5^+jHM{HNXJF;x9Gjn9ox8BK#Br* zmp_ePM3cMfQ_;B=Gb%UIMtHOu%c~#$D()S9i;6Xh@e1DD*aiih;Wfn{!#C zdS;CjKdP0|kiap!3X2?a!vw=^K3L#Ld#Pah=)|2G6Pkm=gK(%TPWBizmJ23XeV zufM#r4_NUFw>NYC40pVa0se_?u%f{s1^`dPh}b*Cxb5@+@3I*VXwTTY+ zzcXsw9cN$Po2&Qo17-87;lbkDT_9j<_B;AOieP5kE=&byCzd|_x0AG|Q~3;gx98FA zbT4tSW!gRD4D+p+%l*?z6S8@~we0$OzY_j}$(ovlstP@v(nX88O}LHTS<2~3r+Z3xn6HmCY^ zAB}UsI0sC)JH7k5*t@@m(DU*C(?so4%`JvIf6uAAh`I9bpvg-m1!m$e(ZgE}pOdWk ziU_)heZpEC!o2_BahFHYLW(a)=hSITpQrpagGT;!-U3V+7f7A8?AmFtZE?N2O98YR z3_UH)l=@QhMjs_7dcxQ-=@E&r=_QWm?RS+5F!Nw^$x;V9i|q;vM?ldf?+y1Gho)X4 zp22L^`pBxCyJ4a*78rW7>oD>L4pMf`p5<+|iX|CnC9ng4dDvyg6f@{6Fyc%)D{%pz zNy1Qp2I#PXUIj0_Z`mnbdajGIv%T9T=_@=6&E#q|R5hU&64R>M;2n}A&3slLhK(_)I_OExxk|FxM;;q6VVwKFjQsHPU ze$8i9Zp5{|NUNlkKO1{<0SY_}NQZ6dL&Z{HX$=4jMB3Pb%wzRjpFU*+c6Sr0X3xy8 zg_KtPaQ~770KmM+mjU$)bN=Qook%SaVqYJXLZ)bc1ixc|J+5T@Z}E?JGp~VRQ+B|1 zO{1SQ{?~9G#7b@oN#FD+>5%)MkypeKaw#8jomfAw0 
zW6TYVGmR4TBpU+k&26@KVsSDD+Ak{N;_2t(ZU7_|xN){O?ImyXIqfsPQVUbW?-uE~ z_%Hl4I?L*37=)-KlLqi`ZW{4dZD>(Ie-JY-r7op)91*pOD7$|g2klaj-N5k=}Wh60AjiQbN=F=*m>0l?Uur67le+t{wM zzkImIhAW5`(!2fMk;ms7l%@gz<9Nqu|2ag)sapqT?QfH*RZdDfr+1e&T5ZK}P5Aud zwpit^Fxq`otHV+if2N8cPd0*0*1}pcx`6-AdGzU9{W}boD@VhJ3hkb&Pd1J&t^wJ; zUjrJA;>oZ{+xd>_3_?`h?`<(QDYl|GLc5=HtSvKd$BmQcDFk_ad&;r;atZw7F?#F( zyYgn!eW7f%I2V&@5zmGnia2tnh3=0!&WQElKPkh@i}hcQr1E0hcK50qA-(czeR2=q z4jTeuTEdlU!xOxxj=*KBKe%8ui<|-Dl%vU#NHTsMjeW#lm^7y2Ka8D|HP&*3*v|MNmAF`Pe{>ETpm&{# z*2PsObb^B#re3MNT;C-M8%Juxv|M(`NR9Gp32l-lmDMZ&i+@(>lh-4PNPi_ z($#}EV1(lal1-N~T5Na#w^&@88Pa(?wUBFjkZKOdHI*}Lb*+W2*(-s^z0)14%*spG z6}*BLVf*j@3lC3-@IssV7UuRSpm z$b>N+91Vl?#{qCN0QuO>JFcqNoUwM~vgW*DjVL1+6D*z@jSo6CHm)>_P1pHi|2I0# zm6*?p|F%zRmH>vn9J5X%P>-C3p(&s{4f9_OJeQmzSZVwT9(#t>&=Ic?jD*DkN)LKF zSOGb$L1la2A|f?YsG_|-uWuj2iGOIdQmy#+OjRmrl=tIDg_NNiP`_eY_abmYWw7}x+P|kgYLCXkOc`q_>sEpz;tNrtN#;!pu`#`cdUEu&Uws?Q|D6e2 zWxLt6%+*K=dm=^7hu!kgXHA)JP(0YfsivQCe-kCxjN_Hh%umxFY0m{dpZPR;aYDfH zFgze)ZNWT}$=ey2U)?gnHExbUpuov@ZW` z0k(|>p-ouN;db3ulO>aR@Is@(lncG`%3+7P!38>KPXNk5nj$kd=<*sQccFsGKk`K; zuISY_fuPoUn1I+=W3dc72VvKPJ&6MQrl88vbSwh0cgDm2I!k`DGcMFaR*g;m*2(ye ze8h{W+ksJa##Lslc$r~MId%1Osr`u<_U;(wr66L!Ee*8-O-4xF;aJq#mIQ#ERAOne zl!mT6-(-1j`rhh3?!LQYn*EE8^#A)h*#qC#SM>HaHD7#l3H2VZQqh6qU^PB zEvrJKV%E7r(zIXx-d0F@{3{o{v2pN#Y)(RHz3f>>T7vHJxT-Fl=YFucx$FdKwbQIp zVN^q3{q*P8RMkztqaAm8K?>D!cVlmV?y*zD|NNHmo+4YxtU_mOjS-lNsP@PtX+RT_ej_* zMqgbXD=Q`5JP`O21q8rmK;ITYB?*X)D(3t;er!JsYGYY0;u-YO6046s>M(%W%-HO= zi$|N=LvZAoHBN(BxAmA|&ys)ILB#MySF^`lS9oj8Vot z2v9V!finRA!uqwS_O^VIf$1BptjyY_MG-gqhrC z$L~VS<@(E0JNqNMY3O~K{Ze57o}0dw1F}6|dSyx9093*Eegi?V6@H(>JN}V8dwwJ` zb&waw3Y_N{B0zwYE1@bbpi$aBv*jK`;jpOFknq!B1CI;#3^BP6PSE!c8^}jMsC!x& z5&n>OkQufY-PSZh20~^H9%o&8w|)aHVr-nmx-xzP$h`(8Hf(&v&Sl zaA8P(wf~EUs?-pr*pykVM_y?CYsDn211uY@pt$n%6TkKq`#HJha>nm7Gh_f@4>(UMWGy@V&IE3Y-``#(mxyQMT>{rD`JP>-}+YUO}@VyrjhOxo@(gs1( z&gkEs{IcN$Vb}XHe~9Xw?;-b0{G}CMo6R^!dMslbQy$X6u35r9E)Z6o$g@sgJvzB} 
zv12!)L;Z0+#HIs;PCc7rQUi@3zauF6JVF~RXzj1rtAJQOhp_()8*0w8*aZ!TQiXyFaD08Y`gSO(pcnV zHaKTZ1d9yntsss|iA$5=RkuH@R(YrdHU|KYDiCG#dYUO`S@lS1>Gz-$+MQzl$8^voEW4jUx5es=48If)Yg5TlB|?Oswpso#?`Gf(A%umta7LM?sG)&uc zE?19sYRdikJf|2?*=g@%3x9c=)rYM-Y{yKpfdh1euNdieV#1K=zdr*6@NO1nh23ix zQ^rpsWeEp>72rhr7=x@i_fAp@(>@I0OFq*I07aANrUf55pI++8yL8M+lV`Ov$kcg6 z?aJWS=_G7)pJbW>mjtFuIIT?M534tHHqR_;X}oG;Q|HE33<~#;l@Y2!B}LJz3u{)y zAWJvhc`-S!K}A?;@YJ&zBZkRVVnYa*Z!bHGtZrv3eN_n(yi^enp7ldrT5ms$7c=_D z$D-Jm7ON&O4+8snfZX^zG8mQx+&72ZlspT8-h_1?!EJpDs>9QK9-C@OiyH5hD&IV@ zAYd<-W(tSmhstX7{zZtewAX&{iu4e%h*XZhP;Gla0;;{|^fp6?*Kao}&t}*OADDJ< zfxUTbF}94{r2|WLQ+q~_~cIs>Ht;`0GXLRK=P^_5ah01Yl)#=qGt4T zy|)()cG*XUfPj!oW)1A@6amr+d;To|d3s@Lr+UYC_J4``6C+H0p3jEI&Qvb$35OPB z5K|z9!Z}C(7~}~}^fR2z`$;;6eXH=m^K8?GQLqC&{+srn`v*w$P3L7ZLP z(`%bGmTV1Fpre7VbA!&ZyG0KoCe0QUhryGH+>h)9klgqCpeCSELH2B@K9s`wM}gy% zSId0uNK)KAq5?H{=*2YK`DSQcXi2-<{9SX{P%n>r!HU^?VE_&(BmcToLU%Mlkp*H==?9C*eGn)rj;MNIw6;mw{T0oTxMU6lI zcu3S1PSAEH#pi}!_nD#Wj!R!7^I>{p{F$3eIP8NHYAo9b{Yeb?kWElU(%*9eqWDl7 z9wI_hFB*bI-R-`J9w-iHmaTs)TGy`)I{)Z&HjI3%N;^>dN!QWmOwJvlIiEpOSz>v# z^IQ)HS=IiS?!hOV%7XUmY}@>H{#kna`)4(H|A_a#>n>*95BgI=gajzkyR(H{C zO@xKk(UKDn?IpZ?)H**6P3nFJw{qx|oBLq6jVO_{Y2=1s0+am!#8I#RnYt|?OUjt~w zjc1`tY(s&46+>5`Dr;o-uRf7qlG1j@`RW_lZQSt9Dis?5^0Ez924aSuIcVn)`2`K*J7Fu5BO$X-DB0e!SNe|(F9OJLxbMrJOss3m&I25e;!+ur8A+OfhHkJDUJA4ZHWjoxe|dse(kje$j!3jPQVczNkF~ z(Tm*blCz*(D>BnMx@;Lp%?h?>zB|P6kQ?{e4~6AbZ1@*et;Xw@Ht!21|1KCWM*jZV za?M9N#^_dxHjgqn*flW5T64og7+YNas{6CjkZS`q!)wseX#exr?mO4dmnN(WJSes$Fhjt3Tv5^M{9cebPS@Ht` za4(%@6o=`Qk^yXGsq}fuL!Y!Mrn?YUlbBQlguh~E1KG_Jdjy#}qRI|Yc+DT_HHq~-*56KUFHO@sQZrdrcewTxAWes`wY}MLObIs9C${aE z-(C#z_q5hxSmB+Y>BlE^oqj|aUA;PA8SK&cAw!F9ewQI>%7;8f+r>U!a-^Pry~rHz z>)MLx%ECHXb$iho-DE;51X*&C5X9tV$#_ehjZ$Ze)F!7u|B@TuDRjA#d)cE8PE4n0 zQS#C1t0Um_m3;m1<>;i+5t4tv5{dpZOWivWcebNzQmX7sra(5o@2*{2_kx~cFnZ~r zh?Q;-kQAJ#2s_V$wVhQmM;u_Fu`tLydo#RzGrH=BqF)W+wp$6>)i{YMJg7^nm==E7*#% zPh(MpJjtv^kpBXsahdhLU97iBNK4;5pHymkyaT%twt&oTKlS441#dp#vE9=}grESo 
zcxv7l5*U%|(%p}KG$1GB?j-l#TToe@B!YNCH47rf7Eg8cPkwq5K=|z2UTaHT z5X$$nU431?Z+~g%)#^^DWdPyyC9gdG61xVRa)8LP@qOAd=;5E=$ql-vF5W=VSwKtGGRKbi&)@tGD#MiZZNX+$ zVW{+SH3ryF6S6L6cF0qB^t{Ig%Os#);q7^Lgoh{->9Ot}NG6*+bkZeF+VE^Se0GGqZ44U5Fl0nHr-X zH}ubIL{&|!5v@5kDfafzN2S-EBP|2%MVjY@Riooa7$`FKegL<{D3)eLw<@abNXqOptwQ8n`^ zgOTvl(Ik*0y&w9r9a3S%&=%z!5cd%KE^{u$KZE-NTYn2ywcfQTl%|>#OZFE(V z3xDQtd&it}+_X`a~pc3H>9VZK{7Ke7kl)se8Hh zyI7{(V@H+B`|rYwy|TPR#3(Gz#gB#31J=ZB&?x#ep=(9}jICBr!ZxEP#c(XS)CVv*vS&A@j3R*VN)KT(f}rAKXLFpNPKjhwsj-Q@c+ z#*{OVvs0^!H{6@q%@nfP{l>%RNC{J?G9HdgxU*v!+R~PI?k42fAk8!%V-?+|F%-JU zxW4M4+!{SUKKZVH1^pAef9PNwF>E8IF6>`j&PQEoLySi4Ry=NR!hr~bLg(zhE_@NN zPCHo_4F|VwEgc_#c+N3EpA(_a$|hM8wMLAIXY3mn*tY`u%%x*dKc zlj*o?mG?s4ulb&l)RdWqk3^t5kh0u9v7kiP66*oEf63KvOHWz z*K~*Tu@3)?HN8!;Htl45!*nYt1%xYHImewDLsOF#LO-r?TXE618-TY=TcH|+nSJvU=iuGdA#W+X?oEXvy@R*AjCeuY zh5hfIbnkZuBv`na#SYu4y}udX%LwMMqg2MbeyOd0+|dV-WjP4`fFuOde#t*7X8|d} z*5y`r?D=wJtGXS}Rftdg_Pk|E3yVtlWQgj}*}%fxzjjXkIjCfV#oYg%@Hj1H*rH$p zuMAyW#b>$wad6>Y*{N=hHOCC3yJ6XP<<8Gu=PhsTxja>w{m_G!=wbVA2rm-Qu?8OOs7=7$xPRLtfQWbVq;v0!wtq_pM|XgQjKHAw+70YY3kwh51_JT_4QIe=tgSPq zHCun4{2@{xVlS)eQ*g80U#_03l&yU>4S9mx>sK55;dEtzrUW-6fdt}-dtcI(@pK?( z`+ZJx2<%4$5X;mAee4JezVfT|=r%iV2B7yi?)DtQ##zZ%+j5I)U;vw3hG!=AaxHZ% ze$lkaEvXU^DIVc7bkP2on3HZb$A2J^2leZHJ-B~TGi5)#^cnLPLYg0%>9;-O_>)`( zTzXkEGsG&{)hgTGhKnowR*tuiq)wqfE3rOK=FmY&RTFqw82^Qv5D1hurz({= zpfOImgqEu0ldfrkOL;$0@6O>Ep@K5o$DLvF!vOJd{RnUuziAIkFEy!DH^vmnkF7op zl`=;D>q3UC2hr&`V-HKKcqQur$q*wAp4NqrlFNTs7-&LI4K0@%>Xu5eogVL7j)g+J z+B)^!gDtRcmoN&0Xq)P77?99>D?JNRGR~2o)dZDs-;yz&h^AyCRcpUGE8CVgIDTQZ zD{aZ)K`i6@)Km77Snfm|jEAn`B&IO~vPcwMfNw(P&g>Xqu$h?Iufb|f;KBnDrabDa zG^j68Nj2&9C-o32wP`@k)4xQoQZNFHWMd`F7L`jR63mZSB(~ouK3eTQLxU~tR zf!|q_`j-AW5tmlGs|YNs`+K0-hYcKj$h-Az_56&Fi|YcmV|&mSg-)>$@y?-3oV3I{TjyCvykoYAK`nhS`h2M zOZ#Kjl<3`9s(w6+qs7t?#qX1JIpw{fk3x6e5D-R(3I0yhZs;f4Ii`mgFamKobM>$Y zged{~LeO5lE<@cggcd{jQ3c+pBI(Z*eRqJht6;MSz6!cq zy>(1SFzmU6$Kowm9AupKn$Y zsHSfsmK?dGQ7m?f4ooJ3x=@OCnAVe$9x4iQU%B~fdGDVFjrnN^xj4KQjQa0wd}ceC 
zqh9?i*h-hMmfKMVz!uqFpBJM1M%Pf5nlTGpOSSL4{zl?Wlp7nG;xJIuVI#E9K3sis ztD#!G@Ay1_f@F)*HT`Sf`*WLuQN059p(u{ueZghW; zv2Quq?qcHX?(c7JZi*-dAv-@@&;V(qf6mX-%$B%G>O?`$X*jpe-9~{r zqgTOh;A(3?d2j?V-onrO2jLM$s}3je(&wwAS-?lltQaFkXR)~wbqQbgn$h^3F)$ar zPDQquux`R|?)G&3p2U~xibI%yLX*K4s8T@%SS(ODCbHCqJoifP2N|jliB~-0OQo@@ z+-tT|(EbXw;vRv&0C544y*?4lpbx{T1Ux=3h6L`pwW>L_*VFX4>$(t7J-OYjMzJC}Q{FhCAzd&dpTb+)yiry)&#drgL z!j>0JrL)@>G*n4gg}}iYlV0K0x|i-vb>pYR;F_&o+*K~msWwyL@$cMu`=2;~E1;iC zsdJcFDOtfMLc3N-1u9BKVeyvvwCQhetzm_6p|Z1w4BYA|iQp+f zDFb84TAI?o5qQniWZ)jpGFuvpqRegUey4cv3-`T04~P>K=Mi6wsiTQzmaG-=cX$P zfc|=Ni__tT1_J~*Fz^PiODI?`jdrX2J~wr_OFr~Fe)Jjp`11%gxm=QgcxA!*7w@Kr z&%n?>E_)-5nyGTzyQgpEo+SdD`*pL%mq)vb=eO+DQp_i$hr#4XM=L0hz^C*@x6cR( zVhWu|vc2tfK^XN>p%fz$pYz2OR*vl%iIxL;RF`MkD&Sz_J2ijWSHImXIP&DjQSo9e zUaVc4!_Zg54}Hq9thwp`iaSzSC#F1TX4^#8EFbUeTE>qdf4~xr>26F3Vhh5RB+%T2 zAFRodxWc`rKz`d2J!|KV?^;eE^TeE>75h5I(65neDiMpS@et7M>R1}(kW@W;Ta~-` z&G;=v3ujf4C}p3`A#NuFuhiwMjl&Ss=>?T0y zhQW<6+W^tB+mTpd05ftCcc)-M>mwM5%DCS0=x!et`BE9i=DLPv z{sN%-eMO`B`I!#M)6%D;fG44ed9TEWzik6AV;ejU@gru9t1(b=F)SM>wzH}5`lJYu zfK)s5!sPx>f`{|y8gW-~i*e@FuROgU<-Ga@E#2|EgJWumA?rpP;a*G zZPKE=g4)J&W5g5$`(y+P46tOmeEDgU6X#a5J}@0ogM!p}>$AF58&|BJ_Cxy_eG7yQ zb;`|)jA%eDZT(w^n~w+cW+&A5Q!v(f=UXiY!BZu>L`^ADBc4B>4jTyb^7E&p{7^t+ z5#Vg!3ud_{6c+nn<(BLX;}O3)9q+?S|I;;hk3Z`7omUsLrqkZJAt9*Q)M}z#kdTO4 z=!A=teL)n*6=XJscZC%ZW}?;E#-ftSno7pm2tgtA#B|wIAyjz^?00W(SLW0Rba`EJ z-A{sw87ZxIaH2-?uFUW#3`4WFIR zn))>OC16-Rm3(@HW%X0UMBVQf%yF0^paZtc=pGHqxAL_Z)1G%#-%!;3zLr$u$K0(Lq=)Hj$$dsR_)s4Sd2k$lD@r*mIi28PBume zx0sSL8DB+Ml|A{8az_Lw_kOGJE+ZxWJ;GrfMJ<$jaUM36M{^@0Tq?YwYa(F70HGQu zYO&d0!GpPD$Va{}cLU;K0*@BUaTJi{YF79;wLkjoV613NIl(Oqz> zI{L12X7$(4O8Ok{cjtBs+zdH=K;)}Rg|`jyO3Z#z=H*CD)hULXR`sSw-9p;UjDg0* z$bLTQ!z^!Yi+KkDXq;pwLH?sP(%s?f*71LgGas+D4f~tbyV$4CsL^)D16x?Dm-iQ| z&+nwGPg_#d9Swn$O=fM0`+8|D?{J+IFRav54d&1Jg>S(XGrz9gZ2Ud2OKfbWzG^v! 
zHj`AWhulBMc;05ROy{~$YKlx9ksn##z1_1Mu5ka^Lo*nBct8U}L&fo}H8tO}Wrf(a z-C!H#(y6D(-C+o9Flq@A<2#g3t`iWhIN0gGz6~r7o=+j4;Vm^zQ1og~8~+y&>dxk{ z7+}nu8xu}(+fRtlazrija7zC02E&VpgwSs7lU}*!$nW7l@KTFvJGOUbU4dljvix}9 z@X=eb*whG|P8rKZOlPBZqmZY>4>TcP3ueM35=gRYMJil{(-8kJXQH}3D!gkydaHq- zBl>KJGFx*G3>luIh6gDu&Qw7lQCrc3E%*MF17<1T(v1Mq3&^HWkzSV|%-b_Hus5cs#Ujw6n_J+&-k^ zFieDTNP)>e?;na)57LKvTOAv@ghp4`yl&`nM%LY5Qs3ooZv*pejCy?P z6td^!)Uq;l9)Rcj#ZM*X$jj`lV#%|X;FeeiC0(j31m;`O^Tx%FrfaUZ9y*pPK}UET zy2bxX8SS$h3<#p=i?ch1 zDe`t^@cWW!K2-F@2vpWgfF*NFxp(?Rgf-!y!ls(JQ-W`PANRv+X8;FuJeLy+1nz+@ z9o>H#^f^AJ3v?i3%wV?XVgG&k^G+Ffdz7uS(lxvK<))Kd&i@dZT%%EUHx}+HIG4bq z8-oL?u#OuvsoG5sRF&_oG4mb%qtMcHGID5~eM^qcxISKO{48Td_nZ^DSGe5Lo{Gzg zEedJ~Apez2aaKvj`xmg7Mn#bFUVb?V{!|KOllglCQ*ejxpZ3oc*jNR0IT8Et`n=t|^Owo*l!=S5S4Yzk__jVtjwoff9cfKGGz)^g`0FIFYfv3+ zh~ROxpIV<_Gz8&n3^lQs#kRNhD`&avQ&m#m(Wh~)r;RSq_Jo4`-w^AK;jK}ee2?Ab zqhReHk)z9gh9KFbx#N;-_UPkke6)~9_2xGz_zr!^s1CH z6cUZ2&N^njoLaNqcROKb0{7gft$c9!@M?_}_auTczncyi2?XP4?wRv#S z7HSNiKNT2KZ{~7!e1$pL2#Md5O}45dFvysU*nLz&&ToQdYScP=s!3Vem)lf0L1zB% zHI+^|;?ZUM!g1$-)fPwMdzWUlW1An$#-pGD&zMo!VblKn5%hD0kylq}jED&5E=fQ> zT8}Pn2}JgGNCwtc#V2{v*Fb`@&-$h98}DXM_xjd32>O*peia)jkI*7NWvPzDIK44J z;nWsOEx0dN$ehB$k7#A>tXBF=t97xAE;!Zi)seP5m@PBo!5zYb<3x2q?PEqe#cqmM z+oUYpYH4~4NBsR%oD~7Ck_v@l^WZW-e%o12wU!xAuoi~o}rI=;sXg$t2;Pa#MkGvXWE3riGkEdJAc%7 zDPko16KS8uXYYWu931)jgvOW!OU}q6{E?)!`R&~58quR+?|~KPG0i7a{U`E6U!#qf z%d@xEI%6>R&+dQ_RubYlm?+qK(h(Ytj@I(#P?{T*;t1@=CT!$Xs&WLER z?-IP8M9xw!rFR#RH9`^7o}5Vbq=Vbu%w)wjnuna2Ki;6upNQ>WAiNYL=opGkPXrT- z4*ARe?qb0w3Fo(O+>)DKC_{ChfI~|UGzqrtorS22Dqj*yZwQIG7hB}kG&Z&j$#w7$ zK$7t;C?C&g1R?XL)|YMy2^r1{*QxUs1QRHp(GYAGQ0!>kLBZXBS8^*N+ zOfh+a-KT*yskPRx#gWFgIB;I1#70i+jxZ72MO4VE$=-sX8{XS|D2PhVBRX0+ z&=ltnyA%Gd;Z?MblIIBxk=zSJ037q#7l{D0wv`aj<;^+Px$N0EV{gMFyzl>by2`Mq zx@bLghmw*a3P>Z}B_Jv--6Gvmk^|BpAR^L@)X?2Ch=kHPba#g^)Xdz&ckgq5@Qde} zbI#stuXx{gt=(`tPzAjZ^olpfu{07CwV!0_)Avf+eesV@Px_<4@xpjQtb~#t()CqA z$d$a?MSu|5VMH|ZI{3oTE2!~bCjRMOZN$H3brO!YFO|2~Bvgwe`K^#>4oPTuH?%Y) 
z*BrZ^(ev_{FN`J$q$$*rZ*JNR?WXt2x}M+C`M{O$VQ1U>G`_1u@P+LagG8h^Ta!>q z#*d}nMdfPPw3wgP*F;kn0zW44J@U*iNAQ(7BZ(S6w{G#2gnUps0`Zr-O9!t`eZXX% z{Yi>$++&E*`>WAR^?mM><*ZB4LZaa_e-Y^K-VMa``971dl|NYTf$UwH;WZ|eAG%R& z>e|%3FX+P#k?h%|kL0JF*b45&>}ajx%?j1$#BYi9M?7X$2If~*I4^F$RGeJ)xV&;5a%xdj=Ve7ZdY+|o_9H`< zUN^nyA4d+TH$wo?w1C z801(v?cRG4Qc%C27No_6ko{+pKb~A6 zp&&cH09sRF$Y~}O?lN7H&2U+v<0M?sab=`u@0Rz)9ak83aK0*4g$Yi~XV)ed+WZ3g zfGoIZNBp9_t>(dF+$Op&KYzzB7NHVj|HXdj+9XO9wxG`Wf(rZn^>>j&5LFy0$ZBK~ zYo-~*&p$}REGw-Z`KxsBy^|i^Bk(51H;{JX;_3!uXs;S#2C>{93t;PL3!z^^Wf8bm zTKU+pJiw3%KsCur`RJ0}#YuSEQvX0yJ|`^K6r7RuG*M3A%{tw$z@nB{(P7dsz(M7#P6t#l>7mC_L716YL~$4Pd#ahE@#L$^3@T%CItJa z{)p|P8o+C=SJm*(7|Co`>v)1H6x9q4)Ta+ddf)qe^>8CHZBWsy$MuRgMx2t<7FYJK zz7BikZ_bEI;z+hwCDBHy*I987QEFD*O|LXOkE!T7A-V`4~v`q z?sA^h{q~~Fil-sT--OX9&IMlmtG@E(R~_LCkjM*ymjz?*=x~P@auOyg$;tN=UwJY{ z{u{=ETxM;2$P0G^vpS;OfeSjm1_Q?wfz`q=EHJ?~_#3EA#)M5Y`kAZFs?=}C);2E8 z5s;2xe{k@6#&f-ZR9EQd-4ySSWIW2u=;#DC#E1DLR@!-pg7)tbjbxGzhsh6->a0_T zG9hlrzP+1T-tK{vYmZ$fnnC}j%Win?I#=Xecg5^Ynm<&zpp>6D;B$RPtdzQM%l z_}OL`?!r%(+rY~7;MzuRNYKugb6eU;VPfLF5&7k2@F#+?hp$5T$3A50arHn;!NHgj zWVU13O6{$?y3_{-u*ej!L2-7gw*PaDDjmpS0$E1?uutqCiSpr|k29H0CVo*v0~5Pa z{1HnjjqW2gkl67j^yVG)1dh>+6=sV2!bQ(74N$Nw`D*;tRg86LR zb47~Ow)e8^gxpe+GSNG7%vXErGbo((kZc+Ec6TRk$IGYuof%u6^RhvE?v*(hR7>lAL_gy+)8*^H#EJF5JTgvqWVlp?36tKDas*{*VwlG zyH{#_>*xF@gG;i8Cf!c^bio%On`A8FMk^`E%iy%`mVKq z9g}}@a6D4W?oSpf*!S?Z+oG0~=DpDFBbibn*5kKN%1p^g+mf;z_QkdJjw^XZ z(BCU@!A2zNZ3;K>40=Jek@xm|Nu`_8`;iRY=s0u27YvRf!!>7&(P5E&!-Lk+{?NUv zDcAi-pRzPI=BEYVK!WdXI>|+4Fue#o;@3>pF3Oo1=cGOOz|i0 z%&6>SzWGWQJoR){p`plx=bI6Z$5?O>IT#}s);O|wi~BT;Vx?dkI4+0uK`dq&;ik5q zhpF_s1i}?^GNurcP%U%5Pb+t2Ry5=UDKdFBkM#pY6f*msf+*E`rSBx2XvKe+G=6Z# zn0y&Wu|=_v>_^k`Qy9-F_L6%o1KMeMWiC2)zVb|rk?_zj%%gi9YsXuj0qDbRzGVJm za^YtFUJ|>2quOe-hOSCqOEoe15Lza`)Zg~4c{Lh?m?7uYJ43f7)I8SAU$*hKW=vQK z?l2n5E0wq2iDSCqNfb~6@byPk64ZK{VpvS)VfskJrcKk$LCqJhm_(s<*<5Dk-PI}j zC#$WBVi2w#Z5n^@JNV?{33W(L{H84ud1t=!uyxPzO6Ap!-&Ktht@rOb{y@oaMVq7o 
z><4S_k{o2*zT}!W#V6}8xn0yBnv(v!wrKj5(gYbOZFp2dt>wbmQK@g<6trii4=;g# zv}AACxptd(068)8;;q|DuVWQqT1)@J7#p?Ko@juyG_PO?V;M#OmIy506vy3#KB-Bd zFd+Ka_iR7Ba<4TDN^7rz;!dtEC#&e?=YFNk{_W4Y%-Sm(p@&Zt29LjEvZyj>8U#h{(RQ@T6DP5NXG z`|Bvz=V}A+H$K-sVXy`p$1j>#pke(Yc_1iLER{^|Ei0y6$yfX}LXZc26Yk0GueG}| zJK-7bJr8`>%>?tWr^L3t2}nbwg%18WcQi6i__hv%-DGtTS|1tcE}pIb z3!ag1wzKAJBI(NGh_Ho(q(bm`8%2BQWQ`na(+tkU@0w%-_^HRie9%`~)B)~9Vuzzh z2VsLVk|!tQDxW&~-e=dUtp(n`+M4St^+0pR>|#8~nhc9yT!x&Z;$1P9$(y$rM}=3p zGhEgu->xOKu~^0WpFcCr2OK$JR{G=jd&WbfE}#5&MDzl|UH=^5Ra6R@<{wCNOP*|3 zTpM^FtzC5|e7!mpwL?O3Y;Bk7(-z2N3;Bhq7#vP4_oj%@M7ZYz>5o9fu9BIwXxgRL z10EMsZ!XD?-fklr2sgzP`W?op4B$VtgN9P|XDkfr>?zD`IKhRnMiagkPI1m(oam## z)ro5XTz!{Y50<6dd#&CORyY~wT~b~`pX7}%h*jt*CRkQn9}Xa$%-z4cD6Wm#2~lQo z{uEn|QHHS8Is7#u}yf410OoA{e(najXBK=De@Vg!h0l zx)Q^Sl)iiQ-KW#O#rBRpkh)TZaDZ3Nw3M5wO3-3b4;opWae?R2ayO;BAX$!OiJ|;4 zrhJ3au%EZK7HJ@}+mf8*cyz-NH)WbaZuE&kfnr8_&!cz9bO0Y|gIcsE>&24o>Pp_z zzl~kZ2g622Sl-_U+?$AX z2eN@4XI|XL0%D22DZ}fR%&xXN(`+Kv&X|v9u~{>VVPe2a44Yb|!mV(haKwRm+^CnXcb+4WI@$hVuxdM2d`)8n)jOzIK*6 z*|qUm-5h9DIzNdT_KeqxC0c&b%t+%Cy~Z5dRC8IyfXZrx2f94AsT{3fRXyDq-8 zDA1Myh1dc~I-oKkqn++Ov*F}Ck-++Y`(+NsQIe;55W*^%(XJuOYPZq<+VN&9=qy_D zil}ps^Os8p&hqdkm;o$#Mz2?#+!*3)&#Y~B)!_fqM~2~o#r*I#X2>vO*585eQO{mx zIez|bkOi%GCNXVFR24dny@dVW7mL0!D~5$1L*b&HWg`1iCKlGWuf2|zcRRqmGC{Z! 
zWKouVd+Sc<3%j47>bwgy=SOSKZR_-b7r|3Za%i?a3~XW00-}$gUV@~CaZj(-X8RQL zM{2P`|0Cg(9ZmW~^IdM}L@Cm3+1iaSOPD8s?9*w>pcaCk8G&z8QX2Qe|twrs7CO|DMOEbUc0y(Mct*zJ|do_j`$hvsOg zT!!nc_WSkADyYCpg~_E4Ty2k+6r!k=Fy*vV$M_tcsLZH|z90DeceF z&I-Wyora3|nH-do9RQ@eK>d) zFSttnP3h2|ZgVH=SuC3OFQ%b=9>5+P-Fi~M|2YpychA8MKHDssi!UJE>3nVI$K++& zrNzUuD-<2tE0qgYZ}etOpw1!;w*>(zFWLKLV^BNCC)1j=pG3QU;$U+bGO&MBIj>^7 zi(Br0-uUpricaT?$TxP9w1@}^Y?Jh2T$eb3Gx4i+#^#OFI!UN{abz~Ut5x)`U_xMM zUZ0m;l52QsVV#dF%gfj&z-3<^=|hxNy8BT$CIJM!(-jL~sz?4(W)i7#=?He0w{GdkY3J+ZPlkb79Mc=g1Y6$F6s@3w_~E^ry0 z+rr%~&Q(dBqz#{eglG(|>&n~TKT^5`G<_lW9b82UE~^eF#4Qs)pP)mQDV6tuO~tQ3SO@J?%Ff?u8h5rBaOnf4xhbMsd6y5*dwzLqYwwP7hR2B2+yN z&sM&@iSoV6nZkWXFDR;Q$Mtkj^h@>w>5)$wJ*CxtV`iGUI%vE1l3|mOAp&cU8&cX4_@_r=F0}6Co-j5OR_21-IH<_`&GbO(<)s9-T`u%^z=3)({E8!y6^2?B?X`&KiQ37y*n%r8r1V@ zNVf8Qws3WM+V|Qp3DzV?rWCwCgi^EAGcGh?xLi~FsefJ3z#n`35JcGYD`zTi9`@#M zqze*{^f5sEfEpr4C!6>xP{NaJ5p(~@i)a@yJ@CU1}m_x~* zCW}KBN*vxj%0KNHNvC6u*Ffzg+TM|^1TGYG80MU29_$$@S?_W5Kij+LI2F!Z^-(S!2ztqBw7lo0Mv4lk)jHb-&3c2}TWm^34aipU3R^y0IDkr!dbL_@sW`$hQPT#uRndZb5} zaBIe_p`;q9s^-`@Fc-oFqtA-&1Z6~uxFDC=Po?35Qy%d5KAeTysWlRr zpaqF$MvZtLdM1=5+XnMbw}12jou~MqCNkBPeDy?trUe~cS+E)>Y}5eN1E^yz>}2ov zQhjWRRl;7nzC|B=ulm@FIjf~GrmK*YNT0vlg9M|1Tmhc|6khgWfSRFLJ&%O6m+x82 zxJ(69mffs9LDs_?me`Yv>QJegN@B=*i#R#56Qa9!(*v2|s;=+l4uT~X5j)vZAS16z zm#A7a+{XYsOCjj>D|Ikwz5Smc5u3esrjuMGfix z0~F8A<;FrZVO!nD4%Am;3g$7~>$~JsuyDqy)vJvwe(GiERO6+2*lGL2Bl@Kf`xMXL zn($zs;{*q1ncy)#g}dBdDO{E?@~N*NP0(gH6obap!1|MD@zcms+2y!J`>M+Y1GE9J zy+1)Qx5M%DS3Sj(u`F>|&5s%Jv`Sn$ufv}Zb%hc9>A(w`zRS!f`)(NYDU90l?xuYb z$w{UCqA=J=hD4HkZg8Vls;GfJ%NUyw9fHtI%?Rtn6WE^9t-sJKOO>MAphH&Vqj8qc zShGDGgkN^3O&dxC_zSJDkC(pvs%d{X7k-;WzN!J#&g`AYq;P2QIfAe0^bM4xIjGe% zA!zvLoCq;=<;UypgF{xVEBjuhjg*;TDR@=>omKTG!d$t(hkGgH3Q=tA7`dR_os`;Z zFpWNAtEqngk9TnvhYn?^aWYvQl>wnaN=^KiHDc-`qn#x)`0DZ(=$Td~q5Ol+xW7>( zai{E0=Mvf}Y&!j&xd4b@{>akvZM4hig#_7`3NNZ}5kOv#t(?jsW{4>iNS2$=Q9hS4 z)J}DKjg-6g8;DLsG5qD-s!G~R82~aPD%0_XJ&z3WGA=Ge_Qoq0f?^TwC%7DhdvbDJ zL1r(8`R4`}ZFhia+cf7JD3 
z((`q!^G=)*Z)=`M)+q$`tjpZfn1Betbq`y5(cq0E}E z9Y;mpn^{e}H1G?;Hu>b#xQy^w!t8_yKse#DXyTHAmR(Fvjd33_#&XF@-WRfkQ(rl^ zZ}tLPnh$$S7B1f(s7o5fQCAeX$ir$nd_nbll;bD8DXKd^af3m~s#i}re%-#d3hTY{ zuxm4V|MR&Yh*3Q}VVZFb5F}_id0C9wrwUaw>O`=S(`civLy^SyK5tW1Ag*;8_S?;- zX`bX!_1tU{GZ`1dr24uF6r3+!jTwaIBhJ>lRmudC(BJ0S7(E20mBAuN`YAo&v@`&O zhUNw*`lI`lmx9m6!Q+u{D!D9QvlAQO7ZZ;AsQq{DD035vf1uu{xJjd2m_U(O^`^b< z$k@)w*KKiz31<2TN9tzE+vkV##{iBI^TqXpA5vPBy!B-rU2n=Af~S9~5T?hEbfb&9 zH8kYg^2p(BH|Gh?pEcMN+F0DCGIFcPv6*LcsXeFdrly3#0GF-%5R|=rFgyF*X^~J) zPWT)hBzZ{>!dl0j=7LEkt>2zcUV{^h*bE;iLZ7_s}zQL93e;y3xx_{%4o^fm}u%S@CfHNKE?kpn06bTs7AjbP^pb(2ISc z;o+B{G~@hy!_#xD=05{B9%nDe}}udJu9rj$s!}pm^~SJ^1tUvYJ`g07*}qz zL`(N_GkL2<+FueBx{j+qyG*N=1{yb^4ZJWx7|7O3ICuE%zb+?bLVe^~cGb~8wxfE# zw(GtK8Je!3DW;%aF2%L6Cwg3jN9;@98W%aC@2*7&*u5MA(p)}okGLcXJ@v3$1uin_ zY?V-nd@~hj*k&;`uy;Gg2jTJy2w*(W&`t5<8hnQ!xuVDR-#+OeSEv`NrCjhG^j^b$ zW}gAVZw>g6jNE@QotHZ*P|qrWihJ|8q+h!fW&SG36h2?bDAOa&TFK+SD3L5j9H7*W)EEH>_`NA$#hE;Hw{ z*3+Mjk9&S`#ZfFX#=o?Irt+w(e9ZI|QChSfbW8K~GRUxcQ|vrEOWoi)|0d`Jd9sqT zcLI4%sm>VoOlLJ||F8e{-w5!o5M}_TL+6pThWtxgj=y2YOq%{!2`G$SIok8r*W@0* zM$aCTuYBHh;L8WXfXBTt8gz}PM}o6rCbB@|ljrF4+Pm1MTJg+6M!m?E6l9vbU;H(( z?YrY9BTp+{-|W z&yPdWJ3j7`DN-1xjZih9T& zUMd|BXUOsrpXFsz5}GTgn3EKp%^}lOI*L32a!31CHC!Ii6=mxvbLnZsxbW|pR4wxpiAkgs;J~&4 z_HdhJf3^Gj(E@=4V;Yiz_WXjD7q#}D$?zH<`~f8&(hyw1KD9PVQ{S4adVI)>9tS3u zqvsgaMVPd)U?I_#3tpB_K+KhrZ=)Nea6@BQS-3#;Iy9_aU*4+=5*W~dahZIcd z@vyUok^ka)1^UGFaSC-#FZ7k&yZuX#W!F*td3PRjGqP{~(d~r+LriGc3bql> z8q?a6c#WCZ>F|@Pe2&5+sjiSDB`*oo$T|+CF(Y;cv1c5?d%*cR|9ZPYZG8PgEo&mh z*lBaiXWHX&8*MOo=GP*8t zf$V(OKs>P9SLr!~lvF&<0Sup74KRGoF2oy*nQ6gb)0Q^;7q|lG+G~^fU8A>t6yiVk z?iz^J9FCBZzJ*_(RXN>@OaqX|Y5{__g3BG9;a=v)eJ+r>H$MS|r_7R~y!Z1UdF8dF zUn9ejl0-5lKBv8mx z{Lp}_k>6&to^+kdKe{rw$b$2~q8FW*1Oh6}+-A6Uyb5kmBUPNs+_up4Ysh)XG;*@V ze@xY3u_NmZCv)4)^4AuUFBtqgOT9AGfUyjv0><)a0EINTJjCkAPI>DvGPZn+%#wEx z{q%m6;!bMS)`ldt=ZbW_rJ#>-%Be%}GTitU(==YSM1+3ty)eXRs+y`#y0hyrJaQ46r zPZc#dEHg7|4d7)g< zi9T%Zvxkec5;YV6q^07$b%vxjuK7_G@;5pZr~LXoGbpsTiM`UQe-`;wZ713lUD*G= 
z*ma&f`%z9`1HiQeqU3{sX(!t2Fy%ZDjM<%ERm z;EuAq<|zx92yQf4t|AF}{ovGp32S9p1~FpUI?q%_}LIYwP$jJ%bAsOEcbu5VZQprC5*JW$fo@AXh7m4edS)_U>2*jKl! z{2)bk@BF}J4vD8KW2InX>BuIVgL*UC05695j6R<#XBL2zm2K}}ELLQx@p;<8=jfp) z1D~x9P+^lj5(_bdtX@O>U)Wnnp)*9nTx#SqU(8?4tJrhP^58E3yO!9ao?De&StFl( zsm8Pr|H)jiBJhpjr#?{Y@9)Qe1WQV|Qc_fzQMp)a=J*S%#mV=xoVa1oVsGiOX*VoJ z+YBU;rvRc|FskUlt3EZo`iCnlD2Bppz6)pbmr=lHN#`Q~jj%RS7#esV_YyeSSFNIC z5OA(P-MjCuE(#X^FZKukC02TV#MMr4b*9E%ZVsul|IH3oV>8ANj6Om$Z#hoE*eb1f4 z4IWo$_S5Hc?>4h#{1_2UlDIHjXN}6E`PMTGReeZRTPIeV?%^z#41u4yuufBcwskkm z5rx$PF5R@OywxOm8V=;Z2q0=DP@;>dePqaq2%P=h0e)xC3zfr_@Nvxo10Zc_7Dr2a zEqEnZqTIg6P3Awb}e_Vs#mG$BMxMI=;la3t|1~_M*Alo8&3;_V;%rFL7_)Ej&$zKx1EV5LHZ9LRAA9Bop?CyCrZh1aLwISLB5rl zejYk5PP-ckqCpC`T^+|D8AiCeR^QEN>Y-ib*6dZOV=(QRzk{Lv_EYrRj9PVm!bYE4 ziK|-8AzaM3OQ*UQ=V+6+c@CoYeA%WY9RDm*e=3d}qFX^V!wyg8A_--Gvt%%}E(tY1#0D%RD?1kxa%&1UOmi^pVs|yYJ(&=-+8M z8i{Z2Pk9>hM)A^Npip~eiqZla2i=dIrl{xs2!BU%T zjU;fu=x1|&PH$%fDv`*4ykz3g5)8zfnWiXeNjq-*JXJZ2bC5zFVdgUjtq=(;>GWnK zx&=pkE&y`lHNO;Qk6dohTL!)JNV`Sm&qneg68hw!IduGbD#du{Ege@az-zRyN(XO8 z=TGe|)E*uRd44(2uJZ6Hmm!tA?bHG)R#D&i;bbrC^K%eN%rmBxXrg*9D>!}S>>k~? 
z4KcwC_}3101V6zHD1(l!HXJn>6~`4n^u{<@kEZ|;zY=r4f@@>XL*FeUphOZT<(Q-}IRA2_iUU~7mK?dHGN8OH>}v z`X4#%!DGO84#?I(ygTIae{!uhmy)DTW^E2g+4*>uyG4!T3*Zi^J|5<+JvdVVEbxy` zT@I=lT9-KlXFSJmtG~dI)ycON-z%gXuOahLfX1fccSQbOv zq=l%m#loY|7MM}7hfk$eB)E+%&QV18uFT^p{H@P;$u|1hcU{u5Az9-}A09B>Gc?~vf11KZmm3`C3>?(8G zdd9-e<$}sV?c!B)8bDi(B6LAxy!W3v!~lR1a;pCpXb3%@31Yom>>YOXFu1ys7!MK) zTihx+Z7=V}$$ZQA5&|E!wm4{UI5J!XzIc%h8_ew0?EL{j3rLugK+Mujq%M8P6i=Xd zGU))zZol<+o8c_qao1Xu(R@X3 zCt2;%M+m%3aU!TXT!*qp8ttl)Velkol8M*2G;}$IVI_%uy3Tc-;QPdzZ<;LE1sar{ z7(@Oz(aR|9NHE`yX3Ntd-9V$di|7YZ;*I``eqs{?59LgxKOOQ#zzjJ4gm(;ZGCMZG zrZuJzf$aKAaHT`>jT0kbt{J1k>7&?0zrcGxBtrl)78_U!oT;m=zTFw4#|4d6+G3|H zO_QL>nE^)25H(ENnN}D6?bn&5rK76vnaX(hkmNNGwhZ>wL8V4XC;y(!J8bS@d|@u{ zbnqJ8_q#xDC2%pfZBhvH6Mw0Y(C4dXeCdVPgHf6QnFrE=x`2BVt$T}_t9PrMk{zFq z9F-|%2ASgZjr+1z6`?Q3vtHZOR;3G#>i|5ys4IWcu9$x^)$&NOBEfsOo{Z z)+_ZID%pbgC1(Y?pu9DP2HeMT8HC+c9aEMTm&FQ}Yz*_*%%(W#j7pbdLDh;sKEQkA z?x%aDq1gd6M+gJ>>gN2~)#VE3wbLe?0>lJ)PT`N|0<>QL=#yY!Dz^_9?DIL`zt&R5 zXX<1ovhK#5U!AL?g3qqONQz(ld-SJV;0>P(;e3xx%z+1tlYRj)6x3frnj*W+p(W8z zp8LQ?Bg>#>hM3XD?-{qz1i&`OJ;ksf+T!Ld`d~!|3Rf03uD62Pc^2cy1H$IFsc8V% zxhm$Q9^?mR2flc0w_!-;8jEuh?YP|kY}W)QcO6f65q`^dA#5v~8JxXR4}el3*;;h+H4qYFwI!vm({K>kURE_vxjCOcc_d zotB^X)I_*JD^ooj0cTq4VfJ3Pojg7L9s(B(S z-7#jDt3njK<4OSkOT@-J-I56K!2Il|Ag3;qP4gFDvhuLsE^>RNkEG17fBP+CYd}&! 
zkUYl#FxMpMj;YyJTdRCcwRz;~QmWJBuZ6@Wc+u zCyf{@ojHp%n@tHv%fC?oEYC))}+yFLp6XaW_FrREhX&Z z`3q~hp+{$YQ~yx+#xV5dAE;3iP}wF)S7NW&^}AF$N_Hiwl}(p*us?4QH|*fcXp@<{ z_su85JHwxr1kv+7{!m723poW^K8+ov468RS&sSI(I`>b!3@orrKvJc$s{5Boj?k5~ zOf%4b=x=W!UUfJp%9(_aF8;;`W98LC&dX~h-)3Fi!$A%=s5$Tqf7zw5siX3X%pC7* zH#~4%pXcU;B;GiiEf04mR(BrN)q55Via|s?YKDp@)JxLdQF8P|S;7fu?!S@gOvfg_ z-;*?ZEsT|g77x`|uV8R^%ErbAy(%-)^%`L41s7dh(O-SU(c*|@>Z5ii ziqB`P5^8+UZ6Zh+{{n*(UTJ7lU0&&%2PS{{LX3bXvZ~+{5n27hKBi}@sQPv|?@meO zUX3&Wc`H>E=)6=RYbxVx>;7}})@w7-7_Vs^5EasT8T~+}-E;XB;}kodoKpX;_0-dx zJrju55(fr8*W~0x+_92BB-0-b*>QP^dtJLzDKB<|RFk%u-`31rtd2N(qia4-lK68D zc++}(o~^1`uB!H5j6Mrp-=FL3%r$U%Yz^kAWrz%Bh*r(5hlYkmM@PGZofn!Db&A#9 z=beY33@WGT!XH%r<&RcTk!Hp5R0I{hF`&^9g~TX|_W)w#yDt)J-N=?|ou?kId$9n$ z@aM<+m#dN2fNVcHp93^#9vBP=(gYrPwtFiw2mF%|7Pso2-^1zaEc#*foZ`_=cQaR2 z#LUu4wcOk-iQ~=^^CVn7MRaZAK3XjmC}9WdL6Yn1g_OV0YT4fc156Csyq&WuPmkJ; zKkoiadC1HR-U{t7Sxs+Xc-4#H+$ zvps=DDvH~nF16@H+mVv76Y*zf0E^h7VKN(cnTTP*74BM$5!1Tl;^nyOAjA*k$w=no zvh(Y`g2z}d8bNeBVry?z?)}k_LM%S0uCC5qjNzqu;`QtB)B4v8x;SxtMQw z+RuN)xWrUoHgJsh(HH1rn8_k6@c+$UV@`ho)8$hbmcfq-c`z~^P;jI#FDFE40~>bT zgZBr@mQ_`JV}ad%vjcWHD#GpoM()6_Drrpq)yI6*G_%5a3#6=4FKUUPQl1a2 z{7d&Y8%c#v?{uZ7a_oCW3gm(7=k2A+PNBnIJ%)gY%uyt z{Q#u=@@4MCZrQ&}Yin!WT`H-pe2#X+lKM8)|F{e_Ar9H<0^ck8>jPv?rljA9VoV}LI1ki-0*SR zjkPAjs566S$J(GM(82AgEY?`lUO138^LhnFcoAp|aKXRc)?t(j*ykfI`jt=D+f7NkPf`sr2*apQr|GN7dDr@}w;8rzIcB89xk^eE_w@Te*cmvJJibKQQ z8%?e#^|E>K`*0~Z`UF!h_>rK^$*|<{$&&wZjU$|wDd0qYd}88$x>)Yzl3KQjmvEHN za$pJu7Vg*YlXqws`g%y@bzB~8`Ssm`B@RGjc<7I`NiCK;v}~cDy}Yt|k?p&g@S3y| z+qi}wnR3zH@_PQ`3>_1r}??Xbi$tEP@A==l93m0@g=gJB6w8Ce^&zKck}Y%)Gc;pAWp@d^91D@$-!M=06M0tbw!=8O^D7i_Nzx;_A zlP>g-5`OZ(%lH}-W>e`o!W5saIZ$#CgGwULA@`i~`=KjNq<0`g0K1Z@pv|Jhf)&$G zav$Kb2LlwrO}ZP)l}J8rfihNy>$_!FGXvk8{1WT;a)96;IEw}kK_VV$uI)Xgqr)I4 z=X3TvnQUg%5MNE=k08Qe%d96Yd0=A@&~%jfOWU}cLT+IeYDKr)ZzhUKCw51_are_j zhtfcX{*p%@dhBPte*9l7W+4D&)Qn5}zgiApC?1aLTJC+WgKNn)@*@btppK6|QYTOG z9SAtCJ2~Xax39p1dU$U{n5}%2Su{tOV7~^>$`{oOfQPK;jjletK>pB}Tse+Rt3D#} 
znok8QzD?*|RsoFaL;-kpt67h2_ZRu>pt*SJ(G@D`V6MxOb997P47OasShGKd`K`6= zGZDN_Tpof4C@*LU1W~0+h}|QirfB--v9V0Z&ji>_?X$JY0IC}``*i>RZ1oke$bV~H znQ>%!+1X7@e^nO7No)^KJc6kg|BC5wFpltv{^$xVa&`RT$HPuEHMjy$_&fC%20d0j ztTD3i!gD+RaD`R(t955NEUV?<4daX>CcwnPXug4}(>Rx^L+(~yIz@m0tS^&L99OQ{ z^-8=2ooFH`NbPKSJ9hVa-p7X0t#kk&T#Ox~oYm!TQeH^&U|xNp zqS@r#z}!AB>t#EOJ1MW9|D|Q&Nx%a>Zfvwhg4g}Sk{pL{hE2&<+8jyGrhrTO&QQ4V z8|ht^q0BaGbsMBl{#d!);s^Xou1f^KOwAGOujXhRQm&U?p;78;7Z2Vk7D1##;DMkH zSGzgK`%r6Erc{9wAXBrs@s+U>H<&=0RXq8p&0;nL#D7@r2VHp2<-=#t{jL9c2mSgd z>q(pjo$>o^J@I0&0`21Z=b2n5-<(*FP z+o_pk4Mn61ZyuSj6AJL`1F2JfAAc@!GAR0SX0;6ugyUycRXG3%JQSjolUP=4I3c4n z!6o=O1y$J#;~q317s>R<#nMrM(bo>Yp^pmZRrB7jX+har#?SarqX;EOz7RgwNb_u* z;)y_)RA|i+AOpxDVsC^*)3Ktxe#pcDuIL_jtWJAjJbCTKvMMu^DMAQb!?Vyri@EeY zTZIFMdsHfX`A>yG9Ls`T;1?xBXO7HrUWrpw*IpDCGnMt z&>|l2qb$Q7fPw2g^Tq;UJP6t_o_uRUlI~GU>NdBkvygimt~VV@P9Ez1H^JoNpH~P} zZ0m1Wl&$5#{$9j|YgehF7c?XvHXhk!6LCfKcmLwP0?AkXBm{DTS2AkIB87H$ce9<8 z>k4L^zB^1k`5Pk8b+o#yxjb6eNm`xH;iN9B#HAMB%5tq2wf74v#q-0S(jkC0r=t%4 zM&#Y9NBmx0sqczGLvGc)g9>>2iiBZQX#1PrWWZzAq_)RECwY-fYZYkf+=sPmkwRRm7tFl)lO|1>Z z=u~X`u=(x+5TuaN4!TLXQSaUpOr6Gv2Y2{gtWD{gZ_d`l{`G7buy%Z{ zXcZ!PSKxX9#44kbfNDHt-xm@zV^}3ihnXn>_iym0S$Wr zs&H~QMSoB%w>P&4-YSMn1Cwpz+-#O%6g9iDV|Hr|uXHe|n<<>pybJYCtetcXPglA~ z3~wA>cAW8X$Q)QTg?DRXplWkykcs!KHBitboUZp-@za(uOjH<)RzgvJWc9=TRGU9A zS8P2&aYP`G7;a*JU!3rt-U>)!&nUuN#dnXTjqyC3KROaqJ(Or<`qQ`iZinuJq7@JQ z57`L@xs;Pj2l148oRa}Tw|#_eoj-zCXPdD1U-0OaANZarRG5rY!GZyUJjt2OZ<+nT zVDJ)P79NxNTY1ee(%_sNIyqq*{7;AG;*>wi;F8vjr+s-kckVlsIWEC~9>DjgjOoY< z=&KI33igDFVteX_UecLf`OKU>XF!o${!}P_kc?=H0JoVK9*^8VE~@o>hXGbz&OKda zZf<>hbFn|$>|TIpG%1z(W>O#hSoq4}?ZYUplD0fBeJ2!*UuCwO(Z>50dnM^&*GVjO zU&h;GwkGfigEdLX_8vo5g@v4BdUqg_Z7ON^-M7>Yky^BT)z&LVs^g_(us2z}+d2wvb!`Wq{3CAFHp;#*M3IUlsykI5Q+c&u&XMB!KM#;CaGJLp^;2^Gc)wdp=R4-} zdKE@B&hsK>0jEl>Ui-=)Kfb8yyy2X0axDg6hHR&FIHyz*Zb0YdlX?l$P&h_q>g1Sc z=az@n=fLZ}lCxh6YU5b=ds5H`){xI)<$I9t;0N~+i6B9v{v3hWu53Fx^At>v`}NkV zd4WdwK|!7{C?T7ZrLF(7d~u!%WNw>uX%fm4Sr-e)7%3|`zY1RgYC);5Z^Aq@W(+f1f 
zlB_$H1Rmw;K`bw+7XOL@{f@o=K&>X1LK}<`l~(fRx7wcfVZn}$$(&%L3{v?X7_tf| ziYb*$%?6XduDFRw%}Q0h`|gSpz<*sfQkFLW`>Fd1R7#^%%8$L!&m^ZAtEh2ZdJ4b) z)|Xkno}?>SWKwd#1_3v*0KHwni+NW&OmS*!Ya7(O4`3HnB1$mo<$J`R4%5E95A-hUj#dD>>&lOLc8AG*8KN-ubB=pJjEEW zu}hTVSf4x*3%U~n0mbscY+YZH+PTp)QdSVgs>2DOK6cwr3}8?XPkn5@rm*+ux3Df| z)P2~I>Mf8CR#jDXns1Ez{re>@F)b{XuQO`Q?gQ%X`Y9WmU#-2_pH(;Qva8Rym{y9x zXI{bkI(XGx-Oy2?XBPKvXjKbvL$)K?YVPg=ARx4J>zla$Pi^l3k7fV zHx3YUyr{cV|`WR_wSgVc5NR(4woDmBXaT=YN zL;SX${d8p*0kR3~RKVkvG!S-G9=}Yag07Pd5gKH6-EC}qJgM7Al-exyW}#RCUf#h0 zSsJG3Q^tW&aCi0A^c>95R}*{Lez41 z2=UXMj0t!sk1X-0mM5E&piWzGUYYi8D0FlvkJ_^@c-|D2xtekOEdkgn;WQf<`-b4N zi;ew)*Xsx$Nd(8m?d(T9$M)=p)~FB9J9!UZ62^|kB%W>;jr_f}!^NOIx7>=4VKI?M|bMXEiWW@G;J#iYMuS|HMgpW0<<#kI{u z`*lJt!1`MI)3I7{IRmjp1yL-KQkv`qu#-n7AI};Rk2&sv7d07)uIg)3kYl{|*Ly-M z3VW@shsq)-#I~?0v2IN_Ggp+dnA&8l&ez*JZ>=j};?s$E`8q25_HDzM+IF!mD!UFR z<*ROI%Ahyfi!LYIfg<+)tnv-rlE;fJ3DSW@ zrdN7a=tVx5bsa~$hA2( z*F0S&oU!Rq_*>cq!h2_^e#>K(xg5OeX+HnHxxskHh3wrV5H8GEbiBEq_7GxOSXjyq zcINVI2m3k|m)U(&*rBS?)FrxBd+}q_GSRH?UX*+Mp6BG%tu>j?p<3P<)K6$&Omqo? z$uWMcHpzY=-@7ASH7;L zJXE3Ma38a~eeDT^w9N|PH>bE2z<`>yeH&Frqhu%?)y8%i$1q$OfT0M$xJNp%@Bv9kwE#k#lP zbFC;T`B6nFdfJijVeE^Rco7?)J}sex+gY?xH>>Zp9Y6D1Z`Jo0f0ZGLt|)`XYo3c3!z(S25Mab*dD)-$zJVc;^VzhYkI4A5~w*T0fw5%$Jk0Zn? 
zp2!_O{5$EHNyfQASADdZcorNUYsnLrPyFgYwZh=-KPxK{W2~V;-Ctmnp5qB`GqhM%)oo3@H*y(yLR17RPqg<+wkg*a{P@DCcHieF4tE9| zk@PtXCl5xwfe!mY)7|8Ow?l88mnwRi5FJ-I^G!W8Jhq5L67K!UZGE`4WWrti`^1`C z{w1Z9kj9XU-|4GJIL)+?J5RYqUMBk9QdJ@5CD14(jkY2i;qO75?jskq2H)T5#&_|^ zEzytWz|>MFZ&2b;5V}DjC!=qtLKBBxfpzu0%k7D`C?E=p_Ht&c^C3U|3NMt$TW<-b z+9U}Fa1#yOtaAQ?$>2sv;lStxFh!H^>BerIUp+)Osab0xCSJbb6~oN0FNrBpqxqco z9TBqoH>+lR&per(I-H=<6ViO-VJgKXiUrFSK}iWh`O_7vbN#1@v379$U-~anH2Bqo zf%|-Z+0xfzngx|>9rAD*do{XjCPcAjPUou^7^|*6u}Pz!i#+UMuYJ~d$N9yz!m$v` zFdjPM%nuD+q7+L#BrL{hJR)dT^PjANRoE2D0SvQ3==^&lFQIg;%oT~ivXFEl6+6L1 zfrtUxs^uG+_<}u4cA13nAIdg)iUg)T)5KZ5c&9x-jGU%7Ot%d=Z;Xp+Q0Csz(0JBv zk&!cK%&VfJGV+ARHKjumVo`ZHL1wmHk`Ok@ai;fqjMo9zlj@l0*VvA34Lfrlzm9g@Sdr ztkkkXCDw#*Z7oz?3OU_%63;aLHkSx2 zA~c(?PAvyi+^v5(S~%>L2roL?-`#Xv@6zUnm-xYh&pF3v8@l>}jNx-hJ-IF4N_+B0 z#LcLyHsOMHhWQPLOEnZ3`o(XPWeBY{vQas|q*B)d)ck;oNiJGc&j(@{m)i~x{75YMfX2D?v zeSLbnvgIR*w}Q>a>w}!dhAs2ohK1FGZpUtOmd}*HswXRRBigQVKkVH*YP3dxH-1T{ zD8;yJjrv`>bm_rQbD=YwG#3#uw(V%jQs=odY=N<#UX0yA?V|D9FNMlP&{ir=91B1Z>cPFPS zQ1NudI*wFfx-)cqva@-Ny!QoqdwT~QTSeh~^X_XDZKhw0zX0(b7pQPwTJ^IoH9{)W zqJsaK6aQB}K{N+K@N4;_Uu0f6O@TBncifIexh;*QpDC2gIxuTw*W)$-oNz zf!Y-UW6XF%F#56~!JU$n))(s3NqtEj5(bWU65E%S8_srCdV9+RGd82j>{G&HmVXF6 z{!OP-7a+E>B#)v>>BIO3tEyX(Rk0sEcM?bjj-RpO*H%f+rK|>H`@_C+4L@yB84f-N zF@Tt7{0(9cZ{GtA(iebC0N>sE_|C2zQ)vdN|0u>ey$A|Q(oEMPAR;O)be``!DXn_V zZOtNo3dNWp1mQGmx~_#1`%Jt9Rr+a<@A6eYweUah&xm3xsz>r_vKHCxvaJx}wltpF zjkH4?d-$wp`8HAnub&_}b_}q-Nfj1Twp@QETEsP{x`+QisBe}*vJ9v6?J)o#0UrpHFt#jo8CX+sJ28>#%h%pwUY>5c7FQ2D0LoTNW^Dv zU7hoOzf&^KvEIdi6kP9bMqOafPOaQVu35C58}iz%`_Wr1FEn~atFBXYcRU0+5DZfMF^(Ti$6?VYVk?djkisPu?6 zGUi<4j@o@}f08;T;}91S&+AeLn*nIqcG#US%r$GKE3g}Gxk({fi$Vv;%y%1JW%lH= zDn);TcQUx_%%sc=mI}i6n$C12)i$I%PQU|!vEcF9vy_w)AYi(>x&n1)4|JWwM5y*k zpH;TNjJY+Z{EKv=HiZw}BpPu@4u`tp?U~ajv4$zW(IhoxCPyiIh^R0A*-19RCGuw$ zd?LEUB?&Dt*H-}ytw4FWI{0{4z(V?!`$DlF{7LEaJVwKrnv*I2o?G*IPvMC-<$8_o1%K!FX|1 zEBHQo(Be0{3uhb*J%V46bh((_h)2_v`>tpfiTpC1-iizvu;6d;tml^)R#*)^xoH*u 
z{iJykYU+HpGEDJ^vCXMsj(V?mD8716!7aYmuR!Zx4#N%bCpEt!6_1!Pmmjx^yoY5*!#-VUxha)hAUpn7E zS>@l+?psVd?71XN8B>s9u40Ls!ZA|#D$|owh%T9NM-=<~aExo;p7?7!%}QM7>-mPf_!Rym9R2-ksF}d( zNBHu2<{Qu5(a)%g!wg;8+!GICAS0pisgEI0EAu8pc7qJsr%4&D(9Zqykr&~<-lY?~ z#0XGkTqCf)R)E1$RhNr)pw2wWD|YA=9v?kjho|y@<@Go9s4kn#dkfHpM0|c$8NE|R zyhmx6cxeh&pQ9wNs7zXGKUgd&(@!_D;wpiNw@913CMO2J=-7?x;L@w&W{G{K#m8uc zy8_xgQLgd8_x^|k8KUS$Rk!<-;I*RD2j^6ypea<3l>CWK3Aj*y>I7m2bgG(;=Mybi zNGLn$1~v4dj!*wdd6FCMmwM^upf$~*lv_kc(dD3jQ>cn5GW6FEFZN@3q_PKTl+;#CRGAl=dX^ZqYZ@_S{ zU(*{wu8Lu_ih@DW4oG+%5g}`67@HEs)ROc1HKnPkX`98p5)EfqvT6&GZ8Xlrcg$CA z39VeGJJ;O%;>-nfN&D|SB=lBQRo&X&mbGugyA<%W*P4}s;~4hWuR9cl->%Cd(@9VV zT@^TX$8N>PARY57)yeG9$I6C2R&YVRsTlnhql(clCy}bbQ zv0w7VAreiq2wgr2T~q{K9fhO)Ef2Dl@2~^bM_uBa)BmX)9eTEa9YZ_8bm@5c)h9Uw zlz*+Z{xT4XjuoGf$kS~Twz)$#q8*Jd5qS?mXD#6kjDlnq(q{299wSF)oRivk&M%a$ zPaXkM;Vb@RON>15trZm&;El<3Tc72R&3!l_2cUX+eVqlgM8RQ?Rr)-Q=ofIG-$D`7 zE2-mETKHQ+3<$N!TU@y`H2{oHxR|GKu;|9=C_y{3>9!9q=>!7S*4F4+gCvUv+V`gdRlEP@w2xo%IqKQlnyegz$@ z1*SIP{GL|RHsgov)Er`Y@6ljwaXBEtxS&sTC8;Q9e|Fl3LhKw9(^GiOJTKhgLdC8v zt3r4cloY*r52!BOdUdbkqilHdq_BbA`fL{g0m09mb->yf<9uCNdHK8WlJU?iKGRy_ z?koenanbHfJ?)8T@JZ%4jLTeV9WzlQ?mMRQJwW2jmD%(zq&h(II4f?Z6;o>`Uk7Ilh_hrHRx%>KT9Z=Dja3M!lp0XvpG%LD(r(`;5|ANiFJ$mo-gYG3tYl z9Bg4L`I?^anQ@jqvv;UV@s8yQ8%f=k+|ko8s*=?oUs(q6r zsnJRobu$Z^C|750W~FOqoBd2x%^z}v;^jxAK%l=rv6HYt=ukyQMh3g6$H}_dS`GJC zhS~2YwnP*lM*S>szXooGkoCC5Mod0sp8AK|e8Lh0kbCE3TE^G zEG@GAJ0z%Nvi1cP2yf)58v))nZPt~h$!SpX4w&&xK3ZDZ&{;fIX6DaMGiq8HcWyvQ z;yM{&?lPa>Q#2WAMb|~s?n0MT;X3^_Gd>;*2DX*e)uSYqJ1d+GjN8`FTTD`=Y+kj2uw!i0!8mF3WoJ4U|f-ef- zE+x?*dBXpE(g{|*DP7=6#HSmBZE^DIWUM%MnsZ=Ni$c4H)i_j=j@`Y*buyS`6;Hy#Ey1F`)hY=idNo<*- z7jbzFG#9 z|2I(;&VwYgtpRVh3s70bJG1F?9xY%t0lLQqT~Sa)-iJ^+;I%tnwzu+~%+}9>j*A_M zzW$i|=+UEFJ3DWqqkqhHrCW(SstY)2aV3)7sPyrom>*YGgj(7Wtd{7OPz?1Y);(Ne zPEKMo=Gyq7-7B(KvIDiLzrVk6&2ut^sH;K1Cq*5lV&!#NGQMilR|~ps#FYmp%yNsB zdi2WO1TYv(zL)6cfFqe}nk|sLxHwx7L@)Ff-h*tK+e_zc(|(PMHQ^3$p`9tp<{+`_ z+gB5ot#|982`lFM;|@p-bglil<`g$>OgPRlUAWLH?b!NFfWVp$8<(K+@`c1O&FVNT 
zibZ4o8b^oN;?0b47Mc7>a?F@eJK+@ z(eC|>y{0@^D&}{FG_(TxctZ<{@z_&3UudN}6X|lNP!wkwmPe{4%5=H4q9M_$JHstM zHz7RyGzyvGEl4wi$r8}wUD1(047+GB5~dauCZ>jpOUTYxdvp<9a&P3erLJg_8_TLAG#$Y z$WHSj;^N})YSFs92c5q@o8Y4)f|G)$9~NfqSEYC5?ye6JMb7>2G!Uu>9Ounjq8q_k zvIRUPCLGMrD3!=>_G1KmN%sazoXd6>O3^4Dboe?!Rac6}UP_8UI3^1n9(O`3e)|!L zjro4<@q>R%6$iqg?)vn5umY2ANqagy%j+f~3a5-<(Q)b*y#^Yz1M;@m{+fEM$4aA& zfg@0sYcrjGVxBt)@7;RRowb=7SZfPntQRi$tgSh1Zo(!;GsQ^`t+vv~cCoILw1)2U z0$IhAkuv$xuzsd7|7tI9($qGX7>bRIY$|dXpY@1ojuPyNeDiyJ+$T6#Ik8VyyCsKF zBG7OilQ*X~M2QMWB|2YBr9*9iCr-=B94o5?`w~Cc_T_dBXluy$tbcuGUnNm~?8bAU zO8S&RZL(q%orjUbqdb3*6}(@7pZ*2xsv|Hzd)uqB%F5=DI3>DlXJEs$0nrPBzD_p9u-~(D1Vo(kI zQJ=5qEKNE**sNULtUTPO&r37OxQ~g~q^JmZYWnUki=db&&HN-!YQt0jaRNYq1S!Xx zpJ7I`2+d8%eD`@S9g%2Hr?Snj_$TW;K(h`#?E2|{`ARf973W~{;nG7qze1XEZi-`Y z%H;cpU6`MuhU%apm>S56yB_m@I7LMFrol*w| z+@^3)#@P7We1D-i^rB!(b~JC_zMW*nsgpwxB-1%^_1}e5-zcd+k#K>F4Fw#KM$ z^!-CON6V^@4r(uXSbbV*G9j^z%Km43Zyy#Ks+lqup};B(Xe0Y-wAix@Ho3&-p}teF z7=7P$s3DvX1gU=ri*HjXK&ZiklO=Y}^tk^SS~uZ6&F-1qZozKjxE}*7v_*a&`^_I$ zqcS@-|7ToZ{xrcAEo+73oA4w6Ae>xrKEUnRJOyZZGpzaCEPd}Dv#+l&^{G?ii`fTc zLY_s1VmGT~65l|7oSM3N@oZZ9z^=NwI<26sen^2q^ch&^=aZ}Rl);P=o;m-DjvLMmxa4#{}!_sj42os2qC_s?pOy zGT{H}PHknQy9I0a_X6gMM7Vmj3F#6JQDS^5Y&LAt50qCubEG zkBg5_)=Mw6uOoS-R35d@*EXNSsYNi*z(66=b+6}?r2newg9n$0C>Wix@155z=<^yf zpIdzF$m9(5ILFaiOO+{P4CEYK$Ph+{EvlFp3B2JGmw@GKtKq_-VtI%#r*f!I;Xx?5 zsB84^Ahe{GHj+w&A1@daw@QUVdxuBpI57%h*clM6MR@}H#hGuY!3@LkkPMS9iv5fD%fOh$2LdG;&QOHj_TeV2fF(7!7zAP+mqu=&+86w+ zx6G9v^pG(y6PSZ^w{Z7|89#I*qQZD>ZEbX5Fca(oj$$`NQ3((iGpbxT#GbioeY!JQ z@h(K#XV0HQ6J9f9Kd5CV3;Jy@@bbO}K#*(Qdl{kOyd&{{9s;L8%KQj?0X&f(8Xkc^ zv8~*HWazbhf0J*2XMH^Bz2O+>c?}H>Db@2->nPn@QrWt{_jB^mf_SK(&fTxKvR9o# zpth1$qb5x0{qu%LT6MOo8?_bzB1Y?AKtx4=XCin_@Bjmu!9%46N|yaO^>sH4m)`o{X8GhceG*LU_39?15* zJ=I}xWm--i;!uWeNJh_z z>8W=M`$1-9y?V7k$>yXYTBzRVX(Xyjqgb++hlS_j6D#UoGCb!b3{u)md!nCv6h~WA zQ&U>!`v1h0Bt<99x>Y?Yuk_Oc`x)2!`g;kh$yb5_UXwm=5-<^R_;9hy*4_uLy9>QBm;b)tfJxsSUevt zDl019A1HDVh!qE^u>$5&u0M>&NJ&mkE)=U3jml7$f~3JQNe2h;Lh-y0uBZPF4x9f7 
zuLg(wZIDX=iKFZuz|CZ(m{~D1Q1I#DxP1U#lvrR#R_f2#q1Y5Hs(>zQfJ*Q?bn?xL zHCX27I8G^T?t*~$1rRkJ9^R~|6xXa$4i?Z$+6KZd_Jq;)1pt>xn9P^H$4=z{!{N3z zT@An96rwEFn`(uEKeWJF>s0Opd<$0Ix{f6I7s0`g&%#UMRX=eX14RmS%6%wa*A@n& z%wt`j_ZYedS!EU1d;S2t3O&#io;z#j0WX8~Bm#gmaF6d~^0j{|CH@qoFeo}nO-)Hp zui)h+cH_nkMYnk>9tx%si;HLT+@I*#mY2$AgNbD-v7OHzx@T^6EZPO^bpWyI|rC?fQrw?r(KLjDRmS5U$3P3MbjH9Tzf z;$CzVs*n5ULPD~;$ML9gefik3{&h4owle`a7t~~l^qu7LI#By-yi$~h&uJ8p+~%tw z*pkb%#f3}U6*cEEoL{A|!JMIJ+nIkd3npHW{R|$rnnDN8qlP=1_Eb(NH3EJhnSU}u z9vYUz0N-+|DQxXdiww}A7ZiBBpugE+2~}N&VfoI>Db%0=dE3L&Q&vIYHw2=Lhd;*Z z0%y82&wwHh?wO$M%7c=Qga1UMD6YT3X`kn<9K;Y7UGX*H?c*rV@qZhB(ba_#Wk4)I z-FqK+T>Zy`&KX9PL!JuYzD3(pf&_=IrN=FMFtz)l5nnIg3%A0E?RfvsfGNUx=QH;8 z>(_EUcXI9>1vFD_;{wzW>Y5C{e4hkmQ(B^hAW}YeU7hT5`5%~;g}bm%YVN-`sE3#h z70WSbEB^gfp4$X|f8j2~gu2{TvG34MGdQvl*o02Rs~n^)8K+>ju!7G+hn{g;jXKHD zX8-?=X+iE}YC$*9X+Be_BrCMP1tkt#f&l?(3YkM|v-LpH@u+64Ilg=Y?JKeOPP=@l z6jqqr2(o!`?1juhCM+ZK;KA8@2lyPK4;M=f%ZYD_PP%Ra$<>={j?e2jsbJ_bO31{- zR0M567?-`}h6q7BM%2aExeg5KublhxnGMf;eKD_NVuET$&j|~|5}BU10A9gLM4X>L zx6kpDEA}q~Y8d~sd$Jf2&5@4J%sn%H0!9{0b22ga?rrT@E zn@>tX-!|~(tRXMR8H9hPlci9c=LmRMZ!$64BIN0QGm8TU?=&Qgd5J($zB&`6 zyWqCY^iQNdh7Uniu3sM$(m5lQ-laur`iQK!1Qz>$AVN6d*+A#gbeqV@$w^xq(xj%M zQlA%cE00}TTG9>vG+&ai%=~ppQkrI6yh8$KEHw{>w|MpNT00)6rIj@?IS;Jh*6uF8 z2JJ#-E9K{Q3uYmqh@U?-vPx#zHxJd6JEhDHXaes0%=0Fq47s6hhp27d<&Yg?xxl)UYv zPz!vqAC+Wu{e670HLexF3L4)V4IU46jo@PZe^ua@1~-2Dma1`z_x$-Mz>KWGHdq-x zJa}JJbnPFGgKm=#kjf-%q`ScI?SY~9Be?1~;phwKQ!fgJ)}ssHQhn?#4(SQ8EuIKt zf0CJcCP-}<4DnhjDiZegyxZH`;OTKKL2GbJRbAaC80|q09sjV*g1ExH{V-u1(x#jO zOo~dy=Ue#y7W1Cb_+TM#KCatJfXH zJPA*Jc#FGQ%Q4}YS0ILN>K1!>dF2dQAN$BgVwM{LT~m^hl5I1(LvlQdAEXJffOjWg zsDSG9Sr{yZ_(2cHc(~QC0+%buT{6uCjq)u;oUhOX%E8Kdq>6zcAdG{Vxpse*Ml(Mg zvHx$qfI4%R`QX9PdRm^l_!*Qo(KR$6NnY!@LPrKXC&;C6>mq~n4q>NBkIZae_o2x> z@eRtc>Hb0qSHkVx)`-r*)A)`A&QxWw@%#km5f#f*Y2s7UbpTktJXOAz;OIu1G1tCj<^1|?}ZN=aIqi5 zb!A9KM}VAN)zIK~G1etP?dgka*Z`W z{Fe=h7GrPzGI?VnP!FNIx7Jk#Pl6)qe-}SQ2S1uE!ctCwR!xEgT6B@KEG%Zxzhjpnxw7v9^}XppY1Jmx!e$duUFW 
z28$f9_6z$f8Bxy%ZOtJ(0WoKMIpQ`zC{POPXPioc-|X3!Z!d;Jk7sD+|_aS4Pf&l)^H=Lx7bRzaWwswJm}m&JIq5@En(AQ$m#raO2DE z&aGI3*)}cDvP%xi7){WQK9Zgl)1T=|e{WjY-@2UgpaQT_JR}>eorpC^dUr0)&g9wg1fQfUDbwY;1h2^FitIq27bYGnzGou!-Kb8Urh zJ|`W888rj>7qK2J2^FyF<^^l41Z)S5_vTSyVb7b*(w$JhGk9EA_U3LZ&K!x(&iUs+ zpj+B~jJN}VlKphMR@ef*!sDL4e2*ADi!4m(4d#k_Y%K}d-F4jTHIGC4#lfQx%)a{@ zh6%cdc|qJVgNhIeSX4BDE^2Xr{*XU(Vx6gW8pIE7Kp-?Iak`PxISTz2J(UM_WP$_8 zx5dBr77_6Rz|WN{c};vPo4E@xp}avd>ND=rs%h3#WN@y`CfcA0JfHggIFa4@>koeu zQ-~}fAm}&r^c2>Q!(GI7&Oq?-g$@>SEDX!%yA9;1c=x+8WU!$z*A^`)P=q12{hJEy zbOMS1^}RSaI1D`e&b0tnh!Z7qEr4OU1#`vB#Do{4q&;E80V_!JVB!Lu86*V{u)ppB zhk`aIm5V|`m5BAEf@b;)92|jA|E#%*0`?2#z&Tn!^vn&ms>eEy*VWf|Wa<^l^@C+a z&dp8uTxE~q3~I=2zjby3E|vm27iB7m&U(Ct*<*Zd?s>IqqPM=b77Qms&a)ap4gUNf zO5c3>Fa8FtK2)LsWnT1WTG$O$YVa0_RqV`icx_LdI&tCz`@Hh)+viwWlRI(?bu-b$ zAL}V<@UW4^dblD6pjc5QpzorYuvK(x!k zg&J~(6*SZUY}JF~o3udl`*82xy`st=X1TlJpt9aTe~RAqeMoRkuolj-&_~{`<206r zpr-+BQ#?=}6Gn$S?{n;i#U_B_(SoS?4mbc@SiAyYjRy6BFI?s|@>~x+as2p)z5PXR zT&NjEcQc4r9N=iuLZCV`R0R_65BC$T_vY7OM3bPzX#yw3N9(>k+2XCSz$8)yQ7EQO z4&e%+;HjvyxWW#A!udV>aWW!}-F4viN`oPOGyxR(0L&g6tnDU{FjHYj2OWcKZzxgV z;>BXcu9B>roSKJ+$2)F=5;ky%Wak;hI(M?Mv8gb+j9`)g3j}0mXTJ>(PZintbvFPo z3K|?o(QZD=!?T{yF2WX|R%ouLrj{@O>t628op9635$`y%iX=fH+pL0ccO@W!Eq2Bo z)vTUT#{TVT_#bfRf1BpeQr>^{B}(Wz&t5!7KGrr$XY+-Z%x!Y|5lnA+`CrQM8Xunf7<4Z6 zHBEy5?OgxoSD#AC268J<3g2Tsuv#9Qh!J#Aoz2%?Tau|4e2jt*PQA z1xR*^TR%Q2X(Hu)ZJ_^cPC|*}LfvWU>3oVIU0s@tVjkaWYY7Bx2evxjvoNhxCL?T2 z4Z6VZ0PwgLqr}d^@hLldYU2BQKe zjlQ0jR7+bMAsV{B}U#I^eM%uNiIKGx9GbXcF& z)Y8%-7g#8?8(wlN7jfTsxxMWw=C$_?l51v3>(n@~P$F(?{=UA16&~APfBg#A6jThU zua}98iqg^5HMO;U1@{vtnc)9T)nRYu80GQplOZ7?DAdZ$t=U{0#w_jbBQE8J!otE9 z78Xc+Wo2A%uMXZ5go07z+7i8JiQ`*Xkqut`2 za-7)cvWw}Cy9i@gSQxoL1JJS=OGK!lllcV3*wd4t^9rQd{8@58pTJW*x^aPlftPrA z8s;CVYil37x|}KvIp(%}J@L{?*ktt_ljyEG!hvJ~ydd8}9GV?E|!# zQQ!%WA>RC@^5fnnA4RT*`|Qt4aIQ!lTp2c(v4w>{m?3p)s9>TPV`!Mxbkeh|)3s0zLUz%Le~9c!&D0ce8;!O#t$DqJ_8YnlH3oL zBIcZ+;CtBmj*CO0Ztm`oL8_-#8QfZcl5#ja@LXA0u`kx&+CK;nsr#Vf9}z(d3m5kG 
zMQW1e?NxIQ{{Vk_0rdVIVAhMg_JzNH|1OoN+SlLzLW+r+nwnQkOv%uYajB&wVsUXqWF%d8PY(uAx|x?(ti(BqD|9T^ zub+u)ZGc?mwObzOPyg&o$`mCq&;GvxHjn`H-!Jjv(f@9S<^BKaKlhMXuML)+D(^7_ P{wFJ~B$Xv;{PceTk2s!> literal 0 HcmV?d00001 diff --git a/lib/python/flame/examples/medmnist_fedprox/images/loss_all.png b/lib/python/flame/examples/medmnist_fedprox/images/loss_all.png new file mode 100644 index 0000000000000000000000000000000000000000..9f7e4a87dc00679739cd5f2c3c389c3782e5b912 GIT binary patch literal 36006 zcmd?Rby!wg*Ef38f`ovSl!}5%cY}d~5-KR&NOyOM3JQXNN{51gq)0bNiNq}+(w)-X zamE7o^FD8U-#O9^kR~EHM}R`1i0;ZrDWXuAJ}4B% z$SGX-gui`Y0{#+qxUK4-WNqZ&tY>G4lGk&vv9NZqFx5ZrWN2q^YHh{K!Oy|XcHYFn z!Ny*Mlhg8F2XI*18FTjNtyaKA@NHz&>`^EZJ>*}EG|5y`6e@`PuGEc3F0u22t}c%@ zJ{+xVMTzfK-X!}*{FUHo;wh{1i)CG6aVh{Cps)qllzC%yl2SISJ%n4OmB#-U2lt>6lDo-VHxh?lJAs4{*)5Q z8dF_IeyY?W@VN+o27@pY0|EkM1d~u#;Mb1UE2vlSy94k4f4^Rh%C{--v?d*2{EBSW;zrIF9HBl%554(dI- z#*5BBPnL!%*t2bux7G!{6>g2@cptF7(97S67oW?_i{v)A^F8M(*}cLx4j&n5x5J%j z8X+6qK+CA8D0;WnjGW5oqr;`6hNZ1M_bn^0gE<}hrppmuY({T#l+Ju4uGnlJaGj3R zw5h?V$q5V$ELr`*8YST=A~;}k%f{Wp!a^py$fW(^;r_HFqspnbOo0IbBS|3=n2BG% zhIzxq_7AtJjK>>ZmmeMO#c7sWhnPmUL~{F?cBX`ehj)&Z3{|+V!E+^baVwW6dRn+vz7#ogA6+*Q|RdE5ZW5BSF7^!v`Gk& z*bTTF&K&yoEo1}i-rinK$F9e}y1N-y`XY3^gyGNj!J#txHNEJ2rTgOi+Z&QcJNFxc z>1AO9gocK$FAm1^TUQ+<>l}?n9~BGRj9j;g8D5%hVo7*1Qtb+s-@rh4seG-c&{8ANK)!PVu5gKk1uvqx z{&Q;l57DLa&e?NbRSB?E-GtO!J^=xz#>U5K1T3h%cRLe;ccQ0TqH0Z3<9?g+n{!sI zlgUPKO!gWlh|hWn4myx?R_*if^YafYHc17CU?zH`?s!O)Yg-3O=QT382hHVF#n<@Z zt-+M8Y*ih_MMs}wmW!;OYK`&RYfX?$=(j3+jjZtQT+Z5vAIa)WQV4F$c<`r>AD@2x z%A=^LsBV!UY|p&2v(pkM8u9!zRakWNkMR)6HhSlQnyaa?-;e-UNtHZ!`I=Uc$7(>x zX}$-e8tt{oo1zf+gS}{gjPM;=@<@!cd>Qj-P4cMr=Z9POx4k^~R~ln&8)zOWEAu!n zY92yd^6J-}$}U?JnL;Np4lEt+Eg84P3J*9BdzSAlmhHm<)6wSHA2KrueRTH6s5Ct{ zx=?I7m15i9zke4R{fe;wu|TEma>4v}<|wj4D`WL7QM~>TPKs)3_}FHWoTW1t>4@>X z_xgujwg#PD$6m4oyn6NV^XESS-Bkyh{Y_}J^+v^TsUs_^O7=uyzt!o=DcDEB$j1M{ znzOvwZ@u>O7D<82(esS)rJ-G&W5t?@R5(%8e=p3(&jFEz2k2U$l$~~(T`Iz6=u1M@m 
zgp2Jj*J^qnY?Q;m%3+HR*f!GFETKdB`1x0A@EC12dyN@w6RZ!*-7AKaeJip&``jW^ zddG^x!pMwTB5TO+i?x3gs9=XQ{xLf{__FMKI!DP+1%p=}c;ZL_L<9f^2sNljg5?qqEqAD(*JDj`Ry(A<~8Q`FBmB(C%yslR zRbfwjLV~qOoWj}*TB}o5tnBRk?wj+b!8E)Lnq^K|G&D4(LoVa58y!PdS6Ah#_Xn4} zs0IXJ*V#{e4^3$tH_ctDr_!-7dh#SvA_2`O-$}>wB*1lNGJPJi zlK0xR*#LCW)*(!Pw2v^{gDD>7em4R?a;`ha+IdH=lMluo_5S_uF?YS_l%39m!(R?U zbVjD8pH1P{UrP6*eALv`Ts5a9!=~KFS6t*m8V%mV#q6hAxY;QKg(1d9ypX(BiAS;p zV~V^U=xJ|f*P>xn@~NwUSs~U$Ay$Zc+i`7gBWCNMpj(?4!+vLVGQ||PQj#O0xqR#u ztNQ#FY~iTuf%e)SEA>+Wq)fqE0@yl@tU2W!MkOKZyxxuU&e&V#Uc*JxD=UqRMa2z| zRkRF)86_pQ%SKF=dQ(!EJGltgj3xJaZL1VQt^Ib9ViU+&`91N5pSnd}uSGdbic5vI zRprhUWdRSQ)|ccgt8gi-FP8lRuU@?>hDygx$W*MOpg=s{7-F4wcMXPQ*)DdLimH(* zMC_aR)t?tB|_=E&B_(O@5wl2%@Q#JX|61f{?MV z!J*=EnpXvErZBtP*fF#h9vLZJdT{!3Q=+PtW z>t<$VG3c#7o&GA?uAjO#Yt4swd3n_e%(}R9aPm!zybdNpxjp zFF!ftzVzvSg2eit&QvYWpH@`| zP6JlXTqa0-La5=zcwgWm`W|ClNwd$C1lWg=hUZd`^eO`3znFDhQ_fPa|MvWJ6rU;C zX-c*c01`rr?hY5DYc2DpLJNq9h}g3S;}hOR@fxqf&dt@YCxFUx4+3*K-kl>uB}c-H zCOT)*uB6XnXwt4=*Od{OL)5H|5H+{HR2f67Qu(jnzwMSPws<^u9e8+oKWAnp!>sja zA;_?_v{azEQ`0Ea2FSypqMaQN3W{{qQ^s_E$;$1q(<49qhYk<+G~$x7EYGA_!us$f z-Ff}d`{+$kwxo)P=awSrfHff@@Mj~<<6v(M8xT#c~k%YXSW>COsNlU(<$#iWV} z12c&us8;>uEXdN;1(2eoZr@(X&^dUOrBU?N`^akw5Wp)6*5ZjUS%q{$X?w9P78VwN zc*5X=y{$ah&tB_Ej4QBFR>KqaFBPMo;ghREI6Z#+I8!zEsqJ_JH^ioZ`5R(Emt|dK z^R1yJNw9ZyAjk8#u1-+yX*u;8BEb&3Cl}D2+va=|k>Eh}>SXi%nGFrL_qvHC+hc*Q zu(-mLZd5PNwwA1`t1v8BVq&7t zRizX<$1@7{JI&3__tS#T%pC2sO4-i#zNC2tb+3re@W)vT0opRB1+%<{#F7*nfZ0!;Jo%zq^OTbQ+t%V> z^K^nYLkAB4kzjyO+HQY7QTA^w^fyW~de)Up$I}iJx~%9kc<?{a@l1CPBdvgi(3m*Hxv8V3y85}IAve};9dGgeLd(-$KHhmRW}eWa4}>+3 z4i8+HYw&199IgQPw%jQ1GBcd-$*p-?Q0M|&uDjqy$Ggvll!qceyQfVXvn%}dtFFwrCuIMSYH3&Q? 
zv>qNZtG?qN=e7H%STkFBZP^p(5j(TRVaoQlOLpb1*(|0wCXBRXV1o?MZl(lam&`U!ND4cdw$o56MsQ zNx~G@6VeI&V~dXlXSzO5A5Y`1cf9Ga{J>A##vFBoF@F?^$pH zOh7$=Hp=6+9jl`}5DjUpdr4lh{OztL6D`W?kt?i(9bL(NG== zX=rK90pIbmurwl|wUtC-JF(siVb`s2&nRO5+WFzCs-2>$gVyrN_Zoe4I3AF988)I@ zldi!=@GjihN9d|OtXE3yDA)MdSW7cUSNB|MT%+k6y(iT*6iYMElo|_aG$8R zZ=WDT_W+rs&v6MZVqH9nOU{KB8V^!NA52Rgai=Q8nQhLr*CIgF%q$%N%+Zhq%ySus zsp;MG-R+axQ(^=xc>sU9_FEQ3rO;599QF?%{zj(Mw5Y#zV&chK1FbdRX#xUuy6LUG zR+a>hj`_i|qCa*eE$vCtF^rz;?WEEe-fIVa`-e#2X~eDp92Cs^`c=-qR*#rgphdIT zMlZwu5Tp>n#t=qp2$mRFw3fCufu-VcVtaJFcw;$)WIbmK?77Ybr=lLO{D;*Z;=7W+ zf;T-&{-ngTe7NQRVn6!g2mQz?L1qy$Qc`@S+qZ86iC+Z~!EVs8+tJCk?~#_4ypa*J z_x?Czlz^qWF_m<({45rzb6R)cV_u_{5y7e*`54x(&-?%-hCDGgE~2L4AhY^~_W%^W zw|I;mZ@zz*Avk@y>Bo=R`LTR5TH0n%EJPhFtwX~}F(C>OHm7*8mQ6ezfIY2@O7XNJ3g z2MA694hH{KbCdvyvLh=pkDL4slsfkP4)Xw1-KmkcPQK)S#*Z2>Lc@ z*ACljsx5Ah{q3XR+s7va|LIcEF$4f2wp@*ssZkV+PtMHcIQwXIZ7sw_4rnz(;Q`d^ z4wgE$MDqtiW(tA&d!WHql7Ab6i?AZ^b0lNs*WhktDhH>BQc{-)<)Zms0BO>N+e{9u(-I^`QAL_E9W6smZD)VQ6MwF@FQ;Byagk%6n#)z)lHRa zt78bOA~hk}$|pY?Qy#a&Ns2KEkE8Ji;$pV2zG1Q69LUKUg#^3i{g16 zV^#WjFkF*UqcCV@W+q93!F`_lJlFHKXnylZF;}}ld%1YAC{RxJ1G-f!JzO&k5STsN zm9gR?*0`~IIiHV2m-?$3ruPR|&&I9h8&Xoi_x&0hNBoE8-kMu`Js}7U-t;CUVU&o4 z&2q*ELC1CE_mLq%#YVY6p^D@+CZ-j%vgqEq6T;BCyD=A1dLJY>)Td9Mwml?3NK+R} zc>lfu&~1pzKK!>|tYL0LQCV=yEso&io2w#|b&hins{t{7EG`z>Rt0Go1#u>y`ku81?|~oKAQRY^ zn3-2QWI1y|{C+`9{{zlLPi|X$C!@8~`L(*wpg_Vq;@RBCQ4}gRASE_w50C#641*n4pBLA3vtZ&CR{Q+Gw6Zu>EI_R@hnh8hX%8{d*hh#g4d=K`Rh>p0cWackXYm9H!sc z8GPq3cdoh?)@21oYrD5l z0oF1X1#cg+mh4MuWetsG80+gW~oDh*)E1$vV-&&HlY{@XUQok zcI~&R=p7|fJ70GGnO|P*IW9?3A`gmkFxK}Dkb?j&u=iMuhWckIXJI21h(5I+uNttB z&;Gs#u-{F{+i%Q{AB*X^^g9)nXxQ%=8)Tv}k6C>YqY7_!liz3bd&)b+#zKbq;(|Oj zJ944Bka&9ePr=9ETA;jtyBwi;6AI=Fy=6#hg>%K9WFvUYeqGVzpn|A0S{x{$*LYw` zg;UMtkcRsJnaN9H5cS+1+cxp8Q&CQgP&68!Q)z!k=wgu-J%ZXR_ZG)Dx^-&ic{F{nLR-!MsDAiZ;BFI|9K11l!0It&UqwBhZr5~ zZb%QZaC7GlPMSAEj=U=?Yo5!_&Q6rmEo*9e70NV=qnEJl-#nLu#OG>(UhNBpdk3woR|@o~*$escu2l_()a#oTe6JCV(Trf%Xn?p6acfn}GvWrJa{Su# 
zXh&yfZQa5%H_CJPkJWx`)1*+J?hrZog zty4qRN%$_HW3g!#f2l9r8um^AC|N|1zq+6Z!)oGUkq6%Hnt*NMWzZOWnTIC~ zxiyF>%GsJtfXX!H?aSq>3knLrAmZJ2gdz_zEKFJ`P^$Ak1#`cAc|)b+LF!YsSQ=;? z@A_hIsVV_%w_J}MI}p-@!S=CP(mh{!e*K z)Y>yVqK2dbull_-$N6u1M7<@qrAk?g`sLwyN=izAl_NmvOOj`m=q8Xp-oA8&|DQrG6RoJ~Z@54r>mzb(eYC=-cr8-!OH+t0m%2*GH5U=`KB&Kep zSv;ZoarV3P3%-72FL}KSF|K9OAR&C73ayStz_8R!c?>SO&m_oM4rk#e6qy*mt{mTz z8-6p)GdTm_1UL{PzZ9e+$32Tcq`cz%zQ2g=;T9(c#}sJtx*({ivQdJ8u(ELWQ7|MM zxn6z(81J|52t^N~;dL4D#X}MWg3xAveTLa&T#0OGxC1^B@)fm&$99pbx-cc&?9(x_ zwE)i;*|s#iMsMD{xekiT+Ei;2SQ3cu6d6g4xW(XqVO2w%ybKAkwha~>_8_n2>JxI` z+aD$I9tjp*M3VCp&|+KS#bYuv*`PMs*BQ8h+m>*+&I0PT(b(9S!ge2-8*H;5=?|+R zPqqOT6W;(V);hj-aQOcOi@n_4-F1qy?)_>gB`wXkvC?A@f&nnO?!UpYVr~CtcpA`R z(081lMYR2jd~-QN7T%hB+tyM^^)Z1lzC2oss6!Z$;GEqTTfuH>YLbf)AoB5Na0>u@ z`33YixEB=uOr5I3v^}WMm=Dy|U!kX&*x9q{*Gx@K5mGmpbK{#o#KG6&RiiTkR$OzW zk);gShFZkI#74tji1q4K)9xLp4B#a%*}fZ%k^i1z8viKos{;cpL6;a&kTeoOB682z zpK@AlfBY}tD&QY*WfaobIW}k5{GQu!_E(a&JwPi2eFLS;R5s7(tAtxB7d-9YhXj2U zNzi{>Ug*)O@LH&ZhK1b(+T`fy2!bU>zS*L9JhQHUZ4DAy07k(7y}i6sA4p~2YZ&o7 ze|~l00g6z`3AsclIUl0=GciT+n^PjC1rmHqf@D>_L}Nd;2Y?-WP&>H6_613&5sJ-s zGPK(%T)1`%Un z_dX5D%na7}P@8fL;5u%UTA~0;#k$RY7TfGGK$7{<*3ls&^a~h(CoC;y1UUeNY8*x! 
z92|P($|(sShU1XpLKJY^2xVG+v$LR`h)!{XRc`nz?ICE7Zd;43110tvVn@J0Z2`iz zgVIS@s|&22{*jxoW)YDOlxso;vEuqdFvJVg>vnBGpzZ1Dp$rK=MwVm>V75F)>{Toh zvFcLCxkxTOOb`;qXQz$+m&{eX_`av7IH0j9FfPuEiy!p6T)ins?1QWZh|1=49QkI=@q4+_`Fxet2(a3%WR1G$n*31_e-uAUW#SzoI}^ z019_FbWB76N3jLL&dI4Abqv!4K)0x>tW;u1arLMg)^Qod2@4NbE3vx_`x$A3nF0|~ z4}8H)t+Myv0ocHfeyF1}oV~{W@2nRU9UZNjXG9dva^K1}81UNK&U6Cf$~mwctsPrH z0amSW<7Wt&F4nGkrd$+9Ipts&6{5Jr$ z&{|L<*|xnix)49=mOn9(l{v9GfUpG-9q35$+}jk-=JCkG@8xSX5MV8Gl+);+?C*kI zEu;i6K&FxR;zjB&h4$}J{1-0rF* zefl)?`iQr;w>J(-u`hI6N@MtoB3L<;{9R<^cUjJgI_Ot}a(M;u4iT5)^Kr^Yu!J;& z>oKi`Zjmo0?H{2CQBYA8MN)AI3P#)xq(p2maGProx1;mEWGmS36a%SgX#oH%k(3AJ z!FBd?Dz&iPCD?G&ci*W5w{e9y)0JZX3pqai&hq@Qs$j49sDLQ8OY%9DH6wY4ePDot zG}tcX<>e#yC|3ua@|T>Yy}X<$(lH2?{$Y$#fW2^3Fx%Au&0HkYKSnmLP|2l|{h$Va zo-)-HJ5~};zA}4^lseKs)NtJFPpO~jP+$L6j#j_xCk*u%Cz@U~L5{lu$ECZT{qp`P zIUM%@2!@h?JQfq&iXOQ!e>cj_17SuNM@K>5BjU&sfFOVlKBpFg_Z(V3pt#$VXd>-V zh}qva4Rq0Xd87s!qM-b?fVtB?wsT)%=NT9e&!GDWI>mqt5_ZYX76OiaiwhiAM#y!e zOC<_~0}`=i6qw!y-=5*i#fiMz-`{6q_+PV9lEpk=1xS~WRp}pQ0gIk$B#xlLq8VWj z1E~NkM{E7pKrV5ZF`rjJZif^inLKC%Ll_Ap?;GG|0l;dB5ez9U6|pK?5YX4xpXp2` zLFyuEZ5quYAyHW`JOnK{gtbDWJpWr13gV`$lam0bC`JHKk^aL0ue}AajbBPNp5SD) zLLx&V8u}Q|gSId+V2?J0q+H{PSX=+FVsehE{A2(C^T%9tRIanGb7tRaH0XK%gAC6~NQ>sgiXFpQr@s<`$g83zbU7mjAljpm`{cQz zSqnqn(0(BXc0Q^lRydp`-sLF_3YwL0t8bBr*To9kBQ9lGn;@XI-#I$ozdXsv$uX|5 zvxA}?igelRKf++i=vBHO!8ufGY&GfCT z>?}c~=79xp`(+MdYanpd(Ae0ZE;Gx=tSX>arf)9V{Q2|8lEEOaSLWV5k-t-P45vvF zO`i^1>*vmJUVv@GOE_K}tylKy?c1}^-GP|a*RByHKDJ+4;lmK6Bfj(cT>V&mUvSGC+#l|DQ24Mlm$CxlBbFBkez6z940G8r*S%nx{B523{bF+x=EE zUIAz6Jq$Pq+FOP_fXjI7{zIV`g>Co{-Tnm=77F- z^}v88!s){<%R++&HrTIM?n@r6#!o9Tamui~i3@eQ;u<^QflULxGktUM{oQD~JiyQWNlap}Dr^9ot0N zg30hw@c#bdwMSUgDeAvayKmbOuI_+jm3sZ3Vwl=YzQ_k};R98B_QVs&dj;4zcNQ+> z(u?#rFt~#Fb8yE_(lh=A2%?lO5cO`NbClm%-58!u>XiDa*J6cyANs&nhz{5{G-k=% zy<0PCV^Y!kZzc|WLq{xaOm&qFxjurv@aY5VZ(3P#fK&@k6}W22ez))30oUSn{l|v3 zLMRit0F04RfKTLKdjryxb`Dhny1TbNlvuw&J-62)eN>zP84`lJKNwTvI+f9vX|b~1 
z-TLI#&XsafZkUd*bMAV`{SzqydHTr5{BXv_4h+EQNY4m2VNyMlxOhcgVAs>u^kWQ9 z<~Hbd<~!0Kir5ePeP#tt`ljlqgE4>KI}fLl-5eA_^;r~t$=DE$l}BI*=$sDy%(_1G zqE2t3qVB?TA$YV0fb+vGYYU5HXvC>cV*0CdJOiH67);-cbi27PRWw3V4He`U^!Cd5 zRaREfG4B5)_`Y9xz<3?O1W*zYJ_5o8(g;jQFY*?wpAay1Xni2jjs`R$*)2UieIyBt z^IOd(UB_dE$8=<@t_2elY>fxd-fCOt8LPmekh-S9s`P~<5vgYZGN&B-&sAHSSy()Q zL=0Ui;7&cwKO2ikFUV*Sr!&vtk0OZP&_=^q4DR^n?CdFM4EZzv5vMonKgx7!zW~Nk ze^Ch1kOoN-{sF51rme>}{Hu*Wg_H9><05L+@rCld#7Xm~jxovuV7LGI zeh7LzIm|p8V||+s5xb>sLWU3edxhhxnO;7F02vevB-8csnG<2`cvl3(bbUynom0DK1tWV?}uoT#GIN1@T$QAxs3S?$l`F$7??i`c%nd6=G?)oFd=f3y>N`3=z!l^z!3QT0` z_~1-cd&cXi$C$?&9~{g*r-$(%<2btDO@iayoalvYJThedGYyFnkKb!~_5yRH8zIs_ zW>wxa+M85|20?&!c1L$ttjI4>^oZj)g1w6YMLFmH}f|uHfUk!<<8eTIXsD`$U zs~j9w-NG=UTX*l`BVM&po?c!r(vF7I8i5!h$dj{?V45m|jpcFn?Ad3Fx9NS4J)29v zEaJ3$$Zer_k_+4nc=zNYLXUtY5L}0TDiC58L>i%BVYfBVqVSYivNL;O9T|r@SPU1btq%R0* zI`okaLb!ZZp)>QzCVns%hG7F@0rWX5YH1DX&%S2_RSR?=C;g_IOAr2uk2^32(AyK4 zLPO|{_+1Ef2DAz$^yD*i*^{QQGw-+@ER;`z{O4?#-*o^gJkXYW8+DD9l}6NwtqQ1h z$^L3HOo#$Y1V_Hbaj(Y33%@U8jle=ifev!q7Kym;@P1ix?a}nkAkCU3@05Qe3IXvslteX98UsmNQSkBKG6`61+X3{ zD&7Q<4H-c+p6@mvB27!(OqRv1Mub(Q`w8)5q0^BI-a|P5fEJPN6gad@{$5o*0{V@B zcGv^>QH0jNSgbRc;i5X=_Qt7x{n^RKg={S(`Y@AIQ^Vuq+bTWxxCs3i#NG1U$!4;9z{mj8%N1J3R=gXd@A>ij{GFhV%jPIX=%hn@ zjChwZ{5GL+Lb=X~Jha3(aM6YZr?r^j{gk{wY94S>9-kzJ4w%%_oaNO>Clp7;ddEm1 zP@2vpX_7^Vs-;J$N5@R5 z@v?Z)Vx4At@ZVY_2YisCPlcQra#AD$9GaL%0EUGjO>T%u3F5RKHEx3Up)jaxU z1yU)VJkN*U@I1(?M4Jo!2(E?BzvS}rSlG{D&;|Yqc6#N1IT!;PYBh*90o7w6Z-5}; zqR8h*AB{HhlRAR9IkBzjC-N&|{1_KtSKj{nV9>?V@;4x(VFY<*C!+7)TdtcP+cl8W;j47~ z;{caK=H+i4M1D3sM~w^^k%^5vPeKwo+d1tYEMnr&j}kI&qkBO_d&k0p4QzO%!2m>x zR6uC=0J7Xad75(|^8p<_CUo%bU!L2B8ML6%A-lg407@H&`e~$oooLSiJtAeOxXCbC z4%3wlYp*q0P~VW2WFcq=h9%A7C&7GpGJEd;?~qKd?QBCo#9eR_;iWp!F|U7j>zBW( z@ss8w>35)-#4-;^8_9MBIIOXoPCn~w;Q@#_v4|3e+lB<4)gT07R1103g2Lb}x` z;2&tDB^n{c&`j6^9s%hV@~#pzJ=aLzxN*ZjqCNc>#9pc^btL~%JBpaAs2~RM=*Jn# z$FD3Q?=D%|dmnrG`vQS!Cr3||IueZtMy2$A4ABu1@?h8Pd~^z_|KZ)1<|t!$nFr}B 
z`0?XoO3Djp=6?<^*DQn938JXFNjAo_>+HH~X^4&=O5SCOHR6kfR}5`&S2#GBIXHsB z`dxuz?SPGgRH^=jff7~WMIKW?ei{*9hVfxRB;d7%IIU++rrP+}$7D+C<}FWD?ZBvf zmRVdlwY|7|XO9-j>A8FKi)u%{Eo z-x>-)aUSzuFs}!P4nXwr2@?}nVaiVs`SBqB{aL?gU=1_>y%=m@1V7osTOLRco0-`) z3+Cqa3$DU5eHuaV88#3wO#m%WAURo;Gj&0og%^I9@5jq&X=xc#Wl@~e^ao!Z zoiIe2p>?8vYz~ri{I6e+5OoN8uDkAjGVA{!;d89^U1Z7W|Bdoe0?dN~*DOvm?6-yT z$%tkgfZo(br8FVhZIp3*6BuQ|#X5l9U|-kdWMpJiy&eO>a^={o40z^ahVnhy%{kJC z%@6EY`a*7lbP-!_)sJlv&5YO!2#RS6yA0w#!y5I;4yL@~4i=sJFQ0DE z(Qg;o5N#i*zOKbgWcEYob2yNe;EoqQtvHMKt_(j15kwO|?YP@kSPd$piD$3Meu=cT z#A_Rz7~hzGGDIvJRHyu8U3`1?0uO2$jORLd)l*&9e;u2cEJ#Ze(YBb1{%MQ4U9KUUApxDPnAzNQP$lR6ORFUM{05(c}nDQ3wD! zYiB*wvb{whY?zb4t{^(ELGH0AJ-xwlp`&dRtDrnzpYu#4-R?oorhVi7W6iy)=Bqj8 zvzZ?e50^@JZXQ#{{INCrr9YpxXe7UlFsc#n*YBvB5nq=(XB+C|#bbCrZ>Xn^kze2a1?)l@A>{N)3TDy9eaUNM zyF5>Fn%>vqKINm0r$DbpO|N#u8I+i);u6Q;1lH^Jsa0TX_ouq5^TXUp^+5!^5b#!) zs$4ru;4U}v)`OSpOtKCAwr8W*=BrR~nP4ZyN0U>Mw1Jh+I}jqQl7vJ+LKJ>=%>*eS zTabR>pl|OZtWW(Ru9?kNlZCu)7|T_%&IZv&(v*+k2Bqg=k>7?<+`6ZHHl_ZK(uAsJ z7v(EU<({H+t;S&?0e>0)cUgPp+fV$NgOHGZ4_NKjRpSlp#4GkCWnNo%(}#9`musr? z81MZfWFY5YAd@z|GlOXS>p@3B*HLV}gpZ#uh*U~@U`G%(qegrM1#>?Ym)8EchJ;-p zL=~;uiZ1FJxwN9jJM0uj?PBcoe!FqPj0h2?Vb(#$R|Pj!?|XS>SkA^0mCpS`-?53*!tVIc)8?V933&jy^5Ye zYsqVVcUBwsVDYRVSWk~F>Xa#bVZHy<$>6VPuw$)lXnBKx@;+(m{-v}P@)r}>Cxy^VtqL)eVhKaF!d>p!TOjvf|nr<%S%Th zehe$5AH1SqK8MfYT*s*nU$rdvmCK3f&T)T*b=AP+UT=K}LmZ{aVVpPjqX`@*yM~)> zG(X)huE!AG^$hmZ?o2NJ(c0ko*dtV>;pw}bVsYP3<8AbjKMGO}SG>))Rt~4I=njc# zLMhPZN7i)2F>WLfqTR`;p4L5-7xd3++BIE%Vq~OGjSFnV$e8LgMXeE&xR)eq(@*W! 
z+p-JUECYq7UTETEyO?CYSXinqlk`8L!ATIqY5czNLhlU?V-3!&j-AQNsbAJvjO}ZC zEFh{#irz0D9ag^Bsw8+;)#TS1={|mC?BaYTXWE3Iwe=yTb_uMZ!&@tQ)ZFYWbYKag z#n$AEsV39x8i_6zx{=@#5A7#(@s(7ZiNRMnBJwJw_4N5Z81KZ(4-Jddc`FMcx4gQiy>7KKYj9*Myeyuyt~(HnP59w{+ce$O88Z=T5|1=91ij48PXedT!iU^ z3|2e+_kK^Rkj;Hjxa3j>gx^*sDfUZL_n*?Kz@UEPPFZI}=VpaAUfw*gv6&0y zG*Q*-akQBVIe!6fDjxq+zux+S1@)n^_=h&!b&Pwf7P{1ep6H#d>>a%U0C=E^f6@7x#VgGLZ@%liH3uT)YY$+5O zt>3<1?G1_!^J~=;uMBrPSKWPfhSP=3dI)Ro2;*~V^^pd_??Uq>SdQgdmEYky+V>UT z`gzOIHdn9T+A?V>B(w7LqPY8BeWbtWn6TQ=+Sm~f?|cuh09G&43!+uPs@5=pYT14n&)6`J4O4VzFMFA zOAp&RV|YW_X}g=eBOkK~<7w!r?|K|E*)))Z6q~>9M_y*(&h;ixdl~8fvL4V%Kyj|O3O8V9^BR65;*xH+sC=f6D~j0Xy2?3x-rfhxqg75 znt35|*LF~HWO&O*oQz}lR-(7A1#_LpVB;4aNv=1^7de-YcE4VqF;ByGzWjE9@RyJm zJ38S`_9P1B)j2YJxFR-J5u|0rOd0qtw09-8T`y`!kE&gXtK6)e=X&F&MV<1 zdb)8v>g{~iXrA{%HQxni7oPAXp-+#=Z6|%*W;1A4_T-@dxe8r3^(CxVFngieB-;1W z%%)f0ohCwc1wni4CM@0WkJ(J9TU$O^;hx*s|G6oml$G(cY@7RvpurVhgR8s-YUY~; zzFTwz_`jSQge-#=-&NCih70HE@L+Z5r&%qpHk5jne@*uR*n;{VWWN$(npzYcjeF`@ zNBRBp%=A1j{90Y-0oTbyq(F?iD2nV6J*skIe|~geP+ni}!oFPd{LjGJwy4lTHs#)e zw)dN@qg@J#oP-vy6-^g3*Fb^%ysFl>wl2IYteNI(`&WCpV?D>OD}L>doG&qwWOJpv z=X}%g!Xql*rMW(7_lXZRm+&jjU%ULXitD1pvb84Vr45|g>bbSG)gK;o%BPv~$Y-Ye zpHGGlf7^7fSuZNhBst9(le8T(?IE$1aPzzk?j>o_G$(h{Ror0vKDN5sJy$=QUb@`S zB)Lc@dz4STyr+j|`LcMq=j)mB#cKRaVi`tC>VYtO-1RC{rkNLX!Ds)O2N;nEFyh-N z(L_eNKDs5LW4)aPMMoXo)T1LIU-w2yeO{%w;^}R)RdUlpEsbeX?hvgRamf;PEM1 zo#*ABYP8Oc?|u>AbdYe`ntXR4esvYwY#Z0G&h8+Rje~XwD`Xh`+WX+C_?~Rk+1x;B z|97EZr!CWdNIGQIN%%6)?smL43_$y?bm~pK)>R?@8t~ouwvdOHvjCu!GaT0Nx}KWw zba(OLK2LD93&B0yPo%b;8$reE0(1g+D7@2Vs@syZCTUjRxDuzUv3G+NlC}cQYCdF_ zUi+;gm^)vvFt4eFdEMe`AaBke>6{Nu)Rt^Frnh!__Y7$!>m4HxuSd!8zQGz-yeQN9 z@-oL|c*{4WxyJI#D(4qH488)*x#A1iB=L0^b#?UyLLX~Li6ULL*cu0l&WZZp%t4dh zk{@pV%-}%(`fBZG>(x>A=GsbcqKB8wzhCp9l2+2~BD)wqx?ZNcPH&kVtsl+o`pGe~ zN2|?e-Q>^odhA8PY$-*b}muaF1 z&n4V=R32WMey7kwxlX0|Rj?51Q`Dg0<;$!uGH>7q#m4$|Igw#04s?AP_w3mdp5XKk zYi$%u{uS0jhSrr>AsY8Qx-vjlQk4*Rr*digSsDG(oB2V;9+N;Hz3wpy$8(eIp1o>{ 
z&C(jO+b(k$78IhwtzTc8oD<>~e7)wsS+^L3GPcm&pfILwogt$5qR%m}{WQv*vbi&R zx_1(9$H6#}d9~7hmuI~E&@yKS^g9-|0f{P6Kg^N4ry2zxB;|xYo*Nz=30!MZ`x+O} zP+jEQE5l+dc|fW{bR;3rys}|#Zs)F6VL}ok$xa|ZPfO{Sug(2-{&UR|iDoT@C5y2P znQ{gOB~~rh^4|Vis;-BnIg|<04-zU54LXFDe%G+*F!f|s(h*epqevaWpfHu5Hzi07#x1yI9FRtZ#_Hr+jO6B;4AHiu8}sx0SWM{uE?W z9}rD^&erc_WN6r#EmM`da?j^z&x?4tY#*a-#uk@yeA{fKTwcN{i+wc1@b=lIbONjU zOZ1IzUW(7Sn_W|O@1I^rW};iEIO{1|XZYo?HP2W%TU&AI@ZE}rV2I8bf(+H-4*^F| zwu{q!JA*6-^xZ>Fvt@jtVaH30RA#yJn|d?+drHOS5zB9;E;Q%V6rPr$e3-qlbKXh% zHm8mMsKd*p_A1=YbE$P^?pBrjknrgYO6Lhd|JUrvs8)_b*UsEaQ>*i>shwy=LX9B; zu~&lF9I*n}4RU@Tx}P$Ay&YwePSVm%9ggvSJeE!qUL9o{kgTdALS8J+0EDQ*#g# zRh*$(Nn_h1@hZO+9kFJK}{p-(VcH&N<>4FT{j=Uyt+Jnzip?6P5KsN_Kka_OI{NOzcm$*RSyGHt1PSWB0t? zV364BR=Bp=ZM(?s_ws(A*yasOF8S~Xm8m=vCGLYxeQJ98`uJ`qyi}PW*_R_P{2L47 zjoYX332mD8uPijSLpe~4Hd-f2kIpgz0Q`1=t(wdFzp zLD1O{1tP*Px%6e6vnwX6yJ{6hv1q6FWDEKBYsGX~%uE;A9X_F#;Ee+pveG`S1^Gq>=_43)%Tvurgh1fh534fHG z>UrjL`FeI*tBWZ`$Wv`%(rH_h4_XBZ!`;g3C^unZ)x?&*t&_7|!=ilZ((I#crAzCQ zHtoHYCQSt9<@3;NVyh&VqG0tpa}1G7t0b$@_Lh(?WgC?P8qANv;-M8WW|rqPI-F1Zes4Lj*yBJkdj-GW5-qX26NS=&Kzvn$t%xeK8=j^7Yns zGSC7y4g<3NgxtR11!IqvqR?AP-Q#=yRkG^~Od&4!PrGlL7e&iZ>w1DE4kGtMecV?EsZ^C{`IN#;9(la^jT}sHq=P7SFpOL*DCcWdt z#BI5xSaqNwhMwzgQ18K&4d>MA&Bc$M7?2o^0t2X^bun1&y)9(B&YTLm$m;Z`SCOHoC zv`?_MW@gILg=K4r11DflZYX|wD=#C25pM|F6N>O2J?NbRhp5&kJzt0Qq-)D!tX^6-r!j3f|pEP;TF8oe}OhOb>La9!j z_l=w+-=0S^bor&KD6YTd>iHNS#((a$(IYno8-9Eg$)@0k)kC&(Y{V)1= zlM;ByQ}0HOWLvA(_D*fHcJZ361m!v|;n>yavB2zhw6LH#XLhI>TeV7GH^bd$!Q%MN zQp|Secn~aSkZLQdKHQ^#wW{c2Z5KFptC>&GrL?@w=BpR@#VV^{F}+h0l6hX_=y8$W ziqY(Z@TMkUkx~Gz-yo9>i(^wmP2gV!*N&Xt9@>K69)iwD>_aJex7~Kf803~FpC)5o zTc(&S4x~<_aX3@WRS{vtM;&uG`85)|XkB2C1QQl8Z7C?T$!ci`N@C$;H@4vm3$Sp8 zaYGVc9<;oToUd|});9EuuQ@t2?g;SBc`+Zxb9b9*=!_?L7 z+Ew!XRw6P%Z&CMQrTs~BNAG^T?fg|;Q+>FqcNKc;^!hz7{Pl-E4&6Gij@>(ueOsfhSvIfm%S!sgaP0`+^iviOR z^qq41@g44YW|H^`994YPTMcEB;wOaS2Y0>I{m>O(jmA(kmvMiCFv-sH88|-~I*xLl zW6galfN0oi>{m-v?e^UjrQJ);FShJ7D<%&?_t!YZvDi1sr-*e}eS6+UxKBA$o$JwJ)Ee?q(}FnwYbN+`Y?viC`j}LMZkE 
ziJvMuGuxp;EnFu=Me)$2tJS&UAp{P5f~|U@O9#wt1o!)H&1JLwZ!Yi`yEt&3m`7?o4OCFEP3uO_4G0qJ zN^khyw%r^N`s*{#v^EAiKRRPvC`)sZS%)@PbNW#k)k0A#LA_4;r*i0Cs_>TyyUQ-i zw4FQh>w8LNWTHYmKBWmxdF7+bhT0^77C!h}C%umva6c_GNXstTvG9fSUJ)_x`2N8{ zwfPxEzii2~%HEv&)82rFDo#U_wC_>DE%EOA2xk(96q1&Ip0RpG~aZ6?~n?KWx zO6RLbS+0f3-$Q#=S66!2(9S$l)_h&TRmCb2+BY6L_tK90Gy=Wx?pIY66U!xq0x`!I z#5k1gm?(+{DM(TgnLBML{?nfQMtgWNHT!M$KQ3xL)owaDf7CtXjjx>-CECeAnq1RY zpcilA?l;tmt`y!g|JvB3xo<3FEK0IJ9&>u?9pu8!+W`3@CtAexNPy;7wTechh_a2E z0Mjmq!#1U1+=PCb#0siO)0rdGm-0P*gJ^CqYm|*ACRH%vlNMI|;sl^xzd&ldI{(=` zg_9b&X!8__cL}5n7{8zE_92jrTXsvX>mKL?*iN7A49TLB`mkI4MmU@(tZS$9{N^|= zZiSxVZ?ho+`w?TV&5ivrk%n>hW;JY+y#-td##_x-FY_iPB!%Cyr{NXi7csYsJq}w+}q z#HSOMyg?(vo*sk~Jukh(FQ5o>ggD`{jv`A*S>Y({U)R^uc-CTc6mJ|*8oy6!DMz}~ zyv9TapSy&VHln?iSy~y{&@WmzFduVJ)U{b{{~%--D9!VpAbDO#$79(6Z?B0;3DGQx z@;QGYEJAsgy1YJ%AVWraE9X;GWVN39w9x6>WC63PK!)S&O6(RPwe#yUKUHC7?i=US zfa~hs?D&DLH!p`buR_e>r)+FKUb`p1_+0J+$2Rp6K3Vwpa*()*bOGj9B^Ic}D2M@Y z?Q?82Lz=QT{i+g!cc^_~uL6cQg8jD7+dQI{a8f<%2_wn4e(|Lx%xW7=SdmUDew)8C z;Di>REJ~VpF|oKS*$ubFc1~FR96OLxoHtksd(?V6WE7R18rmD&W5b@ohl=8XJKtld zQGwqA(aHVEGcJ-@LWL|!Zu1q~-m|w*7cMyBdBA z8h`owyHM+$f?lom^zKx`uH5?MM7;7t1%iC=<(! 
zT0=X(A+~8ptSSBhvK~S=0d~x~B59%q)zQ=eVXp&XUpHdLFGzaiq_z}{p38=0dTX#T zGw^z~O6VKI&0S8H7v;aKGGO$R#T~APl%-`=TA;n8JIOj6q1|Cb~3Wi#~H`3DI zwaul*jdVreR6;-raqovW0-Ni@K6OF6P0h+G)R<^gP)gtF{+9Qd=F2KvbV-iA}U(RmH*%5g7$4>6~CoW-+> z9yW5%?emX9E*7jOYU~=05Pb-ouh;U>5yTgu)f?$nK-n)vk?~m2!6-W#0m*}=H3UMx z?C*!~Vl(cb^>6P5!@eg8m_l)QsI?W=UFoSImDYQc<9e=X5`t+Mw5d2y8L~Ahhmk)d zIS^)i{GIhBQ5Xp!)A-} z`oVSZ&|KZPlDaS+COO_Dn|>ZFm9&2dz~k7L{B}WAIo6-_jB&X|kd4$EFqNEFB!?ot zs7Y<6T;6deflQzpF8Yf;2T9pHq&6p)A)6hPCj7^tO=Deojo8mAH3@-%7sZ*Vx=?>P z{MeyC%r~JNt>je(m{Z;T%b8)1;&CcVT_{&CD+w)eh|j((&x$psFEQ0YAs(V2B*JdNMMFMod{v24E>xbjv_%6CxB`J{=r*4f}+w2QCe@&-4*Al`< zqWd9d*yQ^^*X65S!Nh))RuhS66X>5YoLE-1Iyu)J8X(BgbogMXdX=}*Zm0pZKF{IN ze|_R9T9v~PzhqrLzRp>$jzpOr)oschxp^oda9B>Q8-ADn*S;>*E7w zt}3as>~2R6?7>s^*uhCfL3yy_4QBB`8F^VX3hysP7K*Wnw|tO%bJ4o@&jiq%W{{qB0b!W5nHGqU*rw-1;(I-iW9aU$E=XoquaVK?3XNl z)o~qSi|v@{x+_>qFx(kEb(x%J@h90|Eb5Wz_&POY(kw#Z&KW;A%X_crDE`U)hsw}q-8fLAj03e< zsrpjElLE1{)tzTuY5wTd-V^>zD;tOH!ui}me zzwU?*&*A}M889Tqx+Q--VyK3GjxV@4j z_0fi)6OX>Jlnqygz-0MKGnLU7F70t9RFu@}=6xp;spZ70oD_Qw$IF+G7hZ4NRh52C z;M8+(Ua=`w2@0uISh++~Bj1@J>DRBZ)yPeKV}~GdO7x*L6Gzva{I{`ZzLP1z z2?3GRDS;Q?C)BdfO-o~|f2@PB?1#locgvB$B?->0^CRxM2YHo7#EK=lzszGIEtH}p zU+qlM(%z5WM_jQ8oS+(hvOuasJ?bl;Py=-nI-F0-&I#;`?fp%sZHBVk$svV)uwxc~ z|51K4T;MPm`sBKc6fKT{ghWf{B_Pdamn#M?WrEH#aa5mm?Vf4E9_MT$e=}MAZy%EQOXzk|Lstb65!WD!LM6?}NR z7&zCp3s(eNZ+%>HKb>LjLZGr~lQOWx`9zowZ7`>&;s_XLl`(bcxAp&xM8-?mD8f}} zUbymU0XBafVTrCr zlL^QS_H&H7eogU)pUw zv@+n}eMFsD<0N63`fL>H*UN=aDvqBfN*s2fGk>9jSD5^L9OZ%_p^cjpm6UQ+#M2Rj zcyLLoebXN1HYMpVSY4AiBAlbOg*+AqUWA-!7IzUwZ)Z&(8(rGa{%&Bieas<}4{a_+ zEr2rD8|RmPXeOi8qOhD`M<)5^Mzcv}Y zq!l&sXs1c!E$2yP!;wi+@K-RCT0eW5coiSXrw%V5g_Ln1sF6kY8!7NU=Ck^%&U7AF znP%Lq7H0PFMp~v{<_`nD?0=+*ubY>zUfo8x?P~7ZXzV9&ubpg4Ws)A!Nm^pi%xa0Z zhfCsFpk8EFFH;P)_z{Im3u}IbDtVI_FY3Ukng2DoK2Gq}Fvbr%54(=xveYZF8!tC7 zXa|)eJ=u(@H1RNYUp%t7^IpkYUgyZk>ZpX^Shj1!>&nL$EmrkD@ZKKBw?UUL3>1@Q zQsx7!b+F;^NW8r1r5%^Hx6FNTP4gD_$O<`*&s0oKs<{3N`7X$MXZ!G$gw)Sez8(LD zu3zx?G|X3BV7xoae-Em}8Oxk=UuTC0BU>*GqTuS!ns{ 
zCp~Bn`Q%z~1{HDc)HfNgos}{CVY72D4bDZHXZhnT-FN=e;S8k4b1t(ZsBa#K&1M%W zUAyBVKi{*f8p=xl1|@{Q&gJiYwk4tiTMsqpBIVUcwQv}){ky*Bxd^7MlJbf82DIj4 zjc>gwLml7BF8DwUk|$-S`PQbm4~VXe7LfIOEk zY;acKJby1_hq+uh!7BJEhQ9Bk_;B!M9ftSUpN-##jFX`XwLep1dHaHf^eqmRKp~tG z#d5Aj8O%3Mh9#X!X!N%l(D4d25ptqe0?W~&x5;@%MXVHF2H^&LHx?cowfqN$_qVk% zYo!-2g;dUx#vGNUCsgVSip+8vnn=`=gK*7wz+zRsCBMfRXHthTg{3Vzw@{RLZ=lhJ zW+Tmv0_;l*{JU^T3WyL5W|w10c!+EWxd43>sKEBWg&WDc3O8Iuh6@aKD0hGO>{LpV zX{#wxv{xHHX`_E1>xsitK_zX1(N)uB)McdK{&)83>abesgtsCTaYq!ex>hu zNDd=D=+MT#eiZ4P+2P~~^UC`j8HfKmSRf)@`s-DN+i02!>>#Fb&t~~cKf`oIMee(9 zSS~WS8Xpl}+UtE?UK*$-1|KB_uRnM@BV@^(+N?-8UCs^S!Lm0~tD;m~xSslq@S)PC zlXI@co{Vz%=}8zJf9jCB!4%vG!cO6CNll;Mj;2q>{n0=r&Hcfb=mEo41VJZx zWG}B`@vC7of^0Tw#9TmLANIYsq91~+Hox?RP0}uYL20eLJo@(BTrjLe;wal6{)w0x zuSJ^)R`OGAe=&wmn|kP>KnUYr9=zYmN#$5z5a!dgpK}%&9xhA0zppeSx}z<0nwLBC zN|^fkMO9|7-{RuFeMAQg3qdj%?zaouOL~$su2hGf^uV# zj$=|+JR5g=1bf5Y!Q!bI>97JZxfP`edD%LvqHj5x(6qey+XtA3+bPzY9_;jg4?O6e zULRr7-Z%TogzsD^+;rnBF}S?aw4gni*8X~~ZmO>aN7(3DQxhY=8*mDYD$Kp>F6z@U0Ch!e>6cJ9m>P^BH7`UbmP@hiM!~F3dtb5acpvYE3=Xtjw+|U~s^Q-Zn72 zf%mF77Jx7NNcCsd$Q{*h2nx?0NneJs7P!*1f0i)gKq{lT9A9sql#uX#;(d9E2fpS& zU1g&O70pH;*TW9^L|*j2Uou~W@Vq+2)Rjn#Bky|Y=X?I8ioDNpnPFymwdeD=-g!Yf zn)stpn*$mzm9#S|}>u7k+$5SGk;;W^dGY3OahV{@sUFCTP?H_7n(ckbJf(1!(T4?x!O?JhU zn5VL$ZCl8Kc^9sE53YH~CJVLGzOnWYt;&IJ7nqZqowM5~Il!QbX#KQYV+fu;(3hRC zl{A1~xu`3B6(sG%Y;TVO*Yn%(4iIeJ=QwC}JuzDw(KIdIwp=T5#OMA}HTqrPi={tl zQfIqa^MK=FVzSq_=qVteRhiNsjCTvJn_5ca?&Mz6h;83Hskk^ZP-(5=#EG(U`dZH6 zyba~&UMp;&nmC%+IvJnXMuztsXJhstB1O;Ah@W1gf*8_h#$!i6DXS02*}SwcOvY;p zcy!@{jkH5SXU!5nA`}x%=NfbCSqXjpqhfaq{?fWA!&dWEEW&OdVcOuo6A3QsuzyPU z`Nq|=%=AvdXZ-uKYGKoc@QdQP>4mu#cz8t;tjI{tm~2XV4VL+2sge*L_6r6n*5jmc zdqZ3=8?0ur+?N(TQW#ENuhaS7ywWA0wN;`;YQTZlA;>?NKWCh|fxSk3Sef3AFcLWk z^H3N3Nj6zrOEIh7^X~46naiFTEiMUbBo>NL4)vr-9+2tSgK#HMEjC0%%0pN_7|^J! 
z^|sDAv!%3GCYft}lYuo?U79|$uL%%n#e|U!Ui`Ea-o4OBCMj%xqI6(GewkJWW_gU%6$3;r@6d97Zhryq0u04mNy)!?n>>Z!iz}A zrI^X3;)#4wV%eV`_}5G+beVue%XFflugK^Z_6Hu{9hV@KGq8vqHw9uZ>9xF6Em=;S zsXS}xSXnpnldbcDoE7?YX%x3Jf-nfwu;12|8Tas(t={Y-j@1%%uKb+t!EfTHjI_H^ z{g>xK4ui?u<+6H~aDJjNc6HEU$~pt(jKX|a9@pYLRKtcdykq@48PI-7=Y|usA69Id z4Pn%cwKmc0;^6Z_b&BX)ei5zjO4)HHRf$^5P&TrfLFde8EX26^$uUFaVgj*p2VAv+ zRrG3{tw6UXxU%&zwm~+I@Z>?8$WZ?-kZxBjjI?FM>~?dLiK$37L{D{Au3s}*C;Oh5 zk=f4tl?-a}p5ZZ6&yUb=P5kR1B|S@6Oz(RIQU&oKDH2wm=tpKaYBlwNBs_(T=<;boacYDH_*H?yqKO=Y_-6298g^@DttR}&#MjcIkl`TTNE7e z&SU*{fzG9Mu!3Bj&4Sb0AJW2Yk43#QKekEJxo-|$88XhF!B*jseVviVq`R9H7!N2{N0_o|6V{R7smJ*>H$E6x# z9i{^Xx>fM*8nnznRWj(hWmq)(@=oRRC08$ANZF$F+7x|)w6vOmK?RRAU01q3e>oo9 z$h3D~g3?5ng7Hp><+>6ni#r+`VLnAEoq5JLUF6?4H3W3|?FP&^-$W^)+#K^q&EM~$ zz1R9BNH&`sm|6Tk?Y6a5OMMcu#`w6j)EBoQGGp+;`3swnJA0%;C1$n;kXSk^@Y-C<~c$IFeQ0umI zbnq)gYS&Ze1RH0)AAixjI_V-Kx!kV0@$j`>J<%?E^b=MO!TuBjO!Hzy^W(*RZ*OSi zuq@wAz8*!UWJZ2BZq{y>Qp4y4hj-w+?8c@n4{zrv1}^eLv7LjzSWVMkTJYcUk9-arRX@03j7+Zf4=sJ&J|4uikaN#L zS>GWjmRkyYqC>Wr;%&U^^%X)cb^%aRl?7n5j)yOqcYNa!r!S_mmiOtAQQrAW4=O$D8`VOK9d|SnFsQ1q-^R z7T?s2S`sO`S+ESHiLD@M)tIg5Y=s)qU@m3h;~9tX2*22PskT_N&au1YCwbC+Y4%=O z_L*qBZAFi3oKPdy;7AJjbH?MyipIC@3)7s7OyyVmzEu#xeNTI+Fd9UaZp|m#cUK%v z_uI=@QMfRtz%a>mcfV4qS|Z6ho3>_ms>^$Hdqn>8k4Q*4W{$FS@SEHEz5cAG>R8||9%pC1a9*@~BZR>}=!N7~XN!|D0UhixCQb>-?jxGz4c!do&BdfvX{ zw<8@jP5nv6`|;oVr~V=rR3eoNqpC=tK8^M`;#X_+^1j>KE4bMz!EVkSUII=%c?Zp=DSmv-Dp??3PpoNY}7(y1IHD+vj;X!H|h75Fc9Iz*4NZQF!o zEK&L6GGzAiJHn}6Jh^ky2d3%rSI{kA3S>_KCZrTN1a$UIHqxHZc6QqjaWiV6fvtkeL1IzI3*n05DLyl^#r(kk%xluy< zUuG;6a`;6}8Z?@eXit=_jg+3=Ym#;EW2{(6YnWvhrPW~^?0_X~9b8r`(b;%RqPIX79)06V#PpG`3SFa(7W)war#ShcC`fHAYlcD8C7I5V>HB`w z4U80a<;>H|pPGC-3|G&;cfc_t|I3@-C^rOJOejA)5;YsHn%OTk&wJ@!8(uc(2XG*GpY$dv$}m&J>Ptj| z%1Ne|2Xx>QjhSph(ig2q9A!m9fBK7mj0}4RUG5omIfvCeh)qY|Zn58$VZEN+B`mkk zcoi|(32J`C=vEbKRvZ>moa`n-$A3cYx8-5`l*PhHC7d4w6-)&r(jkBG^ 
z#;wwqwLy8YI~Ff#nOl2vTx1ZCHEJ3I{NY~y{<6xn1H%nR~6r7p`rq(_LCs9w<1Q`_ZLZ*`+8X6GrfPDEsPxs;1UguCDT^WGb)YQigjfd;&oRSn{cx?_cBk-`*NP9Jv+gk zu@HBgBueDHX~na41U8!mfA_oE{8%YR8MjH`TUWvJD?i?fhXPS2^LF&RW|+g$NmvZnCl#$w9|Fs>G>WN2?jJO*HS; zU!pa2#j=lCQ(tG3Z8tYn_O#$%wkb!<1Z|ztRl&(7*|l$9IVMI#y3vJIQ#u(&{mwbRn8zYq=|a&CcUdKsO~JD3wG;7OcIRW zFw%8>vjMf#$Df)PGt#S{^noDMZ>YIACVk8=hUj4%cRYGF$=)Eocx@sAv5$_wLLhGH z+GS82`@%8ydclT(sQhMSpvd202%^KC=|B|ppNR_TLP%Mze^w%E!Namp`J9CJHa0m> ziUjC5crSQ4sUB{|tSinE_@L3|ny=E({cUW;nJ(A&HEDM`j*B0!x5m$%{<>a}XAorJ zzW5bf!FzZx2j|=3N_ttzVkHq&?~hDoFvBF7)nr98_eDhtt9i}Mu#U)Gy|?2=TJUb~ zvZ}})`-c5S4GEE<2r+LG7YE7si?k!n;sI}k)ksI8?w4^Nf-J@)$)gxpqaGu}y4eDY zVgty9&ZW|kzV48~ZTHni%DzLn`m7Gek;{Nf`QwK>i!1pYY;7@Zo1%U>{6MUnO4LfB`6$iR}uA9**bEfup5PRP3 z|1Q9ik3A%SP#-{SFf!adDOgQ7E|T;6!SPNaL<;d{_tWkD=IbIKu<<7uspG?D(-}`A z4(Y;|vz!}rx1~da46qB9bZqB64A_zQzr9U4$?^b^4E7J3{69Rs{#56D#jG6XNzm2` zJb;VLBrkbeJXBs|o|Ub6FtCiS(8FAg8wtR-HhVO;Pv5e&O7KYG=kAxt~k9qxJbaOnFv zAR&GLe_mvNISep}{ojXX*3G_&Qo=AWqS>R}yd&P$~4)FK{PrtOoOhy8Vp@=n{nT#-VN>tB9&+Bp55rGQOI1#`-*=H3Ns5f1zeSCElP*ueWM5;dlW^~r9r%DWv%GmIb?d|Pjm`$v? z0n)7#5aL!gHeLaW8l7e>Qb|ckKVMaCXk;YXvx53H3W`5?2P_7yK;S(~kfJn3u1aeH zw0Ay0t9gEV(d>3rT~A{U!89ub1cb=g*jEq;q`slS0C>EvfttSfn%V@GB)Ool5FI=Z z96m9C%r4>NR103y50s8p%{936BqX0v2zkh{8MNLt>o5WW2|5lAHBb~PPXSGANMmCo z0l%9P4-bz~hd=V_))p0L1pd6~Pz?=@^~?aGC?LO)1n)TsNJG+X?&BbQ-)twz0aMEgLH$$hEmKaG$ zNE9?RpUvY+Z$DJkrU1TbP)NvKk1IGk;ERZgi!{7Q z>hkgj06gkX0?!1erlw9zOsH8Ou@Wx5sEy3bB&n{gB^3}@wBi#GAOibGt42#r@>*I1 zVA=o*9~K52{PxZc^MFp?xc;+#!7J*Sa1ZvC8URU|$=cwoJ)rpm0F58O{Q_5j9&lI# zU_9bYxRkuW@ z4M4|KT~h;KXym6PxgPk4U>})b%H}z$>r}Bru z&IQ)haqb@+^xW5s&CT&{G7=}H0KJKlsw(iKpuoby_Sct{F~TspBbr%Ti$EZZKy8#c zX#sAd|NcGW@je$#(z>$j6`Y-&3Awn`L)FyOa7uMsQG5ia`nF-gyDhhPRf`6Fy=_7#9x@DF`V*)MK&+{&N9%M(gS# z=x5L999OnZHMM@R9Omrmvo2sVs@ ziVEey!9hR8zwT&jH@AkDFJJZoC}K`Yi8K(gO6b6wd{=|9wgPtG=jan29u9z)5!CUt znSF4_b#*`hKw}Bw02l{C%F4>Xh~NtBQaRb#9ZtzbN>oDH+CG867A}nka4#q%}>bx|! 
zxA12R*gV;Eco;C$ z0Q%;K4uy-juj&Jq# z^=%y-6eYJjv$L~%hlaWU-D!DaIRoSc_iBk5uQAU{>288AFN{AS^(tqlf9&yRsa zAfuuJZMsai2k`GR2jk=7Rsj<9FK{c50@)^LOKSj5+~3r$*9N$tz@O$1u-n-!Ekpq5 zX(&mG7Fh#Q8(7k;0xs86^7;9BCLW$(!0LpB>F@7{fdR&d-hqJ*V8VfgdB)W5?(WLz z=sc^xBkpiIIy&<6^VV<7^+^X9zkp}DWYCfguj+y{D-`g=X$-HBQBfnf zyC3Rep3VfQPA^M9FbC}M695GLz{f}MEDa{|y4L^Z=ooe<5Ew`o^xVjRJisF+CT96? zq47E1Dk?@p?}4D;9TpZmj0@;UuxVb2GX3NYwR0J-`+z=n#F)!HlsFD~l*2nfh- zXut#60?gyX)FIIM_=kof0rk>z@<&ZaCv0zj`Xju~aZd#Y2_C3<{tgeL0NH^HhzXs%%gSy-^^%1YV8A~K5gFmqFM8kq57IGs1poj5 literal 0 HcmV?d00001 diff --git a/lib/python/flame/examples/medmnist_fedprox/run.py b/lib/python/flame/examples/medmnist_fedprox/run.py new file mode 100644 index 000000000..6a7d7ed94 --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/run.py @@ -0,0 +1,63 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +import os +import sys +import json + +# get trial +if len(sys.argv) != 2: + # logger? 
+ raise Exception('Wrong number of arguments; expected 1') + +mu = float(sys.argv[1]) + +# modify json files (jobid, mu-value) +job_ids = ["313a358619cd59012eabeefb", "332a358619cd59012eabeefb", "333a358619cd59012eabeefb", "3333a58619cd59012eabeefb", "3333358619cd59012eabeefb"] +accepted_mu = [1, 0.1, 0.01, 0.001, 0] + +task_ids = ["49d06b7526964db86cf37c70e8e0cdb6bd7aa742", "60d06b3526924db76cf37c70e8abcdb6bd7aa74"] +def update_params(filename, mu, num): + if mu not in [1, 0.1, 0.01, 0.001, 0]: + raise Exception('mu-value not in {accepted_mu}') + + index = accepted_mu.index(mu) + + # load json data + input_filename = "config/train_template.json" if num else "config/agg_template.json" + file = open(input_filename, 'r') + data = json.load(file) + data['optimizer']['kwargs']['mu'] = mu + data['job']['id'] = job_ids[index] + base = "https://github.com/GustavBaumgart/flame-datasets/raw/main/medmnist/" + data['dataset'] = f'{base}site{num}.npz' if num else f'{base}all_val.npz' + data['taskid'] = f'{task_ids[1]}{num-1}' if num else task_ids[0] + + # save json data + file = open(filename, 'w+') + file.write(json.dumps(data, indent=4)) + file.close() + +num_trainers = 10 +for i in range(1, num_trainers+1): + filename = f'config/trainer{i}.json' + update_params(filename, mu, i) + +filename = 'config/aggregator.json' +update_params(filename, mu, 0) + +# run bash file +os.system('bash fedprox.sh') diff --git a/lib/python/flame/examples/medmnist_fedprox/trainer/__init__.py b/lib/python/flame/examples/medmnist_fedprox/trainer/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/trainer/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/examples/medmnist_fedprox/trainer/main.py b/lib/python/flame/examples/medmnist_fedprox/trainer/main.py new file mode 100644 index 000000000..6e1d99501 --- /dev/null +++ b/lib/python/flame/examples/medmnist_fedprox/trainer/main.py @@ -0,0 +1,227 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: Apache-2.0 + +"""MedMNIST FedProx trainer for PyTorch Using Proximal Term.""" + +import logging +from flame.common.util import install_packages +install_packages(['scikit-learn']) + +from flame.common.util import get_params_detached_pytorch +from flame.config import Config +from flame.mode.horizontal.trainer import Trainer +import torch +import torchvision +import numpy as np +from PIL import Image +from sklearn.metrics import accuracy_score + + +logger = logging.getLogger(__name__) + +class CNN(torch.nn.Module): + """CNN Class""" + + def __init__(self, num_classes): + """Initialize.""" + super(CNN, self).__init__() + self.num_classes = num_classes + self.features = torch.nn.Sequential( + torch.nn.Conv2d(3, 6, kernel_size=3, padding=1), + torch.nn.BatchNorm2d(6), + torch.nn.ReLU(), + torch.nn.MaxPool2d(kernel_size=2, stride=2), + torch.nn.Conv2d(6, 16, kernel_size=3, padding=1), + torch.nn.BatchNorm2d(16), + torch.nn.ReLU(), + torch.nn.MaxPool2d(kernel_size=2, stride=2) + ) + self.fc = torch.nn.Linear(16 * 7 * 7, num_classes) + + def forward(self, x): + x = self.features(x) + x = x.view(x.size(0), -1) + x = self.fc(x) + return x + +class PathMNISTDataset(torch.utils.data.Dataset): + def __init__(self, split, transform=None, as_rgb=False): + npz_file = np.load("pathmnist.npz") + self.split = split + self.transform = transform + self.as_rgb = as_rgb + + if self.split == 'train': + self.imgs = npz_file['train_images'] + self.labels = npz_file['train_labels'] + elif self.split == 'val': + self.imgs = npz_file['val_images'] + self.labels = npz_file['val_labels'] + elif self.split == 'test': + self.imgs = npz_file['test_images'] + self.labels = npz_file['test_labels'] + else: + raise ValueError + + def __len__(self): + return self.imgs.shape[0] + + def __getitem__(self, index): + img, target = self.imgs[index], self.labels[index].astype(int) + img = Image.fromarray(img) + + if self.as_rgb: + img = img.convert('RGB') + + if self.transform is 
not None: + img = self.transform(img) + + return img, target + +class PyTorchMedMNistTrainer(Trainer): + """PyTorch MedMNist Trainer""" + + def __init__(self, config: Config) -> None: + self.config = config + self.dataset_size = 0 + + self.model = None + self.device = torch.device("cpu") + + self.train_loader = None + self.val_loader = None + + self.epochs = self.config.hyperparameters['epochs'] + self.batch_size = self.config.hyperparameters['batchSize'] + self._round = 1 + self._rounds = self.config.hyperparameters['rounds'] + + def initialize(self) -> None: + """Initialize role.""" + + self.model = CNN(num_classes=9) # Should we add number of classes this into config? + self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-3, weight_decay=1e-5) + self.criterion = torch.nn.CrossEntropyLoss() + + def load_data(self) -> None: + """MedMNIST Pathology Dataset + The dataset is kindly released by Jakob Nikolas Kather, Johannes Krisam, et al. (2019) in their paper + "Predicting survival from colorectal cancer histology slides using deep learning: A retrospective multicenter study", + and made available through Yang et al. (2021) in + "MedMNIST Classification Decathlon: A Lightweight AutoML Benchmark for Medical Image Analysis". 
+ Dataset Repo: https://github.com/MedMNIST/MedMNIST + """ + + self._download() + + data_transform = torchvision.transforms.Compose([ + torchvision.transforms.ToTensor(), + torchvision.transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) + ]) + + train_dataset = PathMNISTDataset(split='train', transform=data_transform) + val_dataset = PathMNISTDataset(split='val', transform=data_transform) + + self.train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=4 * torch.cuda.device_count(), + pin_memory=True, + drop_last=True + ) + self.val_loader = torch.utils.data.DataLoader( + val_dataset, + batch_size=self.batch_size, + shuffle=True, + num_workers=4 * torch.cuda.device_count(), + pin_memory=True, + drop_last=True + ) + + self.dataset_size = len(train_dataset) + + def _download(self) -> None: + import requests + r = requests.get(self.config.dataset, allow_redirects=True) + open('pathmnist.npz', 'wb').write(r.content) + + def train(self) -> None: + """Train a model.""" + self.model.load_state_dict(self.weights) + + # save global model first + global_params = get_params_detached_pytorch(self.model) + + for epoch in range(self.epochs): + self.model.train() + loss_lst = list() + + for data, label in self.train_loader: + data, label = data.to(self.device), label.to(self.device) + self.optimizer.zero_grad() + output = self.model(data) + + # proximal term included in loss + loss = self.criterion(output, label.squeeze()) + self.regularizer.get_term(w = self.model.parameters(), w_t = global_params) + + # back to normal stuff + loss_lst.append(loss.item()) + loss.backward() + self.optimizer.step() + + train_loss = sum(loss_lst) / len(loss_lst) + self.update_metrics({"Train Loss": train_loss}) + + def evaluate(self) -> None: + """Evaluate a model.""" + self.model.eval() + loss_lst = list() + labels = torch.tensor([],device=self.device) + labels_pred = torch.tensor([],device=self.device) + with 
torch.no_grad(): + for data, label in self.val_loader: + data, label = data.to(self.device), label.to(self.device) + output = self.model(data) + loss = self.criterion(output, label.squeeze()) + loss_lst.append(loss.item()) + labels_pred = torch.cat([labels_pred, output.argmax(dim=1)], dim=0) + labels = torch.cat([labels, label], dim=0) + + labels_pred = labels_pred.cpu().detach().numpy() + labels = labels.cpu().detach().numpy() + val_acc = accuracy_score(labels, labels_pred) + + val_loss = sum(loss_lst) / len(loss_lst) + self.update_metrics({"Val Loss": val_loss, "Val Accuracy": val_acc, "Testset Size": len(self.val_loader)}) + logger.info(f"Test Loss: {val_loss}") + logger.info(f"Test Accuracy: {val_acc}") + logger.info(f"Test Set Size: {len(self.val_loader)}") + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='') + parser.add_argument('config', nargs='?', default="./config.json") + + args = parser.parse_args() + + config = Config(args.config) + + t = PyTorchMedMNistTrainer(config) + t.compose() + t.run() diff --git a/lib/python/flame/mode/horizontal/trainer.py b/lib/python/flame/mode/horizontal/trainer.py index 51bffc5d9..4e39e363c 100644 --- a/lib/python/flame/mode/horizontal/trainer.py +++ b/lib/python/flame/mode/horizontal/trainer.py @@ -23,6 +23,7 @@ from ...common.util import (MLFramework, delta_weights_pytorch, delta_weights_tensorflow, get_ml_framework_in_use, mlflow_runname, valid_frameworks) +from ...optimizers import optimizer_provider from ...registries import registry_provider from ..composer import Composer from ..message import MessageType @@ -59,6 +60,11 @@ def internal_init(self) -> None: self.registry_client.setup_run(mlflow_runname(self.config)) self.metrics = dict() + # needed for trainer-side optimization algorithms such as fedprox + temp_opt = optimizer_provider.get(self.config.optimizer.sort, + **self.config.optimizer.kwargs) + self.regularizer = temp_opt.regularizer + self._round = 1 
self._work_done = False diff --git a/lib/python/flame/optimizer/fedavg.py b/lib/python/flame/optimizer/fedavg.py index c405b92cc..e8be1cbce 100644 --- a/lib/python/flame/optimizer/fedavg.py +++ b/lib/python/flame/optimizer/fedavg.py @@ -22,6 +22,7 @@ from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) from .abstract import AbstractOptimizer +from .regularizer.default import Regularizer logger = logging.getLogger(__name__) @@ -42,6 +43,8 @@ def __init__(self): raise NotImplementedError( "supported ml framework not found; " f"supported frameworks are: {valid_frameworks}") + + self.regularizer = Regularizer() def do(self, base_weights: ModelWeights, diff --git a/lib/python/flame/optimizer/fedbuff.py b/lib/python/flame/optimizer/fedbuff.py index 8895a5ca5..ff971340c 100644 --- a/lib/python/flame/optimizer/fedbuff.py +++ b/lib/python/flame/optimizer/fedbuff.py @@ -30,6 +30,7 @@ from ..common.util import (MLFramework, get_ml_framework_in_use, valid_frameworks) from .abstract import AbstractOptimizer +from .regularizer.default import Regularizer logger = logging.getLogger(__name__) @@ -50,6 +51,8 @@ def __init__(self): raise NotImplementedError( "supported ml framework not found; " f"supported frameworks are: {valid_frameworks}") + + self.regularizer = Regularizer() def do(self, base_weights: ModelWeights, diff --git a/lib/python/flame/optimizer/fedprox.py b/lib/python/flame/optimizer/fedprox.py new file mode 100644 index 000000000..bbafbc824 --- /dev/null +++ b/lib/python/flame/optimizer/fedprox.py @@ -0,0 +1,46 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +"""FedProx optimizer""" +"""https://arxiv.org/abs/1812.06127""" + +import logging +from ..common.util import (MLFramework, get_ml_framework_in_use) +from .regularizer.fedprox import FedProxRegularizer +from .fedavg import FedAvg + +logger = logging.getLogger(__name__) + + +class FedProx(FedAvg): + """FedProx class.""" + + def __init__(self, mu): + """Initialize FedProx instance.""" + ml_framework_in_use = get_ml_framework_in_use() + + # only support pytorch for fedprox + if ml_framework_in_use != MLFramework.PYTORCH: + raise NotImplementedError( + "supported ml framework not found; " + f"supported frameworks (for fedprox) are: {[MLFramework.PYTORCH.name.lower()]}") + + super().__init__() + + self.mu = mu + # override parent's self.regularizer + self.regularizer = FedProxRegularizer(self.mu) + logger.debug("Initializing fedprox") diff --git a/lib/python/flame/optimizer/regularizer/__init__.py b/lib/python/flame/optimizer/regularizer/__init__.py new file mode 100644 index 000000000..506f034ea --- /dev/null +++ b/lib/python/flame/optimizer/regularizer/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + + diff --git a/lib/python/flame/optimizer/regularizer/default.py b/lib/python/flame/optimizer/regularizer/default.py new file mode 100644 index 000000000..1132e5151 --- /dev/null +++ b/lib/python/flame/optimizer/regularizer/default.py @@ -0,0 +1,31 @@ +# Copyright 2023 Cisco Systems, Inc. and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""Dummy Regularizer.""" +import logging + +logger = logging.getLogger(__name__) + + +class Regularizer: + """Regularizer class.""" + + def __init__(self): + """Initialize Regularizer instance.""" + pass + + def get_term(self, **kwargs): + """No regularizer term for dummy regularizer.""" + return 0.0 diff --git a/lib/python/flame/optimizer/regularizer/fedprox.py b/lib/python/flame/optimizer/regularizer/fedprox.py new file mode 100644 index 000000000..c80582f33 --- /dev/null +++ b/lib/python/flame/optimizer/regularizer/fedprox.py @@ -0,0 +1,39 @@ +# Copyright 2023 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 +"""FedProx Regularizer.""" +import logging +from .regularizer import Regularizer + +logger = logging.getLogger(__name__) + + +class FedProxRegularizer(Regularizer): + """Regularizer class.""" + + def __init__(self, mu): + """Initialize FedProxRegularizer instance.""" + super().__init__() + self.mu = mu + + def get_term(self, **kwargs): + """Calculate proximal term for client-side regularization""" + import torch + w = kwargs['w'] + w_t = kwargs['w_t'] + norm_sq = 0.0 + for loc_param, glob_param in zip(w, w_t): + norm_sq += torch.sum(torch.pow(loc_param-glob_param, 2)) + return (self.mu/2) * norm_sq diff --git a/lib/python/flame/optimizers.py b/lib/python/flame/optimizers.py index b3e2c4e9f..c13e5e813 100644 --- a/lib/python/flame/optimizers.py +++ b/lib/python/flame/optimizers.py @@ -21,6 +21,7 @@ from .optimizer.fedadam import FedAdam from .optimizer.fedavg import FedAvg from .optimizer.fedbuff import FedBuff +from .optimizer.fedprox import FedProx from .optimizer.fedyogi import FedYogi @@ -38,3 +39,4 @@ def get(self, optimizer_name, **kwargs): optimizer_provider.register(OptimizerType.FEDADAM, FedAdam) optimizer_provider.register(OptimizerType.FEDYOGI, FedYogi) optimizer_provider.register(OptimizerType.FEDBUFF, FedBuff) +optimizer_provider.register(OptimizerType.FEDPROX, FedProx) From 6532a3b0d53863065e2bb9d53f1c3266009c436e Mon Sep 17 00:00:00 2001 From: 
alexandruuBytex <56033021+alexandruuBytex@users.noreply.github.com> Date: Tue, 28 Feb 2023 11:28:05 +0200 Subject: [PATCH 13/16] Create diagnose script (#348) * Create diagnose script * Make the script executable --------- Co-authored-by: Alex Ungurean --- scripts/flameDiagnose.sh | 48 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100755 scripts/flameDiagnose.sh diff --git a/scripts/flameDiagnose.sh b/scripts/flameDiagnose.sh new file mode 100755 index 000000000..f74b323e1 --- /dev/null +++ b/scripts/flameDiagnose.sh @@ -0,0 +1,48 @@ +delimiter="----------" + +go version + +echo $delimiter + +echo "Python3:" $(python3 --version) +echo "Pyenv:" $(pyenv version) + +echo $delimiter + +docker --version + +echo $delimiter + +minikube version + +echo $delimiter + +echo "kubectl:" +kubectl version --short + +echo $delimiter + +echo "helm:" +helm version + +echo $delimiter + +echo "jq:" +jq --version + +echo $delimiter + +echo "DNS configuration:" +cat /etc/resolver/flame-test + +echo $delimiter + +echo "minikube IP:" +minikube ip + +echo $delimiter + +echo "flame pods:" +kubectl get pods -n flame + +echo $delimiter From 466e6cccb3095c8908736d2b33a28ddc6aee91be Mon Sep 17 00:00:00 2001 From: Myungjin Lee Date: Tue, 28 Feb 2023 08:57:02 -0800 Subject: [PATCH 14/16] refactor+fix: configurable deployer / lib regularizer fix (#351) deployer's job template file is hard-coded, which makes it hard to use different template file at deployment time. Using different different template file is useful when underlying infrastructure is different (e.g., k8s vs knative). To support that, template folder and file is fed as config variables. Also, deployer's config info is fed as command argument, which is cumbersome. So, the config parsing part is refactored such that the info is fed as a configuration file. During the testing of deployer change, a bug in the library is identified. The fix for it is added here too. 
Finally, the local dns configuration in flame.sh is updated so that it can be done correctly across different linux distributions (e.g., archlinux and ubuntu). The tests for flame.sh are under archlinux and ubuntu. --- cmd/deployer/app/resource_handler.go | 53 +++-- cmd/deployer/cmd/root.go | 186 ++++++------------ cmd/deployer/config/config.go | 62 ++++++ fiab/flame.sh | 58 +++--- .../control/templates/deployer-configmap.yaml | 36 ++++ .../deployer-default-deployment.yaml | 30 ++- fiab/helm-chart/control/values.yaml | 8 +- .../deployer-compute1-deployment.yaml | 28 +-- .../templates/deployer-configmap.yaml | 36 ++++ fiab/helm-chart/deployer/values.yaml | 8 +- .../flame/optimizer/regularizer/default.py | 2 +- .../flame/optimizer/regularizer/fedprox.py | 11 +- 12 files changed, 303 insertions(+), 215 deletions(-) create mode 100644 cmd/deployer/config/config.go create mode 100644 fiab/helm-chart/control/templates/deployer-configmap.yaml create mode 100644 fiab/helm-chart/deployer/templates/deployer-configmap.yaml diff --git a/cmd/deployer/app/resource_handler.go b/cmd/deployer/app/resource_handler.go index 7052d4617..82305d56b 100644 --- a/cmd/deployer/app/resource_handler.go +++ b/cmd/deployer/app/resource_handler.go @@ -34,6 +34,7 @@ import ( "google.golang.org/grpc/credentials/insecure" "github.com/cisco-open/flame/cmd/deployer/app/deployer" + "github.com/cisco-open/flame/cmd/deployer/config" "github.com/cisco-open/flame/pkg/openapi" pbNotify "github.com/cisco-open/flame/pkg/proto/notification" "github.com/cisco-open/flame/pkg/restapi" @@ -41,13 +42,9 @@ import ( ) const ( - deploymentDirPath = "/" + util.ProjectName + "/deployment" deploymentTemplateDir = "templates" - jobTemplateDirPath = "/" + util.ProjectName + "/template" - jobDeploymentFilePrefix = "job-agent" - jobTemplatePath = jobTemplateDirPath + "/" + jobDeploymentFilePrefix + ".yaml.mustache" - k8sShortLabelLength = 12 + k8sShortLabelLength = 12 ) var ( @@ -63,13 +60,17 @@ type resourceHandler struct 
{ namespace string dplyr deployer.Deployer + // variables for job templates + jobTemplateDirPath string + jobTemplatePath string + deploymentDirPath string + stream pbNotify.DeployEventRoute_GetDeployEventClient grpcDialOpt grpc.DialOption } -func NewResourceHandler(apiserverEp string, notifierEp string, computeSpec openapi.ComputeSpec, - platform string, namespace string, bInsecure bool, bPlain bool) *resourceHandler { +func NewResourceHandler(cfg *config.Config, computeSpec openapi.ComputeSpec, bInsecure bool, bPlain bool) *resourceHandler { var grpcDialOpt grpc.DialOption if bPlain { @@ -85,21 +86,28 @@ func NewResourceHandler(apiserverEp string, notifierEp string, computeSpec opena grpcDialOpt = grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg)) } - dplyr, err := deployer.NewDeployer(platform) + dplyr, err := deployer.NewDeployer(cfg.Platform) if err != nil { zap.S().Errorf("failed to obtain a job deployer: %v", err) return nil } + parentDir := filepath.Dir(cfg.JobTemplate.Folder) + deploymentDirPath := filepath.Join(parentDir, "deployment") + rHandler := &resourceHandler{ - apiserverEp: apiserverEp, - notifierEp: notifierEp, + apiserverEp: cfg.Apiserver, + notifierEp: cfg.Notifier, spec: computeSpec, - platform: platform, - namespace: namespace, + platform: cfg.Platform, + namespace: cfg.Namespace, dplyr: dplyr, + jobTemplateDirPath: cfg.JobTemplate.Folder, + jobTemplatePath: filepath.Join(cfg.JobTemplate.Folder, cfg.JobTemplate.File), + deploymentDirPath: deploymentDirPath, + grpcDialOpt: grpcDialOpt, } @@ -249,7 +257,7 @@ func (r *resourceHandler) revokeResource(jobId string) (err error) { } taskStatuses[taskId] = openapi.AGENT_REVOKE_SUCCESS // 2.delete all the task resource specification files - deploymentChartPath := filepath.Join(deploymentDirPath, jobId, taskId) + deploymentChartPath := filepath.Join(r.deploymentDirPath, jobId, taskId) removeErr := os.RemoveAll(deploymentChartPath) if removeErr != nil { zap.S().Errorf("Errors occurred deleting 
specification files: %v", removeErr) @@ -323,11 +331,14 @@ func (r *resourceHandler) deployResources(deploymentConfig openapi.DeploymentCon errMsg := fmt.Sprintf("failed to initialize a job deployer: %v", err) return fmt.Errorf(errMsg) } + agentStatuses := map[string]openapi.AgentState{} defer r.postDeploymentStatus(deploymentConfig.JobId, agentStatuses) + for taskId := range deploymentConfig.AgentKVs { - deploymentChartPath := filepath.Join(deploymentDirPath, deploymentConfig.JobId, taskId) + deploymentChartPath := filepath.Join(r.deploymentDirPath, deploymentConfig.JobId, taskId) targetTemplateDirPath := filepath.Join(deploymentChartPath, deploymentTemplateDir) + if makeErr := os.MkdirAll(targetTemplateDirPath, util.FilePerm0644); makeErr != nil { errMsg := fmt.Sprintf("failed to create a deployment template folder: %v", makeErr) err = fmt.Errorf("%v; %v", err, errMsg) @@ -336,21 +347,20 @@ func (r *resourceHandler) deployResources(deploymentConfig openapi.DeploymentCon } // Copy helm chart files to destination folder - copyErr := copyHelmCharts(helmChartFiles, jobTemplateDirPath, deploymentChartPath) + copyErr := copyHelmCharts(helmChartFiles, r.jobTemplateDirPath, deploymentChartPath) if copyErr != nil { err = fmt.Errorf("%v; %v", err, copyErr) agentStatuses[taskId] = openapi.AGENT_DEPLOY_FAILED continue } - taskKey := deploymentConfig.AgentKVs[taskId] - ctx := map[string]string{ "imageLoc": deploymentConfig.ImageLoc, "taskId": taskId, - "taskKey": taskKey, + "taskKey": deploymentConfig.AgentKVs[taskId], } - rendered, renderErr := mustache.RenderFile(jobTemplatePath, &ctx) + + rendered, renderErr := mustache.RenderFile(r.jobTemplatePath, &ctx) if renderErr != nil { errMsg := fmt.Sprintf("failed to render a template for task %s: %v", taskId, renderErr) err = fmt.Errorf("%v; %v", err, errMsg) @@ -358,8 +368,9 @@ func (r *resourceHandler) deployResources(deploymentConfig openapi.DeploymentCon continue } - deploymentFileName := fmt.Sprintf("%s-%s.yaml", 
jobDeploymentFilePrefix, taskId) + deploymentFileName := fmt.Sprintf("task-%s.yaml", taskId) deploymentFilePath := filepath.Join(targetTemplateDirPath, deploymentFileName) + writeErr := os.WriteFile(deploymentFilePath, []byte(rendered), util.FilePerm0644) if writeErr != nil { errMsg := fmt.Sprintf("failed to write a job rosource spec %s: %v", taskId, writeErr) @@ -367,6 +378,7 @@ func (r *resourceHandler) deployResources(deploymentConfig openapi.DeploymentCon agentStatuses[taskId] = openapi.AGENT_DEPLOY_FAILED continue } + //using short id of task as label name does not support more than 35 characters installErr := r.dplyr.Install("job-"+deploymentConfig.JobId+"-"+taskId[:k8sShortLabelLength], deploymentChartPath) if installErr != nil { @@ -375,6 +387,7 @@ func (r *resourceHandler) deployResources(deploymentConfig openapi.DeploymentCon agentStatuses[taskId] = openapi.AGENT_DEPLOY_FAILED continue } + agentStatuses[taskId] = openapi.AGENT_DEPLOY_SUCCESS } diff --git a/cmd/deployer/cmd/root.go b/cmd/deployer/cmd/root.go index 059e46761..7452fd495 100644 --- a/cmd/deployer/cmd/root.go +++ b/cmd/deployer/cmd/root.go @@ -18,149 +18,89 @@ package cmd import ( "fmt" - "strings" + "path/filepath" "github.com/spf13/cobra" + "go.uber.org/zap" "github.com/cisco-open/flame/cmd/deployer/app" + "github.com/cisco-open/flame/cmd/deployer/config" "github.com/cisco-open/flame/pkg/openapi" "github.com/cisco-open/flame/pkg/util" ) const ( - argApiserver = "apiserver" - argNotifier = "notifier" - argAdminId = "adminid" - argRegion = "region" - argComputeId = "computeid" - argApiKey = "apikey" - argPlatform = "platform" - argNamespace = "namespace" - optionInsecure = "insecure" optionPlain = "plain" ) -var rootCmd = &cobra.Command{ - Use: util.Deployer, - Short: util.ProjectName + " Deployer", - RunE: func(cmd *cobra.Command, args []string) error { - flags := cmd.Flags() - - apiserver, err := flags.GetString(argApiserver) - if err != nil { - return err - } - if 
len(strings.Split(apiserver, ":")) != util.NumTokensInRestEndpoint { - return fmt.Errorf("incorrect format for apiserver endpoint: %s", apiserver) - } - - notifier, err := flags.GetString(argNotifier) - if err != nil { - return err - } - if len(strings.Split(notifier, ":")) != util.NumTokensInEndpoint { - return fmt.Errorf("incorrect format for notifier endpoint: %s", notifier) - } - - adminId, err := flags.GetString(argAdminId) - if err != nil { - return err - } - - region, err := flags.GetString(argRegion) - if err != nil { - return err - } - - computeId, err := flags.GetString(argComputeId) - if err != nil { - return err - } - - apikey, err := flags.GetString(argApiKey) - if err != nil { - return err - } - - platform, err := flags.GetString(argPlatform) - if err != nil { - return err - } - - namespace, err := flags.GetString(argNamespace) - if err != nil { - return err - } - - bInsecure, _ := flags.GetBool(optionInsecure) - bPlain, _ := flags.GetBool(optionPlain) - - if bInsecure && bPlain { - err = fmt.Errorf("options --%s and --%s are incompatible; enable one of them", optionInsecure, optionPlain) - return err - } - - computeSpec := openapi.ComputeSpec{ - AdminId: adminId, - Region: region, - ComputeId: computeId, - ApiKey: apikey, - } - - compute, err := app.NewCompute(apiserver, computeSpec, bInsecure, bPlain) - if err != nil { - return err - } - - err = compute.RegisterNewCompute() - if err != nil { - err = fmt.Errorf("unable to register new compute with controller: %s", err) - return err - } - - resoureHandler := app.NewResourceHandler(apiserver, notifier, computeSpec, platform, namespace, bInsecure, bPlain) - resoureHandler.Start() - - select {} - }, -} +var ( + cfgFile string + cfg *config.Config + + rootCmd = &cobra.Command{ + Use: util.Deployer, + Short: util.ProjectName + " Deployer", + RunE: func(cmd *cobra.Command, args []string) error { + flags := cmd.Flags() + + bInsecure, _ := flags.GetBool(optionInsecure) + bPlain, _ := 
flags.GetBool(optionPlain) + + if bInsecure && bPlain { + err := fmt.Errorf("options --%s and --%s are incompatible; enable one of them", + optionInsecure, optionPlain) + return err + } + + computeSpec := openapi.ComputeSpec{ + AdminId: cfg.AdminId, + Region: cfg.Region, + ComputeId: cfg.ComputeId, + ApiKey: cfg.Apikey, + } + + compute, err := app.NewCompute(cfg.Apiserver, computeSpec, bInsecure, bPlain) + if err != nil { + return err + } + + err = compute.RegisterNewCompute() + if err != nil { + err = fmt.Errorf("unable to register new compute with controller: %s", err) + return err + } + + resoureHandler := app.NewResourceHandler(cfg, computeSpec, bInsecure, bPlain) + resoureHandler.Start() + + select {} + }, + } +) func init() { - defaultApiServerEp := fmt.Sprintf("http://0.0.0.0:%d", util.ApiServerRestApiPort) - rootCmd.Flags().StringP(argApiserver, "a", defaultApiServerEp, "API server endpoint") - rootCmd.MarkFlagRequired(argApiserver) - - defaultNotifierEp := fmt.Sprintf("0.0.0.0:%d", util.NotifierGrpcPort) - rootCmd.Flags().StringP(argNotifier, "n", defaultNotifierEp, "Notifier endpoint") - rootCmd.MarkFlagRequired(argNotifier) - - defaultAdminId := "admin" - rootCmd.Flags().StringP(argAdminId, "d", defaultAdminId, "unique admin id") - rootCmd.MarkFlagRequired(argAdminId) - - defaultRegion := "region" - rootCmd.Flags().StringP(argRegion, "r", defaultRegion, "region name") - rootCmd.MarkFlagRequired(argRegion) + cobra.OnInitialize(initConfig) - defaultComputeId := "compute" - rootCmd.Flags().StringP(argComputeId, "c", defaultComputeId, "unique compute id") - rootCmd.MarkFlagRequired(argComputeId) + usage := "config file (default: /etc/flame/deployer.yaml)" + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", usage) + rootCmd.CompletionOptions.DisableDefaultCmd = true - defaultApiKey := "apiKey" - rootCmd.Flags().StringP(argApiKey, "k", defaultApiKey, "unique apikey") - rootCmd.MarkFlagRequired(argApiKey) + 
rootCmd.PersistentFlags().Bool(optionInsecure, false, "Allow insecure connection") + rootCmd.PersistentFlags().Bool(optionPlain, false, "Allow unencrypted connection") +} - defaultPlatform := "k8s" - rootCmd.Flags().StringP(argPlatform, "p", defaultPlatform, "compute platform") - rootCmd.MarkFlagRequired(argPlatform) +func initConfig() { + if cfgFile == "" { + cfgFile = filepath.Join("/etc/flame/deployer.yaml") + } - defaultNamespace := "flame" - rootCmd.Flags().StringP(argNamespace, "s", defaultNamespace, "compute namespace") - rootCmd.MarkFlagRequired(argNamespace) + var err error - rootCmd.PersistentFlags().Bool(optionInsecure, false, "Allow insecure connection") - rootCmd.PersistentFlags().Bool(optionPlain, false, "Allow unencrypted connection") + cfg, err = config.LoadConfig(cfgFile) + if err != nil { + zap.S().Fatalf("Failed to load config %s: %v", cfgFile, err) + } } func Execute() error { diff --git a/cmd/deployer/config/config.go b/cmd/deployer/config/config.go new file mode 100644 index 000000000..aef9e1aa1 --- /dev/null +++ b/cmd/deployer/config/config.go @@ -0,0 +1,62 @@ +// Copyright 2023 Cisco Systems, Inc. and its affiliates +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package config + +import ( + "github.com/spf13/afero" + "gopkg.in/yaml.v3" +) + +type Config struct { + Apiserver string `yaml:"apiserver"` + Notifier string `yaml:"notifier"` + AdminId string `yaml:"adminId"` + Region string `yaml:"region"` + ComputeId string `yaml:"computeId"` + Apikey string `yaml:"apikey"` + Platform string `yaml:"platform"` + Namespace string `yaml:"namespace"` + + JobTemplate JobTemplate `yaml:"jobTemplate"` +} + +type JobTemplate struct { + Folder string `yaml:"folder"` + File string `yaml:"file"` +} + +var fs afero.Fs + +func init() { + fs = afero.NewOsFs() +} + +func LoadConfig(configPath string) (*Config, error) { + data, err := afero.ReadFile(fs, configPath) + if err != nil { + return nil, err + } + + cfg := &Config{} + + err = yaml.Unmarshal(data, cfg) + if err != nil { + return nil, err + } + + return cfg, nil +} diff --git a/fiab/flame.sh b/fiab/flame.sh index fbd837964..7d7021cf7 100755 --- a/fiab/flame.sh +++ b/fiab/flame.sh @@ -110,22 +110,26 @@ function post_start_config { if [[ "$OSTYPE" == "linux-gnu"* ]]; then os_id=$(grep '^ID=' /etc/os-release | sed 's/"//g' | cut -d= -f2) - case $os_id in - "amzn") - echo "set flame.test domain with $minikube_ip in route 53" - ;; - *) - subnet=$(ip a show | grep br- | grep inet | awk '{print $2}') - resolver_file=/etc/systemd/network/minikube.network - echo "[Match]" | sudo tee $resolver_file > /dev/null - echo "Name=br*" | sudo tee -a $resolver_file > /dev/null - echo "[Network]" | sudo tee -a $resolver_file > /dev/null - echo "Address=$subnet" | sudo tee -a $resolver_file > /dev/null - echo "DNS=$minikube_ip" | sudo tee -a $resolver_file > /dev/null - echo "Domains=~flame.test" | sudo tee -a $resolver_file > /dev/null - sudo systemctl restart systemd-networkd - ;; - esac + case $os_id in + "amzn") + echo "set flame.test domain with $minikube_ip in route 53" + ;; + *) + IFS=. 
read -r oc1 oc2 oc3 oc4 <<< $minikube_ip + subnet=$(ip a show | grep $oc1.$oc2.$oc3 | awk '{print $2}') + device=$(ip a show | grep -B 2 $oc1.$oc2.$oc3 | head -n 1 | cut -d':' -f 2) + device=${device## } + + resolver_file=/etc/systemd/network/minikube.network + echo "[Match]" | sudo tee $resolver_file > /dev/null + echo "Name=$device" | sudo tee -a $resolver_file > /dev/null + echo "[Network]" | sudo tee -a $resolver_file > /dev/null + echo "Address=$subnet" | sudo tee -a $resolver_file > /dev/null + echo "DNS=$minikube_ip" | sudo tee -a $resolver_file > /dev/null + echo "Domains=~flame.test" | sudo tee -a $resolver_file > /dev/null + sudo systemctl restart systemd-networkd + ;; + esac elif [[ "$OSTYPE" == "darwin"* ]]; then resolver_file=/etc/resolver/flame-test echo "domain flame.test" | sudo tee $resolver_file > /dev/null @@ -184,17 +188,17 @@ function post_stop_cleanup { minikube_ip=$(minikube ip) if [[ "$OSTYPE" == "linux-gnu"* ]]; then - os_id=$(grep '^ID=' /etc/os-release | sed 's/"//g' | cut -d= -f2) - case $os_id in - "amzn") - echo "remove flame.test domain from route 53" - ;; - *) - resolver_file=/etc/systemd/network/minikube.network - sudo rm -f $resolver_file - sudo systemctl restart systemd-networkd - ;; - esac + os_id=$(grep '^ID=' /etc/os-release | sed 's/"//g' | cut -d= -f2) + case $os_id in + "amzn") + echo "remove flame.test domain from route 53" + ;; + *) + resolver_file=/etc/systemd/network/minikube.network + sudo rm -f $resolver_file + sudo systemctl restart systemd-networkd + ;; + esac elif [[ "$OSTYPE" == "darwin"* ]]; then resolver_file=/etc/resolver/flame-test sudo rm -f $resolver_file diff --git a/fiab/helm-chart/control/templates/deployer-configmap.yaml b/fiab/helm-chart/control/templates/deployer-configmap.yaml new file mode 100644 index 000000000..b6fd8eca6 --- /dev/null +++ b/fiab/helm-chart/control/templates/deployer-configmap.yaml @@ -0,0 +1,36 @@ +# Copyright 2023 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-deployer-configmap + namespace: {{ .Release.Namespace }} +data: + deployer.yaml: | + --- + apiserver: "https://{{ .Values.frontDoorUrl.apiserver }}:443" + notifier: "{{ .Values.frontDoorUrl.notifier }}:443" + adminId: {{ .Values.deployer.adminId }} + region: {{ .Values.deployer.region }} + computeId: {{ .Values.deployer.computeId }} + apikey: {{ .Values.deployer.apiKey }} + platform: {{ .Values.deployer.platform }} + namespace: {{ .Values.deployer.namespace }} + jobTemplate: + folder: {{ .Values.deployer.jobTemplate.folder }} + file: {{ .Values.deployer.jobTemplate.file }} diff --git a/fiab/helm-chart/control/templates/deployer-default-deployment.yaml b/fiab/helm-chart/control/templates/deployer-default-deployment.yaml index d5058e2f4..2b4359640 100644 --- a/fiab/helm-chart/control/templates/deployer-default-deployment.yaml +++ b/fiab/helm-chart/control/templates/deployer-default-deployment.yaml @@ -32,22 +32,6 @@ spec: spec: containers: - args: - - --apiserver - - "https://{{ .Values.frontDoorUrl.apiserver }}:443" - - --notifier - - "{{ .Values.frontDoorUrl.notifier }}:443" - - --adminid - - {{ .Values.deployerDefault.adminId }} - - --region - - {{ .Values.deployerDefault.region }} - - --computeid - - {{ .Values.deployerDefault.computeId }} - - --apikey - - {{ 
.Values.deployerDefault.apiKey }} - - --platform - - {{ .Values.deployerDefault.platform }} - - --namespace - - {{ .Values.deployerDefault.namespace }} {{ if .Values.insecure }} - "--insecure" {{ end }} @@ -56,11 +40,19 @@ spec: imagePullPolicy: IfNotPresent name: {{ .Release.Name }}-deployer-default volumeMounts: - - mountPath: /flame/template + - mountPath: /etc/flame/deployer.yaml + name: config-volume + subPath: deployer.yaml + + - mountPath: {{ .Values.deployer.jobTemplate.folder }} name: job-template-volume - + serviceAccountName: deployer volumes: + - name: config-volume + configMap: + name: {{ .Release.Name }}-deployer-configmap + - name: job-template-volume configMap: - name: {{ .Release.Name }}-deployer-job-configmap \ No newline at end of file + name: {{ .Release.Name }}-deployer-job-configmap diff --git a/fiab/helm-chart/control/values.yaml b/fiab/helm-chart/control/values.yaml index f918be4b5..6f2dd5338 100644 --- a/fiab/helm-chart/control/values.yaml +++ b/fiab/helm-chart/control/values.yaml @@ -106,13 +106,19 @@ mlflow: s3EndpointUrl: http://minio.flame.test servicePort: "5000" -deployerDefault: +deployer: adminId: "admin-1" region: "default/us" computeId: "default" apiKey: "apiKey-default" platform: "k8s" namespace: "flame" + jobTemplate: + folder: /flame/template + # to use a different template file, put the file in the "job" folder + # use its file name as the value of key "file". 
+ # also, update the name in the templates/deployer-job-configmap.yaml + file: job-agent.yaml.mustache servicePort: apiserver: "10100" diff --git a/fiab/helm-chart/deployer/templates/deployer-compute1-deployment.yaml b/fiab/helm-chart/deployer/templates/deployer-compute1-deployment.yaml index 740fdb2b6..d90bdf14c 100644 --- a/fiab/helm-chart/deployer/templates/deployer-compute1-deployment.yaml +++ b/fiab/helm-chart/deployer/templates/deployer-compute1-deployment.yaml @@ -32,22 +32,6 @@ spec: spec: containers: - args: - - --apiserver - - "https://{{ .Values.frontDoorUrl.apiserver }}:443" - - --notifier - - "{{ .Values.frontDoorUrl.notifier }}:443" - - --adminid - - {{ .Values.deployerCompute1.adminId }} - - --region - - {{ .Values.deployerCompute1.region }} - - --computeid - - {{ .Values.deployerCompute1.computeId }} - - --apikey - - {{ .Values.deployerCompute1.apiKey }} - - --platform - - {{ .Values.deployerCompute1.platform }} - - --namespace - - {{ .Values.deployerCompute1.namespace }} {{ if .Values.insecure }} - "--insecure" {{ end }} @@ -56,11 +40,19 @@ spec: imagePullPolicy: IfNotPresent name: {{ .Release.Name }}-deployer-compute1 volumeMounts: - - mountPath: /flame/template + - mountPath: /etc/flame/deployer.yaml + name: config-volume + subPath: deployer.yaml + + - mountPath: {{ .Values.deployer.jobTemplate.folder }} name: job-template-volume serviceAccountName: deployer volumes: + - name: config-volume + configMap: + name: {{ .Release.Name }}-deployer-configmap + - name: job-template-volume configMap: - name: {{ .Release.Name }}-deployer-job-configmap \ No newline at end of file + name: {{ .Release.Name }}-deployer-job-configmap diff --git a/fiab/helm-chart/deployer/templates/deployer-configmap.yaml b/fiab/helm-chart/deployer/templates/deployer-configmap.yaml new file mode 100644 index 000000000..b6fd8eca6 --- /dev/null +++ b/fiab/helm-chart/deployer/templates/deployer-configmap.yaml @@ -0,0 +1,36 @@ +# Copyright 2023 Cisco Systems, Inc. 
and its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: Apache-2.0 + +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Release.Name }}-deployer-configmap + namespace: {{ .Release.Namespace }} +data: + deployer.yaml: | + --- + apiserver: "https://{{ .Values.frontDoorUrl.apiserver }}:443" + notifier: "{{ .Values.frontDoorUrl.notifier }}:443" + adminId: {{ .Values.deployer.adminId }} + region: {{ .Values.deployer.region }} + computeId: {{ .Values.deployer.computeId }} + apikey: {{ .Values.deployer.apiKey }} + platform: {{ .Values.deployer.platform }} + namespace: {{ .Values.deployer.namespace }} + jobTemplate: + folder: {{ .Values.deployer.jobTemplate.folder }} + file: {{ .Values.deployer.jobTemplate.file }} diff --git a/fiab/helm-chart/deployer/values.yaml b/fiab/helm-chart/deployer/values.yaml index b6cb37dbd..65fc60bf9 100644 --- a/fiab/helm-chart/deployer/values.yaml +++ b/fiab/helm-chart/deployer/values.yaml @@ -32,13 +32,19 @@ mlflow: s3EndpointUrl: http://minio.flame.test # TODO: fix s3 access id and key issue servicePort: "5000" -deployerCompute1: +deployer: adminId: "admin-2" region: "default/us/west" computeId: "compute-1" apiKey: "apiKey-1" platform: "k8s" namespace: "flame" + jobTemplate: + folder: /flame/template + # to use a different template file, put the file in the "job" folder + # use its file name as the value of key "file". 
+ # also, update the name in the templates/deployer-job-configmap.yaml + file: job-agent.yaml.mustache servicePort: agent: "10103" diff --git a/lib/python/flame/optimizer/regularizer/default.py b/lib/python/flame/optimizer/regularizer/default.py index 1132e5151..662d24905 100644 --- a/lib/python/flame/optimizer/regularizer/default.py +++ b/lib/python/flame/optimizer/regularizer/default.py @@ -25,7 +25,7 @@ class Regularizer: def __init__(self): """Initialize Regularizer instance.""" pass - + def get_term(self, **kwargs): """No regularizer term for dummy regularizer.""" return 0.0 diff --git a/lib/python/flame/optimizer/regularizer/fedprox.py b/lib/python/flame/optimizer/regularizer/fedprox.py index c80582f33..ef018804f 100644 --- a/lib/python/flame/optimizer/regularizer/fedprox.py +++ b/lib/python/flame/optimizer/regularizer/fedprox.py @@ -15,7 +15,8 @@ # SPDX-License-Identifier: Apache-2.0 """FedProx Regularizer.""" import logging -from .regularizer import Regularizer + +from .default import Regularizer logger = logging.getLogger(__name__) @@ -27,13 +28,13 @@ def __init__(self, mu): """Initialize FedProxRegularizer instance.""" super().__init__() self.mu = mu - + def get_term(self, **kwargs): - """Calculate proximal term for client-side regularization""" + """Calculate proximal term for client-side regularization.""" import torch w = kwargs['w'] w_t = kwargs['w_t'] norm_sq = 0.0 for loc_param, glob_param in zip(w, w_t): - norm_sq += torch.sum(torch.pow(loc_param-glob_param, 2)) - return (self.mu/2) * norm_sq + norm_sq += torch.sum(torch.pow(loc_param - glob_param, 2)) + return (self.mu / 2) * norm_sq From 66b6db5a96039af202b57d64945ea9a8683c55e0 Mon Sep 17 00:00:00 2001 From: Claudiu Date: Wed, 1 Mar 2023 17:17:41 +0200 Subject: [PATCH 15/16] Add missing merge fix --- pkg/openapi/model_role.go | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/pkg/openapi/model_role.go b/pkg/openapi/model_role.go index 55469d8dd..310c34999 100644 --- 
a/pkg/openapi/model_role.go +++ b/pkg/openapi/model_role.go @@ -63,29 +63,3 @@ func AssertRecurseRoleRequired(objSlice interface{}) error { return AssertRoleRequired(aRole) }) } - -// AssertRoleRequired checks if the required fields are not zero-ed -func AssertRoleRequired(obj Role) error { - elements := map[string]interface{}{ - "name": obj.Name, - } - for name, el := range elements { - if isZero := IsZeroValue(el); isZero { - return &RequiredError{Field: name} - } - } - - return nil -} - -// AssertRecurseRoleRequired recursively checks if required fields are not zero-ed in a nested slice. -// Accepts only nested slice of Role (e.g. [][]Role), otherwise ErrTypeAssertionError is thrown. -func AssertRecurseRoleRequired(objSlice interface{}) error { - return AssertRecurseInterfaceRequired(objSlice, func(obj interface{}) error { - aRole, ok := obj.(Role) - if !ok { - return ErrTypeAssertionError - } - return AssertRoleRequired(aRole) - }) -} From a627cbd799eda17406ab13fdb5ea158857bbdc6b Mon Sep 17 00:00:00 2001 From: elqurio <119978637+elqurio@users.noreply.github.com> Date: Fri, 3 Mar 2023 10:00:19 +0100 Subject: [PATCH 16/16] Make sdk config backwards compatible. 
(#355) --- lib/python/flame/config.py | 14 +++++++++++--- lib/python/flame/examples/adult/aggregator/main.py | 4 ++-- lib/python/flame/examples/adult/trainer/main.py | 4 ++-- .../examples/dist_mnist/trainer/keras/main.py | 4 ++-- .../examples/dist_mnist/trainer/pytorch/main.py | 4 ++-- .../examples/hier_mnist/middle_aggregator/main.py | 4 ++-- .../examples/hier_mnist/top_aggregator/main.py | 4 ++-- .../flame/examples/hier_mnist/trainer/main.py | 4 ++-- .../flame/examples/hybrid/aggregator/main.py | 4 ++-- lib/python/flame/examples/hybrid/trainer/main.py | 4 ++-- .../examples/medmnist/aggregator/pytorch/main.py | 4 ++-- .../examples/medmnist/trainer/pytorch/main.py | 4 ++-- .../flame/examples/mnist/aggregator/keras/main.py | 4 ++-- .../examples/mnist/aggregator/pytorch/main.py | 4 ++-- .../flame/examples/mnist/trainer/keras/main.py | 4 ++-- .../flame/examples/mnist/trainer/pytorch/main.py | 4 ++-- 16 files changed, 41 insertions(+), 33 deletions(-) diff --git a/lib/python/flame/config.py b/lib/python/flame/config.py index 539e535d3..f79edc1f2 100644 --- a/lib/python/flame/config.py +++ b/lib/python/flame/config.py @@ -16,8 +16,8 @@ """Config parser.""" from enum import Enum -from pydantic import Field import typing as t +from pydantic import Field from pydantic import BaseModel as pydBaseModel import json @@ -82,6 +82,8 @@ class Registry(FlameSchema): class Selector(FlameSchema): sort: SelectorType = Field(default=SelectorType.DEFAULT) kwargs: dict = Field(default={}) + + class Optimizer(FlameSchema): sort: OptimizerType = Field(default=OptimizerType.DEFAULT) kwargs: dict = Field(default={}) @@ -97,6 +99,7 @@ class Hyperparameters(FlameSchema): learning_rate: t.Optional[float] = Field(alias="learningRate") rounds: int epochs: int + aggregation_goal: t.Optional[int] = Field(alias="aggGoal", default=None) class Groups(FlameSchema): @@ -148,6 +151,12 @@ class ChannelConfigs(FlameSchema): class Config(FlameSchema): + def __init__(self, config_path: str): + raw_config = 
read_config(config_path) + transformed_config = transform_config(raw_config) + + super().__init__(**transformed_config) + role: str realm: str task: t.Optional[str] = Field(default="local") @@ -174,8 +183,7 @@ def read_config(filename: str) -> dict: return json.loads(f.read()) -def load_config(filename: str) -> Config: - raw_config = read_config(filename) +def transform_config(raw_config: dict) -> dict: config_data = { "role": raw_config["role"], "realm": raw_config["realm"], diff --git a/lib/python/flame/examples/adult/aggregator/main.py b/lib/python/flame/examples/adult/aggregator/main.py index 622329038..7d251d7c5 100644 --- a/lib/python/flame/examples/adult/aggregator/main.py +++ b/lib/python/flame/examples/adult/aggregator/main.py @@ -19,7 +19,7 @@ import torch import torch.nn as nn -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.top_aggregator import TopAggregator logger = logging.getLogger(__name__) @@ -88,7 +88,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = PyTorchAdultAggregator(config) t.compose() diff --git a/lib/python/flame/examples/adult/trainer/main.py b/lib/python/flame/examples/adult/trainer/main.py index 2fd1bddd8..973f0e561 100644 --- a/lib/python/flame/examples/adult/trainer/main.py +++ b/lib/python/flame/examples/adult/trainer/main.py @@ -24,7 +24,7 @@ import torch.optim as optim from flame.common.constants import DATA_FOLDER_PATH from flame.common.util import install_packages -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.trainer import Trainer install_packages(['scikit-learn']) @@ -149,7 +149,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = PyTorchAdultTrainer(config) t.compose() diff --git a/lib/python/flame/examples/dist_mnist/trainer/keras/main.py 
b/lib/python/flame/examples/dist_mnist/trainer/keras/main.py index 235093657..30cad7824 100644 --- a/lib/python/flame/examples/dist_mnist/trainer/keras/main.py +++ b/lib/python/flame/examples/dist_mnist/trainer/keras/main.py @@ -21,7 +21,7 @@ from statistics import mean import numpy as np -from flame.config import Config, load_config +from flame.config import Config from flame.mode.distributed.trainer import Trainer from tensorflow import keras from tensorflow.keras import layers @@ -132,7 +132,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = KerasMnistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/dist_mnist/trainer/pytorch/main.py b/lib/python/flame/examples/dist_mnist/trainer/pytorch/main.py index 5037c2cc0..7c6498ec3 100644 --- a/lib/python/flame/examples/dist_mnist/trainer/pytorch/main.py +++ b/lib/python/flame/examples/dist_mnist/trainer/pytorch/main.py @@ -26,7 +26,7 @@ import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data_utils -from flame.config import Config, load_config +from flame.config import Config from flame.mode.distributed.trainer import Trainer from torchvision import datasets, transforms @@ -145,7 +145,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = PyTorchMnistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/hier_mnist/middle_aggregator/main.py b/lib/python/flame/examples/hier_mnist/middle_aggregator/main.py index 0b83af2a9..dcca547b3 100644 --- a/lib/python/flame/examples/hier_mnist/middle_aggregator/main.py +++ b/lib/python/flame/examples/hier_mnist/middle_aggregator/main.py @@ -17,7 +17,7 @@ import logging -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.middle_aggregator import MiddleAggregator # the following needs to be imported to let the flame 
know # this aggregator works on tensorflow model @@ -58,7 +58,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = KerasMnistMiddleAggregator(config) a.compose() diff --git a/lib/python/flame/examples/hier_mnist/top_aggregator/main.py b/lib/python/flame/examples/hier_mnist/top_aggregator/main.py index 808bdf5bb..2f0297801 100644 --- a/lib/python/flame/examples/hier_mnist/top_aggregator/main.py +++ b/lib/python/flame/examples/hier_mnist/top_aggregator/main.py @@ -17,7 +17,7 @@ import logging -from flame.config import Config, load_config +from flame.config import Config from flame.dataset import Dataset from flame.mode.horizontal.top_aggregator import TopAggregator from tensorflow import keras @@ -82,7 +82,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = KerasMnistTopAggregator(config) a.compose() diff --git a/lib/python/flame/examples/hier_mnist/trainer/main.py b/lib/python/flame/examples/hier_mnist/trainer/main.py index ee1743a75..262d602c0 100644 --- a/lib/python/flame/examples/hier_mnist/trainer/main.py +++ b/lib/python/flame/examples/hier_mnist/trainer/main.py @@ -20,7 +20,7 @@ from statistics import mean import numpy as np -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.trainer import Trainer from tensorflow import keras from tensorflow.keras import layers @@ -131,7 +131,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = KerasMnistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/hybrid/aggregator/main.py b/lib/python/flame/examples/hybrid/aggregator/main.py index 580f64e8e..48fa4bb8b 100644 --- a/lib/python/flame/examples/hybrid/aggregator/main.py +++ b/lib/python/flame/examples/hybrid/aggregator/main.py @@ -18,7 +18,7 @@ import logging -from 
flame.config import Config, load_config +from flame.config import Config from flame.dataset import Dataset from flame.mode.horizontal.top_aggregator import TopAggregator from tensorflow import keras @@ -81,7 +81,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = KerasMnistAggregator(config) a.compose() diff --git a/lib/python/flame/examples/hybrid/trainer/main.py b/lib/python/flame/examples/hybrid/trainer/main.py index 5920cf8e4..9326e1598 100644 --- a/lib/python/flame/examples/hybrid/trainer/main.py +++ b/lib/python/flame/examples/hybrid/trainer/main.py @@ -20,7 +20,7 @@ from statistics import mean import numpy as np -from flame.config import Config, load_config +from flame.config import Config from flame.mode.hybrid.trainer import Trainer from tensorflow import keras from tensorflow.keras import layers @@ -131,7 +131,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = KerasMnistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/medmnist/aggregator/pytorch/main.py b/lib/python/flame/examples/medmnist/aggregator/pytorch/main.py index af42f79dd..3fd561371 100644 --- a/lib/python/flame/examples/medmnist/aggregator/pytorch/main.py +++ b/lib/python/flame/examples/medmnist/aggregator/pytorch/main.py @@ -18,7 +18,7 @@ import logging -from flame.config import Config, load_config +from flame.config import Config from flame.dataset import Dataset # Not sure why we need this. 
from flame.mode.horizontal.top_aggregator import TopAggregator import torch @@ -85,7 +85,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = PyTorchMedMNistAggregator(config) a.compose() diff --git a/lib/python/flame/examples/medmnist/trainer/pytorch/main.py b/lib/python/flame/examples/medmnist/trainer/pytorch/main.py index 1560fcd38..f25ae7f44 100644 --- a/lib/python/flame/examples/medmnist/trainer/pytorch/main.py +++ b/lib/python/flame/examples/medmnist/trainer/pytorch/main.py @@ -20,7 +20,7 @@ from flame.common.util import install_packages install_packages(['scikit-learn']) -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.trainer import Trainer import torch import torchvision @@ -212,7 +212,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = PyTorchMedMNistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/mnist/aggregator/keras/main.py b/lib/python/flame/examples/mnist/aggregator/keras/main.py index 96d805f74..677263213 100644 --- a/lib/python/flame/examples/mnist/aggregator/keras/main.py +++ b/lib/python/flame/examples/mnist/aggregator/keras/main.py @@ -17,7 +17,7 @@ import logging -from flame.config import Config, load_config +from flame.config import Config from flame.dataset import Dataset from flame.mode.horizontal.top_aggregator import TopAggregator from tensorflow import keras @@ -82,7 +82,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = KerasMnistAggregator(config) a.compose() diff --git a/lib/python/flame/examples/mnist/aggregator/pytorch/main.py b/lib/python/flame/examples/mnist/aggregator/pytorch/main.py index 067336ad6..f76ac07c3 100644 --- a/lib/python/flame/examples/mnist/aggregator/pytorch/main.py +++ 
b/lib/python/flame/examples/mnist/aggregator/pytorch/main.py @@ -24,7 +24,7 @@ import torch import torch.nn as nn import torch.nn.functional as F -from flame.config import Config, load_config +from flame.config import Config from flame.dataset import Dataset from flame.mode.horizontal.top_aggregator import TopAggregator from torchvision import datasets, transforms @@ -143,7 +143,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) a = PyTorchMnistAggregator(config) a.compose() diff --git a/lib/python/flame/examples/mnist/trainer/keras/main.py b/lib/python/flame/examples/mnist/trainer/keras/main.py index c2c5651d6..62103b85f 100644 --- a/lib/python/flame/examples/mnist/trainer/keras/main.py +++ b/lib/python/flame/examples/mnist/trainer/keras/main.py @@ -20,7 +20,7 @@ from statistics import mean import numpy as np -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.trainer import Trainer from tensorflow import keras from tensorflow.keras import layers @@ -131,7 +131,7 @@ def evaluate(self) -> None: args = parser.parse_args() - config = load_config(args.config) + config = Config(args.config) t = KerasMnistTrainer(config) t.compose() diff --git a/lib/python/flame/examples/mnist/trainer/pytorch/main.py b/lib/python/flame/examples/mnist/trainer/pytorch/main.py index 94113fd33..6fd8d1447 100644 --- a/lib/python/flame/examples/mnist/trainer/pytorch/main.py +++ b/lib/python/flame/examples/mnist/trainer/pytorch/main.py @@ -26,7 +26,7 @@ import torch.nn.functional as F import torch.optim as optim import torch.utils.data as data_utils -from flame.config import Config, load_config +from flame.config import Config from flame.mode.horizontal.trainer import Trainer @@ -146,7 +146,7 @@ def evaluate(self) -> None: parser.add_argument('config', nargs='?', default="./config.json") args = parser.parse_args() - config = load_config(args.config) + config = 
Config(args.config) t = PyTorchMnistTrainer(config) t.compose()