From 3d8069e09f0e9833ee000a9b859445350e1a2c7b Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 24 Jul 2023 16:36:37 +0200 Subject: [PATCH 01/70] Add LayerNorm --- .../code_models/models/wnet/model.py | 19 +- .../code_models/worker_inference.py | 2 +- .../dev_scripts/test_new_evaluation.ipynb | 245 ++++++++++++++++++ 3 files changed, 259 insertions(+), 7 deletions(-) create mode 100644 napari_cellseg3d/dev_scripts/test_new_evaluation.ipynb diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 060242a1..0a833fa1 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -16,6 +16,7 @@ "Xide Xia", "Brian Kulis", ] +NUM_GROUPS = 8 class WNet_encoder(nn.Module): @@ -179,11 +180,13 @@ def __init__(self, in_channels, out_channels, dropout=0.65): nn.Conv3d(in_channels, out_channels, 3, padding=1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(out_channels), + # nn.BatchNorm3d(out_channels), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=out_channels), nn.Conv3d(out_channels, out_channels, 3, padding=1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(out_channels), + # nn.BatchNorm3d(out_channels), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=out_channels), ) def forward(self, x): @@ -202,12 +205,14 @@ def __init__(self, in_channels, out_channels, dropout=0.65): nn.Conv3d(in_channels, out_channels, 1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(out_channels), + # nn.BatchNorm3d(out_channels), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=out_channels), nn.Conv3d(out_channels, out_channels, 3, padding=1), nn.Conv3d(out_channels, out_channels, 1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(out_channels), + # nn.BatchNorm3d(out_channels), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=out_channels), ) def forward(self, x): @@ -225,11 +230,13 @@ def __init__(self, in_channels, out_channels, dropout=0.65): 
nn.Conv3d(in_channels, 64, 3, padding=1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(64), + # nn.BatchNorm3d(64), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=64), nn.Conv3d(64, 64, 3, padding=1), nn.ReLU(), nn.Dropout(p=dropout), - nn.BatchNorm3d(64), + # nn.BatchNorm3d(64), + nn.GroupNorm(num_groups=NUM_GROUPS, num_channels=64), nn.Conv3d(64, out_channels, 1), ) diff --git a/napari_cellseg3d/code_models/worker_inference.py b/napari_cellseg3d/code_models/worker_inference.py index b66647c3..ceedac53 100644 --- a/napari_cellseg3d/code_models/worker_inference.py +++ b/napari_cellseg3d/code_models/worker_inference.py @@ -686,7 +686,7 @@ def inference(self): weights, map_location=self.config.device, ), - strict=True, + strict=False, # True, # TODO(cyril): change to True ) self.log(f"Weights status : {missing}") except Exception as e: diff --git a/napari_cellseg3d/dev_scripts/test_new_evaluation.ipynb b/napari_cellseg3d/dev_scripts/test_new_evaluation.ipynb new file mode 100644 index 00000000..12707e9b --- /dev/null +++ b/napari_cellseg3d/dev_scripts/test_new_evaluation.ipynb @@ -0,0 +1,245 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "import evaluate_labels as evl\n", + "from tifffile import imread\n", + "import time\n", + "import numpy as np" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from importlib import reload\n", + "reload(evl)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "path_true_labels=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/LABELS/relabel_gt.tif\"" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": 
{ + "tags": [] + }, + "outputs": [ + { + "ename": "IndexError", + "evalue": "too many indices for array: array is 1-dimensional, but 2 were indexed", + "output_type": "error", + "traceback": [ + "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[1;31mIndexError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[1;32mIn[16], line 4\u001b[0m\n\u001b[0;32m 2\u001b[0m labels \u001b[38;5;241m=\u001b[39m imread(path_model_label)\n\u001b[0;32m 3\u001b[0m \u001b[38;5;66;03m# labels.shape\u001b[39;00m\n\u001b[1;32m----> 4\u001b[0m res \u001b[38;5;241m=\u001b[39m \u001b[43mevl\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mevaluate_model_performance\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimread\u001b[49m\u001b[43m(\u001b[49m\u001b[43mpath_true_labels\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[43m,\u001b[49m\u001b[43mvisualize\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreturn_graphical_summary\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43mplot_according_to_gt_label\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\u001b[43m)\u001b[49m\n", + "File \u001b[1;32m~\\Desktop\\Code\\CellSeg3d\\napari_cellseg3d\\dev_scripts\\evaluate_labels.py:58\u001b[0m, in \u001b[0;36mevaluate_model_performance\u001b[1;34m(labels, model_labels, threshold_correct, print_details, visualize, return_graphical_summary, plot_according_to_gt_label)\u001b[0m\n\u001b[0;32m 20\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Evaluate the model performance.\u001b[39;00m\n\u001b[0;32m 21\u001b[0m \u001b[38;5;124;03mParameters\u001b[39;00m\n\u001b[0;32m 22\u001b[0m \u001b[38;5;124;03m----------\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 55\u001b[0m 
\u001b[38;5;124;03mgraph_true_positive_ratio_model: ndarray\u001b[39;00m\n\u001b[0;32m 56\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[0;32m 57\u001b[0m log\u001b[38;5;241m.\u001b[39mdebug(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mMapping labels...\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m---> 58\u001b[0m tmp \u001b[38;5;241m=\u001b[39m \u001b[43mmap_labels\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 59\u001b[0m \u001b[43m \u001b[49m\u001b[43mlabels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 60\u001b[0m \u001b[43m \u001b[49m\u001b[43mmodel_labels\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 61\u001b[0m \u001b[43m \u001b[49m\u001b[43mthreshold_correct\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 62\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_total_number_gt_labels\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 63\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_dict_map\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m 64\u001b[0m \u001b[43m \u001b[49m\u001b[43mreturn_graphical_summary\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mreturn_graphical_summary\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 65\u001b[0m \u001b[43m \u001b[49m\u001b[43mplot_according_to_gt_labels\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mplot_according_to_gt_label\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 66\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 67\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m return_graphical_summary:\n\u001b[0;32m 68\u001b[0m (\n\u001b[0;32m 69\u001b[0m map_labels_existing,\n\u001b[0;32m 70\u001b[0m map_fused_neurons,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 75\u001b[0m graph_true_positive_ratio_model,\n\u001b[0;32m 76\u001b[0m ) \u001b[38;5;241m=\u001b[39m tmp\n", + "File 
\u001b[1;32m~\\Desktop\\Code\\CellSeg3d\\napari_cellseg3d\\dev_scripts\\evaluate_labels.py:422\u001b[0m, in \u001b[0;36mmap_labels\u001b[1;34m(gt_labels, model_labels, threshold_correct, return_total_number_gt_labels, return_dict_map, accuracy_function, return_graphical_summary, plot_according_to_gt_labels)\u001b[0m\n\u001b[0;32m 419\u001b[0m \u001b[38;5;66;03m# remove from new_labels the labels that are in map_labels_existing\u001b[39;00m\n\u001b[0;32m 420\u001b[0m new_labels \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39marray(new_labels)\n\u001b[0;32m 421\u001b[0m i_new_labels \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39misin(\n\u001b[1;32m--> 422\u001b[0m \u001b[43mnew_labels\u001b[49m\u001b[43m[\u001b[49m\u001b[43m:\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdict_map\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmodel_label\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m]\u001b[49m,\n\u001b[0;32m 423\u001b[0m map_labels_existing[:, dict_map[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mmodel_label\u001b[39m\u001b[38;5;124m\"\u001b[39m]],\n\u001b[0;32m 424\u001b[0m invert\u001b[38;5;241m=\u001b[39m\u001b[38;5;28;01mTrue\u001b[39;00m,\n\u001b[0;32m 425\u001b[0m )\n\u001b[0;32m 426\u001b[0m new_labels \u001b[38;5;241m=\u001b[39m new_labels[i_new_labels, :]\n\u001b[0;32m 427\u001b[0m \u001b[38;5;66;03m# find the fused neurons: multiple gt labels are mapped to the same model label\u001b[39;00m\n", + "\u001b[1;31mIndexError\u001b[0m: too many indices for array: array is 1-dimensional, but 2 were indexed" + ] + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/isotropic_visual_cp_masks(1).tif\"\n", + "labels = imread(path_model_label)\n", + "# labels.shape\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), labels,visualize=False, 
return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACLDElEQVR4nOzdd1gUV9sG8HtBekeqioKIgooNGyJiQbH3GqJgVHxjRRPbG3vDErvGmmCJxsQSNRbsWAj2rogNRA1gBcSCAuf7w495XdmFhbAs6P27Li7dM2dmnpmdnZ1nz5kzMiGEABERERERkQJamg6AiIiIiIiKLiYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEipAkkYJk+eDJlMJlfm6OiIwMDAglh8jmJjYyGTybB27VqpLDAwEMbGxmpfdxaZTIbJkycX2vry4+zZs2jQoAGMjIwgk8lw6dKlPC/D0dERbdu2LfjgSGPCwsJQo0YN6OvrQyaTISkpKc/LkMlkGDJkSMEH9xlRxzmicePGaNy4cYEuUxVr166FTCbDuXPnCn3dn6PGjRujatWqmg5D7bKOm9jY2DzPq+gao6CEh4dDJpMhPDw8z/Oq41pDU5/rfyMwMBCOjo75nv9zOKfk9xyvjm1X1+elSLUw7N27t8heeBfl2HLz/v17dOvWDc+fP8eCBQuwYcMGlCtXTmHdGzduYPLkyfk6qVPx8uzZM3Tv3h0GBgZYtmwZNmzYACMjI4V1//77b0yePDlfCQXl35fyeSzO51dV/PPPP5g8eXK+fqghxRwdHT/rY+Zz9NNPP8n9uFuUFadYC0sJdS04OjoaWlp5y0f27t2LZcuW5ekkUK5cObx58wY6Ojp5jDBvcortzZs3KFFCbbvyX7t79y7u37+P1atXo3///jnWvXHjBqZMmYLGjRv/q18MqOg7e/YsXr58iWnTpsHX1zfHun///TemTJmCwMBAmJubF06An5H8niNy+jweOHCggKLTvPyc+4uTf/75B1OmTIGjoyNq1Kih6XCINOKnn36ClZVVofQ++beKU6yFRW1XuXp6eupaNAAgPT0dmZmZ0NXVhb6+vlrXlRtNrz83jx8/BgBe6KlBZmYm3r17V+SPAUV4XBQedRwfurq6Bb5MIiIiRfLcJenkyZOoU6cO9PX14ezsjJUrVyqs9+k9DO/fv8eUKVPg4uICfX19lCxZEg0bNsTBgwcBfOgDt2zZMgAf+oJl/QH/u0/hxx9/xMKFC+Hs7Aw9PT3cuHFD4T0MWe7duwc/Pz8YGRmhVKlSmDp1KoQQ0nRlfRc/XWZOsWWVffrL2MWLF9GqVSuYmprC2NgYzZo1w6lTp+TqZPVdi4iIwMiRI2FtbQ0jIyN06tQJT548UfwGfOLIkSPw9vaGkZERzM3N0aFDB0RFRUnTAwMD4ePjAwDo1q0bZDKZ0v6Ra9euRbdu3QAATZo0kbbz0/1z8uRJ1K1bF/r6+ihfvjzWr1+fbVlJSUkIDg6Gg4MD9PT0UKFCBcyePRuZmZm5btPOnTvRpk0blCpVCnp6enB2dsa0adOQkZGRre7p06fRunVrWFhYwMjICNWqVcOiRYvk6ty8eRPdu3eHtbU1DAwMUKlSJfzwww9y+0hRa4qifoBZ/fU3btyIKlWqQE9PD2FhYQCAH3/8EQ0aNEDJkiVhYGAADw8PbN26VeE2/vrrr6hbty4MDQ1hYWGBRo0aSb8YBwQEwMrKCu
/fv882X4sWLVCpUqWcdyCALVu2wMPDAwYGBrCyssLXX3+NR48eSdMbN26MgIAAAECdOnUgk8mU/pIyefJkjBo1CgDg5OQkHRefdpPZsWMHqlatCj09PVSpUkXaLx979OgRvvnmG9ja2kr1fvnll1y3BwBCQ0PRtGlT2NjYQE9PD5UrV8by5cuz1Tt37hz8/PxgZWUFAwMDODk54ZtvvpGrs3nzZnh4eMDExASmpqZwd3fPdtzcu3cP3bp1g6WlJQwNDVG/fn3s2bMn2/revn2LyZMno2LFitDX14e9vT06d+6Mu3fvSnU+PUfcv38fgwYNQqVKlWBgYICSJUuiW7ducvs0t8+jor7Ojx8/Rr9+/WBrawt9fX1Ur14d69atk6vz8fl01apV0vm0Tp06OHv2rNL9/6nXr19j4MCBKFmyJExNTdGnTx+8ePEiW719+/ZJ5ygTExO0adMG169fl6bndH6tVasWOnfuLLc8d3d3yGQyXLlyRSr7/fffIZPJ5M59qh5raWlpmDRpEipUqAA9PT04ODhg9OjRSEtLk6uX9dlX5Tj/WHh4OOrUqQMA6Nu3r7R9n35n3bhxA02aNIGhoSFKly6NOXPm5DtWRbLulbhy5Qp8fHxgaGiIChUqSOeoY8eOoV69etI58tChQ9mWocr3GgBcv34dTZs2hYGBAcqUKYPp06crPffndnyoKrdrjLw4ceIEunXrhrJly0r7ecSIEXjz5o3C+rldawAfflxauHAhqlSpAn19fdja2mLgwIEKPzOfWrJkCapUqSJ9X9SuXRubNm3KcZ53795h4sSJ8PDwgJmZGYyMjODt7Y2jR4/K1cvr+SDr+NfX10fVqlXx559/5ho/8OGa8Pr16zh27Jj0Gfj0/JWWlqbStdC/OWayjv+Pj83Q0FC57zRVYs2NKuf4jxXU+VSZgwcPomHDhjA3N4exsTEqVaqE//73v3napjy1MFy9ehUtWrSAtbU1Jk+ejPT0dEyaNAm2tra5zjt58mSEhISgf//+qFu3LlJSUnDu3DlcuHABzZs3x8CBA/HPP//g4MGD2LBhg8JlhIaG4u3btwgKCoKenh4sLS2VnoQyMjLQsmVL1K9fH3PmzEFYWBgmTZqE9PR0TJ06NS+brVJsH7t+/Tq8vb1hamqK0aNHQ0dHBytXrkTjxo2lk/LHhg4dCgsLC0yaNAmxsbFYuHAhhgwZgt9//z3H9Rw6dAitWrVC+fLlMXnyZLx58wZLliyBl5cXLly4AEdHRwwcOBClS5fGzJkzMWzYMNSpU0fp+9WoUSMMGzYMixcvxn//+1+4ubkBgPQvANy5cwddu3ZFv379EBAQgF9++QWBgYHw8PBAlSpVAHw48H18fPDo0SMMHDgQZcuWxd9//41x48YhPj4eCxcuzHG71q5dC2NjY4wcORLGxsY4cuQIJk6ciJSUFMydO1eqd/DgQbRt2xb29vYYPnw47OzsEBUVhd27d2P48OEAPpwcvL29oaOjg6CgIDg6OuLu3bv466+/MGPGjBzjUObIkSP4448/MGTIEFhZWUnJxqJFi9C+fXv4+/vj3bt32Lx5M7p164bdu3ejTZs20vxTpkzB5MmT0aBBA0ydOhW6uro4ffo0jhw5ghYtWqB3795Yv3499u/fL3eTeUJCAo4cOYJJkybluv/69u2LOnXqICQkBImJiVi0aBEiIiJw8eJFmJub44cffkClSpWwatUqTJ06FU5OTnB2dla4vM6dO+PWrVv47bffsGDBAlhZWQEArK2tpTonT57E9u3bMWjQIJiYmGDx4sXo0qUL4uLiULJkSQBAYmIi6tevL114WVtbY9++fejXrx9SUlIQHByc43YtX74cVapUQfv27VGiRAn89ddfGDRoEDIzMzF48GAAHy6Ys85RY8eOhbm5OWJjY7F9+3ZpOQcPHkSvXr3QrFkzzJ49GwAQFRWFiIgI6bhJTExEgwYN8Pr1awwbNgwlS5bEunXr0L59e2zduh
WdOnUC8OE807ZtWxw+fBg9e/bE8OHD8fLlSxw8eBDXrl1Tuk/Pnj2Lv//+Gz179kSZMmUQGxuL5cuXo3Hjxrhx4wYMDQ1V+jx+7M2bN2jcuDHu3LmDIUOGwMnJCVu2bEFgYCCSkpKkbcuyadMmvHz5EgMHDoRMJsOcOXPQuXNn3Lt3T6UunkOGDIG5uTkmT56M6OhoLF++HPfv35d+iAGADRs2ICAgAH5+fpg9ezZev36N5cuXo2HDhrh48aJ0jlJ2fvX29sZvv/0mvX7+/DmuX78OLS0tnDhxAtWqVQPw4QLP2tpa2jeqHmuZmZlo3749Tp48iaCgILi5ueHq1atYsGABbt26hR07dsjFo8px/ik3NzdMnToVEydORFBQELy9vQEADRo0kOq8ePECLVu2ROfOndG9e3ds3boVY8aMgbu7O1q1apWvWBV58eIF2rZti549e6Jbt25Yvnw5evbsiY0bNyI4OBj/+c9/8NVXX2Hu3Lno2rUrHjx4ABMTEwCqf68lJCSgSZMmSE9Px9ixY2FkZIRVq1bBwMAgWzyqHB+qyu0aIy+2bNmC169f49tvv0XJkiVx5swZLFmyBA8fPsSWLVvk6qp6rTFw4EDp3Dxs2DDExMRg6dKluHjxIiIiIpR+5lavXo1hw4aha9euGD58ON6+fYsrV67g9OnT+Oqrr5RuQ0pKCtasWYNevXphwIABePnyJX7++Wf4+fnhzJkz2brGqXI+OHDgALp06YLKlSsjJCQEz549Q9++fVGmTJlc9+nChQsxdOhQGBsbSz/YfXotosq10L85Zh49eiT9+DJu3DgYGRlhzZo12XrEqBJrblQ5x3+soM6nily/fh1t27ZFtWrVMHXqVOjp6eHOnTuIiIjI0zZB5EHHjh2Fvr6+uH//vlR248YNoa2tLT5dVLly5URAQID0unr16qJNmzY5Ln/w4MHZliOEEDExMQKAMDU1FY8fP1Y4LTQ0VCoLCAgQAMTQoUOlsszMTNGmTRuhq6srnjx5IoQQ4ujRowKAOHr0aK7LVBabEEIAEJMmTZJed+zYUejq6oq7d+9KZf/8848wMTERjRo1kspCQ0MFAOHr6ysyMzOl8hEjRghtbW2RlJSkcH1ZatSoIWxsbMSzZ8+kssuXLwstLS3Rp08fqSxrO7ds2ZLj8oQQYsuWLQr3iRAf3lMA4vjx41LZ48ePhZ6envjuu++ksmnTpgkjIyNx69YtufnHjh0rtLW1RVxcXI4xvH79OlvZwIEDhaGhoXj79q0QQoj09HTh5OQkypUrJ168eCFX9+N92ahRI2FiYiJ3zH5aJyAgQJQrVy7bOidNmpTtPQcgtLS0xPXr13ON+927d6Jq1aqiadOmUtnt27eFlpaW6NSpk8jIyFAYU0ZGhihTpozo0aOH3PT58+cLmUwm7t27l23dH6/TxsZGVK1aVbx580Yq3717twAgJk6cKJVlHX9nz55Vurwsc+fOFQBETExMtmkAhK6urrhz545UdvnyZQFALFmyRCrr16+fsLe3F0+fPpWbv2fPnsLMzEzh+/4xRdP9/PxE+fLlpdd//vlnrts0fPhwYWpqKtLT05XWCQ4OFgDEiRMnpLKXL18KJycn4ejoKL13v/zyiwAg5s+fn20ZHx9jn54jFG1LZGSkACDWr18vleX0efTx8RE+Pj7S64ULFwoA4tdff5XK3r17Jzw9PYWxsbFISUkRQvzv/FayZEnx/Plzqe7OnTsFAPHXX38p3S9C/O+48fDwEO/evZPK58yZIwCInTt3CiE+7C9zc3MxYMAAufkTEhKEmZmZXLmy82vW9t+4cUMIIcSuXbuEnp6eaN++vdzno1q1aqJTp07Sa1WPtQ0bNggtLS2591kIIVasWCEAiIiICKlM1eNckbNnz2b7Tsni4+OT7X1PS0sTdnZ2okuXLlJZXmJVJGs9mzZtkspu3rwpndNOnTolle/fvz9bvKp+r2V9dk6fPi2VPX78WJiZmcmdQ/
JyfCg6F39KlWsMRRRdByj6fIaEhAiZTCb3XaLqtcaJEycEALFx40a5ZYaFhWUr//Rz3aFDB1GlSpU8b1d6erpIS0uTK3vx4oWwtbUV33zzjVSWl/NBjRo1hL29vdy1yYEDBwQAhd+hn6pSpYrctmVR9VooL8eMIkOHDhUymUxcvHhRKnv27JmwtLTM9v2mLFZl8nuOV8f59NPPy4IFCwQA6XjML5W7JGVkZGD//v3o2LEjypYtK5W7ubnBz88v1/nNzc1x/fp13L59W9VVZtOlSxe5XzVz8/FQj1m/NL17905hU2tBycjIwIEDB9CxY0eUL19eKre3t8dXX32FkydPIiUlRW6eoKAgua4v3t7eyMjIwP3795WuJz4+HpcuXUJgYCAsLS2l8mrVqqF58+bYu3dvAW7V/1SuXFn6hQz48CtzpUqVcO/ePalsy5Yt8Pb2hoWFBZ4+fSr9+fr6IiMjA8ePH89xHR//EvXy5Us8ffoU3t7eeP36NW7evAngQ9N4TEwMgoODs/XBz9qXT548wfHjx/HNN9/IHbMf18kPHx8fVK5cOce4X7x4geTkZHh7e+PChQtS+Y4dO5CZmYmJEydmGxQgKyYtLS34+/tj165dePnypTR948aNaNCgAZycnJTGdu7cOTx+/BiDBg2S6zffpk0buLq6KuxSUxB8fX3lfk2vVq0aTE1NpeNCCIFt27ahXbt2EELIHRd+fn5ITk6W20+KfLx/k5OT8fTpU/j4+ODevXtITk4G8L/7MXbv3q2wS1dWnVevXuXYVWHv3r2oW7cuGjZsKJUZGxsjKCgIsbGxuHHjBgBg27ZtsLKywtChQ7MtI6dj7ONtef/+PZ49e4YKFSrA3Nw81/2QU8x2dnbo1auXVKajo4Nhw4YhNTUVx44dk6vfo0cPWFhYSK+zPtcff5ZzEhQUJPer6LfffosSJUpI556DBw8iKSkJvXr1knu/tbW1Ua9evWxdIxTJiinrnHHixAnUqVMHzZs3x4kTJwB86P547do1qW5ejrUtW7bAzc0Nrq6ucvWaNm0KANlizO04zy9jY2N8/fXX0mtdXV3UrVs323k1L7EqW0/Pnj2l15UqVYK5uTnc3NzkWr6z/p+1/rx8r+3duxf169dH3bp1pXrW1tbw9/eXi6Ugjo+PFcQ1RpaPP5+vXr3C06dP0aBBAwghcPHixWz1c7vW2LJlC8zMzNC8eXO5bfXw8ICxsXGO22pubo6HDx/mqbsgAGhra0v3OWVmZuL58+dIT09H7dq1FZ5jcjsfZF1zBAQEwMzMTKrXvHlzhd+H+ZHbtdC/PWbCwsLg6ekp17piaWmZ7dgsCHk9x6vzfJr1vbhz506VuoUro3LC8OTJE7x58wYuLi7ZpqnSp3rq1KlISkpCxYoV4e7ujlGjRsn1QVVFThdKn9LS0pI7sQFAxYoVAUCtQxQ+efIEr1+/VrhP3NzckJmZiQcPHsiVf3oxm/WhzalvY9YHSNl6nj59ilevXuU5/tx8GivwId6PY719+zbCwsJgbW0t95c1Ek/WzbbKXL9+HZ06dYKZmRlMTU1hbW0tfZlmXRhm9Q/PafzyrBNdQY9xruw43L17N+rXrw99fX1YWlrC2toay5cvl2IGPsStpaWV6wm2T58+ePPmjdQ/NDo6GufPn0fv3r1znC+n48LV1TXHJPTfyO24ePLkCZKSkrBq1apsx0Xfvn0B5H5cREREwNfXV7pfx9raWuqDmbWPfXx80KVLF0yZMgVWVlbo0KEDQkND5fp4Dxo0CBUrVkSrVq1QpkwZfPPNN9n6od+/f1/pZytrOvDh/axUqVKeR0B68+YNJk6cKN3jY2VlBWtrayQlJckdL3lx//59uLi4ZEtEP405S37OOx/79LvA2NgY9vb20vk168KtadOm2d7zAwcO5Pp+Ax+6Abi4uEjJwYkTJ+Dt7Y1GjRrhn3/+wb179xAREYHMzEzpAicvx9rt27dx/fr1bPWyvis+jVGV81
9+lClTJluCqei8mpdYVV2PmZkZHBwcspUBkPv8qvq9lnUcfurTeQvi+PhYQVxjZImLi5N+jDM2Noa1tbV0L+Cnn09VrjVu376N5ORk2NjYZNvW1NTUHLd1zJgxMDY2Rt26deHi4oLBgwer3JVk3bp1qFatmnRPh7W1Nfbs2aPwHJPb+SDr/JHfa0BV5BbDvz1m7t+/jwoVKmQrV1T2b+X1HK/O82mPHj3g5eWF/v37w9bWFj179sQff/yR5+Sh0MYCbdSoEe7evYudO3fiwIEDWLNmDRYsWIAVK1bkOtRnFkV9IP8NZb8AKrq5Vp20tbUVlotPbpoqClSJNTMzE82bN8fo0aMV1s06mSqSlJQEHx8fmJqaYurUqXB2doa+vj4uXLiAMWPG/KvsWJm8HgeKjsMTJ06gffv2aNSoEX766SfY29tDR0cHoaGhud6cpkjlypXh4eGBX3/9FX369MGvv/4KXV1ddO/ePc/LKgy5HRdZ79vXX38t3Wz9qaz+6IrcvXsXzZo1g6urK+bPnw8HBwfo6upi7969WLBggbR8mUyGrVu34tSpU/jrr7+wf/9+fPPNN5g3bx5OnToFY2Nj2NjY4NKlS9i/fz/27duHffv2ITQ0FH369Ml2g7C6DB06FKGhoQgODoanpyfMzMwgk8nQs2dPtRzjiqj7vJO1HRs2bICdnV226aomWQ0bNsThw4fx5s0bnD9/HhMnTkTVqlVhbm6OEydOICoqCsbGxqhZs6bcelU51jIzM+Hu7o758+crrPfphbS69pmq59W8xJqX9WjiO6igjo8sBXGNAXw47zdv3hzPnz/HmDFj4OrqCiMjIzx69AiBgYH5+nxmZmbCxsYGGzduVDg9p54Tbm5uiI6Oxu7duxEWFoZt27bhp59+wsSJEzFlyhSl8/36668IDAxEx44dMWrUKNjY2EBbWxshISFyAzJkKQrXIap+jxTUMaNOBX2O/zfbbmBggOPHj+Po0aPYs2cPwsLC8Pvvv6Np06Y4cOCA0v2ebR2qBps1woyi5r7o6GiVlmFpaYm+ffuib9++SE1NRaNGjTB58mTpw1yQT6bLzMzEvXv35C5Ob926BQDSjSFZ2eunD6NS9CusqrFZW1vD0NBQ4T65efMmtLS0VDqx5ybrwWvK1mNlZaX0IVw5KYj3wNnZGampqbmO7a9IeHg4nj17hu3bt6NRo0ZSeUxMTLZ1AMC1a9eUrifrV59r167luE4LCwuFDyTLy6/x27Ztg76+Pvbv3y93A1VoaGi2uDMzM3Hjxo1cx2Pv06cPRo4cifj4eGzatAlt2rSRazJW5OPjIqurQpbo6GilD+zLzb89LqytrWFiYoKMjIx8HRd//fUX0tLSsGvXLrlfoZQ1w9avXx/169fHjBkzsGnTJvj7+2Pz5s3SuUZXVxft2rVDu3btkJmZiUGDBmHlypWYMGECKlSogHLlyin9bAH/28/Ozs44ffo03r9/n6dnwWzduhUBAQGYN2+eVPb27dtsx2Fe9nu5cuVw5coVZGZmyrUyfBpzQbl9+zaaNGkivU5NTUV8fDxat24N4H+fURsbm1zf85y209vbG6Ghodi8eTMyMjLQoEEDaGlpoWHDhlLC0KBBA+lLLy/HmrOzMy5fvoxmzZqp7UnCQMGdVwsjVkXy8r1Wrlw5la4T8nJ8qCq3awxVXL16Fbdu3cK6devQp08fqVxZF0ZVrjWcnZ1x6NAheHl55euHTyMjI/To0QM9evTAu3fv0LlzZ8yYMQPjxo1TOmTz1q1bUb58eWzfvl3ueMlt0Axlss4f/+Ya8N8et//2mClXrhzu3LmTrVxR2b+NVdVzfJaCPJ8qoqWlhWbNmqFZs2aYP38+Zs6ciR9++AFHjx5VeXkqd0nS1taGn58fduzYgbi4OKk8KioK+/fvz3X+Z8+eyb02NjZGhQoV5LoKZF3gFtTTZJcuXSr9XwiBpUuXQkdHB82aNQPw4eDR1tbO1qf+p59+yrYsVW
PT1tZGixYtsHPnTrmuT4mJidi0aRMaNmwIU1PTfG7R/9jb26NGjRpYt26dXEzXrl3DgQMHpIMsrwriPejevTsiIyMVHhdJSUlIT09XOm/Wl/7Hv2q8e/cu23tSq1YtODk5YeHChdlizZrX2toajRo1wi+//CJ3zH66fGdnZyQnJ8s1X8fHx6s8XFxW3DKZTK5VIjY2NtvIJR07doSWlhamTp2a7VeGT3/J6dWrF2QyGYYPH4579+7J9XFWpnbt2rCxscGKFSvkPlv79u1DVFSU3GhNefFvjwttbW106dIF27ZtU5jA5TaMsKLjIjk5OVtC9uLFi2z7MSsxy9ofn56LtLS0pF+cs+q0bt0aZ86cQWRkpFTv1atXWLVqFRwdHaUuZV26dMHTp0/lzjVZcvplTltbO9v0JUuWZGvVyst+b926NRISEuRGFElPT8eSJUtgbGwsdakoKKtWrZK7T2T58uVIT0+XRvXx8/ODqakpZs6cqfB+ko/f85y2M6ur0ezZs1GtWjWpu4y3tzcOHz6Mc+fOyd1XlZdjrXv37nj06BFWr16drd6bN28KrFtnQZ1XCyNWRfLyvda6dWucOnUKZ86ckeo9efIk26/reTk+VKHKNYYqFJ1rhBDZhl3+WG7XGt27d0dGRgamTZuWbd709PQcj4tPt0tXVxeVK1eGEELpfVrKtuP06dNy57S8+Pia4+MuNQcPHpTu6cqNkZHRv/oM/Ntjxs/PD5GRkXJPXH/+/LnClp9/G6uq5/gsBXk+/dTz58+zlX36vaiKPLXfTJkyBWFhYfD29sagQYOkL6MqVark2lewcuXKaNy4MTw8PGBpaYlz585h69atcjcLeXh4AACGDRsGPz8/aGtry92glRf6+voICwtDQEAA6tWrh3379mHPnj3473//KzX/mZmZoVu3bliyZAlkMhmcnZ2xe/duhX3B8hLb9OnTpTFvBw0ahBIlSmDlypVIS0tTOLZ2fs2dOxetWrWCp6cn+vXrJw2ramZmlu8nptaoUQPa2tqYPXs2kpOToaenJ419r6pRo0Zh165daNu2rTTk6qtXr3D16lVs3boVsbGx0tCcn2rQoAEsLCwQEBCAYcOGQSaTYcOGDdk+eFpaWli+fDnatWuHGjVqoG/fvrC3t8fNmzdx/fp1KVlZvHgxGjZsiFq1aiEoKAhOTk6IjY3Fnj17pJNGz549MWbMGHTq1AnDhg2ThiqrWLGiyjegtmnTBvPnz0fLli3x1Vdf4fHjx1i2bBkqVKgg99moUKECfvjhB0ybNg3e3t7o3Lkz9PT0cPbsWZQqVQohISFSXWtra7Rs2RJbtmyBubm5Shf7Ojo6mD17Nvr27QsfHx/06tVLGlbV0dERI0aMUGl7PpV1/P/www/o2bMndHR00K5duzy1Ys2aNQtHjx5FvXr1MGDAAFSuXBnPnz/HhQsXcOjQIYUntSwtWrSQWgUGDhyI1NRUrF69GjY2NoiPj5fqrVu3Dj/99BM6deoEZ2dnvHz5EqtXr4apqamURPfv3x/Pnz9H06ZNUaZMGdy/fx9LlixBjRo1pP7+Y8eOxW+//YZWrVph2LBhsLS0xLp16xATE4Nt27ZJv+D36dMH69evx8iRI3HmzBl4e3vj1atXOHToEAYNGoQOHToo3J62bdtiw4YNMDMzQ+XKlREZGYlDhw5lG5ozL5/HoKAgrFy5EoGBgTh//jwcHR2xdetWREREYOHChdLwmAXl3bt3aNasGbp3747o6Gj89NNPaNiwIdq3bw8AMDU1xfLly9G7d2/UqlULPXv2hLW1NeLi4rBnzx54eXlJF1o5nV8rVKgAOzs7REdHy91c3qhRI4wZMwYA5BIGQPVjrXfv3vjjjz/wn//8B0ePHoWXlxcyMjJw8+ZN/PHHH9i/fz9q1679r/eVs7MzzM3NsWLFCpiYmMDIyAj16tXL0315hRWrMqp+r40ePRobNmxAy5YtMXz4cGlY1awWsCx5OT5Uoco1hi
pcXV3h7OyM77//Ho8ePYKpqSm2bdum9D4VVa41fHx8MHDgQISEhODSpUto0aIFdHR0cPv2bWzZsgWLFi1C165dFS6/RYsWsLOzg5eXF2xtbREVFYWlS5eiTZs2OX6m27Zti+3bt6NTp05o06YNYmJisGLFClSuXBmpqal52idZQkJC0KZNGzRs2BDffPMNnj9/Ll0DqrJMDw8PLF++HNOnT0eFChVgY2OTrSU8J//2mBk9ejR+/fVXNG/eHEOHDpWGVS1btiyeP38u16rwb2NV9RyfpSDPp5+aOnUqjh8/jjZt2qBcuXJ4/PgxfvrpJ5QpU0ZuYI9c5XVYpWPHjgkPDw+hq6srypcvL1asWKFwyLNPh1WdPn26qFu3rjA3NxcGBgbC1dVVzJgxQ24YqfT0dDF06FBhbW0tZDKZtMysYb/mzp2bLR5lw6oaGRmJu3fvihYtWghDQ0Nha2srJk2alG0oyydPnoguXboIQ0NDYWFhIQYOHCiuXbuWbZnKYhMi+3BaQghx4cIF4efnJ4yNjYWhoaFo0qSJ+Pvvv+XqKBvWUtlwr4ocOnRIeHl5CQMDA2FqairatWsnDUH46fJUGVZVCCFWr14typcvLw2XmxVHuXLlFA5b9+kwcEJ8GAJs3LhxokKFCkJXV1dYWVmJBg0aiB9//FHuPVckIiJC1K9fXxgYGIhSpUqJ0aNHS8P8fbpPTp48KZo3by5MTEyEkZGRqFatWrYhDq9duyY6deokzM3Nhb6+vqhUqZKYMGGCXJ0DBw6IqlWrCl1dXVGpUiXx66+/Kh1WdfDgwQrj/vnnn4WLi4vQ09MTrq6uIjQ0VOlwgL/88ouoWbOm0NPTExYWFsLHx0ccPHgwW70//vhDABBBQUE57rNP/f7779LyLS0thb+/v3j48KFcnbwMqyrEh+FyS5cuLbS0tOSGoFO2Tz49BwghRGJiohg8eLBwcHAQOjo6ws7OTjRr1kysWrUq1/Xv2rVLVKtWTejr6wtHR0cxe/ZsaVjTrFguXLggevXqJcqWLSv09PSEjY2NaNu2rTh37py0nK1bt4oWLVoIGxsboaurK8qWLSsGDhwo4uPj5dZ39+5d0bVrV+m4qVu3rti9e3e2uF6/fi1++OEH4eTkJG1T165d5Yaf/PQc8eLFC9G3b19hZWUljI2NhZ+fn7h586bCfabs86joc5eYmCgtV1dXV7i7u2cbyjOn86mic9mnso6bY8eOiaCgIGFhYSGMjY2Fv7+/3BDPWY4ePSr8/PyEmZmZ0NfXF87OziIwMFDuPcnp/CqEEN26dRMAxO+//y6VvXv3ThgaGgpdXV25IYQ/3heqHGvv3r0Ts2fPFlWqVJE+jx4eHmLKlCkiOTlZbt+oepwrsnPnTlG5cmVRokQJue8XHx8fhcNmKhruWdVYFVG2HmXndUXbq8r3mhBCXLlyRfj4+Ah9fX1RunRpMW3aNPHzzz9nG7pSCNWOD1WGVVXlGkMRRd+3N27cEL6+vsLY2FhYWVmJAQMGSEPo5vdaQwghVq1aJTw8PISBgYEwMTER7u7uYvTo0eKff/6R6nz6uV65cqVo1KiRKFmypNDT0xPOzs5i1KhRub7fmZmZYubMmaJcuXJCT09P1KxZU+zevTvbcZXX88G2bduEm5ub0NPTE5UrVxbbt29XOjT5pxISEkSbNm2EiYmJACBtZ16vhVQ5ZpS5ePGi8Pb2Fnp6eqJMmTIiJCRELF68WAAQCQkJucaqTH7P8eo4n376eTl8+LDo0KGDKFWqlNDV1RWlSpUSvXr1yjb0fW5k/7+hRFQE7dy5Ex07dsTx48ez/YpKRERE/05wcDBWrlyJ1NRUlW8A/hIxYSAqwtq2bYuoqCjcuXOn0G90JCIi+py8efNG7sbzZ8+eoWLFiqhVq1aOz+ahQhxWlYhUt3nzZly5cgV79uzBokWLmCwQERH9S56enmjcuDHc3NyQmJiIn3/+GSkpKZgwYYKmQyvy2MJAVATJZD
IYGxujR48eWLFiRZEaX5qIiKg4+u9//4utW7fi4cOHkMlkqFWrFiZNmlRgQ/t+zpgwEBERERGRUio/h4GIiIiIiL48TBiIiIiIiEgpdoymPMvMzMQ///wDExMT3oxLRERUTAgh8PLlS5QqVUp6ACWRKpgwUJ79888/cHBw0HQYRERElA8PHjxAmTJlNB0GFSNMGCjPsh5H/+DBA5iammo4GiIiIlJFSkoKHBwcpO9xIlUxYaA8y+qGZGpqyoSBiIiomGF3YsordmAjIiIiIiKlmDAQEREREZFSTBiIiIiIiEgp3sNARJ+tjIwMvH//XtNhEBEVCh0dHWhra2s6DPoMMWEgos+OEAIJCQlISkrSdChERIXK3NwcdnZ2vLGZChQTBiL67GQlCzY2NjA0NOQXJxF99oQQeP36NR4/fgwAsLe313BE9DlhwkBEn5WMjAwpWShZsqSmwyEiKjQGBgYAgMePH8PGxobdk6jA8KZnIvqsZN2zYGhoqOFIiIgKX9a5j/dvUUFiwkBEnyV2QyKiLxHPfaQOTBiIiIiIiEgpJgxEREVE48aNERwcrOkwirS1a9fC3Ny8yCwnN69fv0aXLl1gamoKmUxW7Ebumjx5MmrUqKHpMJTKz/sok8mwY8eOAll/Xj+z4eHhBXIcODo6YuHChf9qGUR5wZueieiL4Th2T6GuL3ZWm0JdX27Cw8PRpEkTvHjxolAultWhR48eaN26dZ7mcXR0RHBwsNyFXX6Wkx/r1q3DiRMn8Pfff8PKygpmZmZqXyfljUwmQ0xMDBwdHTUdClGRxYSBiIiKDQMDA2kkmKKwnNzcvXsXbm5uqFq1ar6XkZGRAZlMBi0tdgogIs3g2YeIqAhJT0/HkCFDYGZmBisrK0yYMAFCCGl6Wloavv/+e5QuXRpGRkaoV68ewsPDpen3799Hu3btYGFhASMjI1SpUgV79+5FbGwsmjRpAgCwsLCATCZDYGCgwhiePXuGXr16oXTp0jA0NIS7uzt+++03uTpbt26Fu7s7DAwMULJkSfj6+uLVq1cAPrRk1K1bF0ZGRjA3N4eXlxfu378vzbt8+XI4OztDV1cXlSpVwoYNG+SWnZSUhIEDB8LW1hb6+vqoWrUqdu/eDSB7F5S7d++iQ4cOsLW1hbGxMerUqYNDhw5J0xs3boz79+9jxIgRkMlk0g2hirqy5BaXTCbDmjVr0KlTJxgaGsLFxQW7du1SuA+z1j1v3jwcP34cMpkMjRs3BgC8ePECffr0gYWFBQwNDdGqVSvcvn1bmi8rtl27dqFy5crQ09NDXFycwnVcu3YNrVq1grGxMWxtbdG7d288ffpUmh4WFoaGDRvC3NwcJUuWRNu2bXH37l25ZTx8+BC9evWCpaUljIyMULt2bZw+fVquzoYNG+Do6AgzMzP07NkTL1++VLrdWfHv3r0blSpVgqGhIbp27YrXr19j3bp1cHR0hIWFBYYNG4aMjAxpvtz2S9ayy5YtC0NDQ3Tq1AnPnj3Ltv6dO3eiVq1a0NfXR/ny5TFlyhSkp6crjfdjL168gL+/P6ytrWFgYAAXFxeEhoaqNC/wYT/Vrl0bJiYmsLOzw1dffSU9F+FjERERqFatGvT19VG/fn1cu3ZNbvrJkyfh7e0NAwMDODg4YNiwYdLni0gTmDAQERUh69atQ4kSJXDmzBksWrQI8+fPx5o1a6TpQ4YMQWRkJDZv3owrV66gW7duaNmypXRhNXjwYKSlpeH48eO4evUqZs+eDWNjYzg4OGDbtm0AgOjoaMTHx2PRokUKY3j79i08PDywZ88eXLt2DUFBQejduzfOnDkDAIiPj0evXr3wzTffICoqCuHh4ejcuTOEEEhPT0fHjh3h4+ODK1euIDIyEkFBQdKF+p9//onhw4fju+++w7Vr1zBw4ED07dsXR48eBQBkZmaiVatWiIiIwK+//oobN25g1qxZSseTT01NRevWrXH48GFcvHgRLVu2RLt27aQL7O3bt6
NMmTKYOnUq4uPjER8fr3A5ucWVZcqUKejevTuuXLmC1q1bw9/fH8+fP1e4zO3bt2PAgAHw9PREfHw8tm/fDgAIDAzEuXPnsGvXLkRGRkIIgdatW8sNg/n69WvMnj0ba9aswfXr12FjY5Nt+UlJSWjatClq1qyJc+fOISwsDImJiejevbtU59WrVxg5ciTOnTuHw4cPQ0tLC506dUJmZqa0/3x8fPDo0SPs2rULly9fxujRo6XpwIekbMeOHdi9ezd2796NY8eOYdasWQq3+eP4Fy9ejM2bNyMsLAzh4eHo1KkT9u7di71792LDhg1YuXIltm7dKs2T2345ffo0+vXrhyFDhuDSpUto0qQJpk+fLrfeEydOoE+fPhg+fDhu3LiBlStXYu3atZgxY0aO8WaZMGECbty4gX379iEqKgrLly+HlZWVSvMCH4YynTZtGi5fvowdO3YgNjZWYWI+atQozJs3D2fPnoW1tTXatWsnbefdu3fRsmVLdOnSBVeuXMHvv/+OkydPYsiQISrHQVTgBFEeJScnCwAiOTlZ06EQZfPmzRtx48YN8ebNm2zTyo3ZXah/eeXj4yPc3NxEZmamVDZmzBjh5uYmhBDi/v37QltbWzx69EhuvmbNmolx48YJIYRwd3cXkydPVrj8o0ePCgDixYsXeY6tTZs24rvvvhNCCHH+/HkBQMTGxmar9+zZMwFAhIeHK1xOgwYNxIABA+TKunXrJlq3bi2EEGL//v1CS0tLREdHK5w/NDRUmJmZ5RhrlSpVxJIlS6TX5cqVEwsWLMhxObnFJYQQAMT48eOl16mpqQKA2Ldvn9JYhg8fLnx8fKTXt27dEgBERESEVPb06VNhYGAg/vjjDyk2AOLSpUs5bue0adNEixYt5MoePHggACjdf0+ePBEAxNWrV4UQQqxcuVKYmJiIZ8+eKaw/adIkYWhoKFJSUqSyUaNGiXr16imNKyv+O3fuSGUDBw4UhoaG4uXLl1KZn5+fGDhwoBBCtf3Sq1cvufdDCCF69Ogh9z42a9ZMzJw5U67Ohg0bhL29vfQagPjzzz8Vxt6uXTvRt29fpdv2KR8fHzF8+HCl08+ePSsASNud9RlcP3eueH31qnh99ap4ePKkMNDXFxv+vyygc2fxTdeucss5ceKE0NLSks5rio7pLDmdA/n9TfnFFgYioiKkfv36cuOoe3p64vbt28jIyMDVq1eRkZGBihUrwtjYWPo7duyY1M1k2LBhmD59Ory8vDBp0iRcuXIlzzFkZGRg2rRpcHd3h6WlJYyNjbF//37pV/vq1aujWbNmcHd3R7du3bB69Wq8ePECAGBpaYnAwED4+fmhXbt2WLRokdyv+lFRUfDy8pJbn5eXF6KiogAAly5dQpkyZVCxYkWVYk1NTcX3338PNzc3mJubw9jYGFFRUUq78CiTW1xZqlWrJv3fyMgIpqamCruc5LSeEiVKoF69elJZyZIlUalSJbl16erqyq1LkcuXL+Po0aNyx4KrqysASMfD7du30atXL5QvXx6mpqbSjb1Z++fSpUuoWbMmLC0tla7H0dERJiYm0mt7e/tct9nQ0BDOzs7Sa1tbWzg6OsLY2FiuLGs5quyXqKgouenAh8/Hp/tk6tSpcvtkwIABiI+Px+vXr3OMGQC+/fZbbN68GTVq1MDo0aPx999/5zrPx86fP4927dqhbNmyMDExgY+PDwBkOx7rVa8u/d/SzAwujo64GRMDALgaHY1fd+6U2wY/Pz9kZmYi5v/rEBU23vRMRFRMpKamQltbG+fPn8/WRSfrQqx///7w8/PDnj17cODAAYSEhGDevHkYOnSoyuuZO3cuFi1ahIULF8Ld3R1GRkYIDg7Gu3fvAADa2to4ePAg/v77bxw4cABLlizBDz/8gNOnT8PJyQmhoaEYNmwYwsLC8Pvvv2P8+PE4ePAg6tevn+u683oj8vfff4+DBw/ixx9/RIUKFWBgYICuXbtKsRY0HR0dudcymU
yu+05BMTAwyPUBXKmpqWjXrh1mz56dbZq9vT0AoF27dihXrhxWr16NUqVKITMzE1WrVpX2jyr7Oz/brGiewth3qampmDJlCjp37pxtmr6+fq7zt2rVCvfv38fevXtx8OBBNGvWDIMHD8aPP/6Y67yvXr2Cn58f/Pz8sHHjRlhbWyMuLg5+fn55Oh5fvX6Nft26YeSUKdmmlS1bVuXlEBUktjAQERUhn95seurUKbi4uEBbWxs1a9ZERkYGHj9+jAoVKsj92dnZSfM4ODjgP//5D7Zv347vvvsOq1evBvDhV2sAcjeaKhIREYEOHTrg66+/RvXq1VG+fHncunVLro5MJoOXlxemTJmCixcvQldXF3/++ac0vWbNmhg3bhz+/vtvVK1aFZs2bQIAuLm5ISIiItv6KleuDODDL/gPHz7Mtr6cYg0MDESnTp3g7u4OOzs7xMbGytXR1dXNdZtzi6uguLm5IT09Xe59fvbsGaKjo/O8rlq1auH69etwdHTMdjwYGRlJyx0/fjyaNWsGNzc3qSUoS7Vq1XDp0iWl92EUFlX2i5ubm8LPx8dq1aqF6OjobPujQoUKKo8yZW1tjYCAAPz6669YuHAhVq1apdJ8N2/exLNnzzBr1ix4e3vD1dVVaUvMmcuXpf+/SE7Gnfv34erkBACo4eaGm3fvKtyGrM8wUWFjwkBEVITExcVh5MiRiI6Oxm+//YYlS5Zg+PDhAICKFSvC398fffr0wfbt2xETE4MzZ84gJCQEe/Z8eMZEcHAw9u/fj5iYGFy4cAFHjx6Fm5sbAKBcuXKQyWTYvXs3njx5gtTUVIUxuLi4SC0IUVFRGDhwIBITE6Xpp0+fxsyZM3Hu3DnExcVh+/btePLkCdzc3BATE4Nx48YhMjIS9+/fx4EDB3D79m0phlGjRmHt2rVYvnw5bt++jfnz52P79u34/vvvAQA+Pj5o1KgRunTpgoMHDyImJgb79u1DWFiY0li3b9+OS5cu4fLly/jqq6+y/Wrt6OiI48eP49GjR3IjCH0st7gKiouLCzp06IABAwbg5MmTuHz5Mr7++muULl0aHTp0yNOyBg8ejOfPn6NXr144e/Ys7t69i/3796Nv377IyMiAhYUFSpYsiVWrVuHOnTs4cuQIRo4cKbeMXr16wc7ODh07dkRERATu3buHbdu2ITIysiA3O1eq7JesVqsff/wRt2/fxtKlS7MdFxMnTsT69esxZcoUXL9+HVFRUdi8eTPGjx+vUhwTJ07Ezp07cefOHVy/fh27d++Wjt3clC1bFrq6uliyZAnu3buHXbt2Ydq0aQrrhqxciaOnTuH67dsIGj8eJc3N0a5ZMwDAyG++wanLl6Wbu2/fvo2dO3fypmfSKCYMRERFSJ8+ffDmzRvUrVsXgwcPxvDhwxEUFCRNDw0NRZ8+ffDdd9+hUqVK6NixI86ePSt1VcjIyMDgwYPh5uaGli1bomLFivjpp58AAKVLl8aUKVMwduxY2NraKr0AGT9+PGrVqgU/Pz80btxYuqDMYmpqiuPHj6N169aoWLEixo8fj3nz5qFVq1YwNDTEzZs30aVLF1SsWBFBQUEYPHgwBg4cCADo2LEjFi1ahB9//BFVqlTBypUrERoaKg05CgDbtm1DnTp10KtXL1SuXBmjR49W2kIwf/58WFhYoEGDBmjXrh38/PxQq1YtuTpTp05FbGwsnJ2dYW1trXA5qsRVUEJDQ+Hh4YG2bdvC09MTQgjs3bs3W5ed3JQqVQoRERHIyMhAixYt4O7ujuDgYJibm0NLSwtaWlrYvHkzzp8/j6pVq2LEiBGYO3eu3DJ0dXVx4MAB2NjYoHXr1nB3d89xVCp1ym2/1K9fH6tXr8aiRYtQvXp1HDhwIFsi4Ofnh927d+PAgQOoU6cO6tevjwULFqBcuXIqxaCrq4tx48ahWrVqaNSoEbS1tbF582aV5rW2tsbatWuxZcsWVK5cGbNmzVLalWlqcDBGzZ4Nrx49kP
jsGbYuWQLd/99O90qVsD80FLdu3YK3tzdq1qyJiRMnolSpUirFQaQOMiE+GuCbSAUpKSkwMzNDcnIyTE1NNR0OkZy3b98iJiYGTk5OKvVZJiIqbG8+ee6CIgb5fNhfTudAfn9TfrGFgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMxczx48fRrl07lCpVCjKZDDt27JCmvX//HmPGjJGezFqqVCn06dMH//zzj9wynj9/Dn9/f5iamsLc3Bz9+vVTOh47EREREX3ZmDAUM69evUL16tWxbNmybNNev36NCxcuYMKECbhw4QK2b9+O6OhotG/fXq6ev78/rl+/joMHD2L37t04fvy43DjvRERERERZSmg6AMqbVq1aoVWrVgqnmZmZ4eDBg3JlS5cuRd26dREXF4eyZcsiKioKYWFhOHv2LGrXrg0AWLJkCVq3bo0ff/yRD4YhIiIiIjlsYfjMJScnQyaTwdzcHAAQGRkJc3NzKVkAAF9fX2hpaeH06dMKl5GWloaUlBS5PyIiIiL6MjBh+Iy9ffsWY8aMQa9evaQnOiYkJMDGxkauXokSJWBpaYmEhASFywkJCYGZmZn05+DgoPbYib5EQggEBQXB0tISMpkMly5dynWe2NhYlesWVY0bN0ZwcHCOddauXSv98EFERIWLXZI+U+/fv0f37t0hhMDy5cv/1bLGjRuHkSNHSq9TUlKYNFDxNNmskNeXnKfqYWFhWLt2LcLDw1G+fHlYWVmpKbCiZfv27dDR0ZFeOzo6Ijg4WC6J6NGjB1q3bq2B6IiIiAnDZygrWbh//z6OHDkitS4AgJ2dHR4/fixXPz09Hc+fP4ednZ3C5enp6UFPT0+tMRMRcPfuXdjb26NBgwaaDqVQWVpa5lrHwMAABgYGhRANERF9il2SPjNZycLt27dx6NAhlCxZUm66p6cnkpKScP78eansyJEjyMzMRL169Qo7XCL6f4GBgRg6dCji4uIgk8ng6OgI4EOrQ8OGDWFubo6SJUuibdu2uHv3rtLlvHjxAv7+/rC2toaBgQFcXFwQGhoqTX/w4AG6d+8Oc3NzWFpaokOHDoiNjVW6vPDwcMhkMuzZswfVqlWDvr4+6tevj2vXrsnV27ZtG6pUqQI9PT04Ojpi3rx5ctN/+uknuLi4QF9fH7a2tujatas07eMuSY0bN8b9+/cxYsQIyGQyyGQyAPJdkm7dugWZTIabN2/KrWPBggVwdnaWXl+7dg2tWrWCsbExbG1t0bt3bzx9+lTpthIRkWJMGIqZ1NRUXLp0SeqvHBMTg0uXLiEuLg7v379H165dce7cOWzcuBEZGRlISEhAQkIC3r17BwBwc3NDy5YtMWDAAJw5cwYREREYMmQIevbsyRGSiDRo0aJFmDp1KsqUKYP4+HicPXsWwIehlEeOHIlz587h8OHD0NLSQqdOnZCZmalwORMmTMCNGzewb98+REVFYfny5VLXpvfv38PPzw8mJiY4ceIEIiIiYGxsjJYtW0rnCGVGjRqFefPm4ezZs7C2tka7du3w/v17AMD58+fRvXt39OzZE1evXsXkyZMxYcIErF27FgBw7tw5DBs2DFOnTkV0dDTCwsLQqFEjhevZvn07ypQpg6lTpyI+Ph7x8fHZ6lSsWBG1a9fGxo0b5co3btyIr776CgCQlJSEpk2bombNmjh37hzCwsKQmJiI7t2757idRESUHbskFTPnzp1DkyZNpNdZ9xYEBARg8uTJ2LVrFwCgRo0acvMdPXoUjRs3BvDhS3XIkCFo1qwZtLS00KVLFyxevLhQ4icixczMzGBiYgJtbW257oFdunSRq/fLL7/A2toaN27cQNWqVbMtJy4uDjVr1pRGQstqqQCA33//HZmZmVizZo30y31oaCjMzc0RHh6OFi1aKI1v0qRJaN68OQBg3bp1KFOmDP788090794d8+fPR7NmzTBhwgQAHy7ob9y4gblz5y
IwMBBxcXEwMjJC27ZtYWJignLlyqFmzZoK12NpaQltbW2YmJgo7SYJfHiezNKlSzFt2jQAH1odzp8/j19//RXAhyGla9asiZkzZ8rtOwcHB9y6dQsVK1ZUumwiIpLHhKGYady4MYQQSqfnNC2LpaUlNm3aVJBhEZGa3L59GxMnTsTp06fx9OlTqWUhLi5OYcLw7bffokuXLrhw4QJatGiBjh07SvdEXL58GXfu3IGJiYncPG/fvs2xmxPwoTtjFktLS1SqVAlRUVEAgKioKHTo0EGuvpeXFxYuXIiMjAw0b94c5cqVQ/ny5dGyZUu0bNkSnTp1gqGhYd53yP/r2bMnvv/+e5w6dQr169fHxo0bUatWLbi6ukrbevToURgbG2eb9+7du0wYiIjygAkDEVER1q5dO5QrVw6rV69GqVKlkJmZiapVqyrtQtSqVSvcv38fe/fuxcGDB9GsWTMMHjwYP/74I1JTU+Hh4ZGtKw8AWFtbq20bTExMcOHCBYSHh+PAgQOYOHEiJk+ejLNnz+Z7qFQ7Ozs0bdoUmzZtQv369bFp0yZ8++230vTU1FS0a9cOs2fPzjavvb19fjeFiOiLxHsYiIiKqGfPniE6Ohrjx49Hs2bN4ObmhhcvXuQ6n7W1NQICAvDrr79i4cKFWLVqFQCgVq1auH37NmxsbFChQgW5PzOznIecPXXqlPT/Fy9e4NatW3BzcwPw4d6oiIgIufoRERGoWLEitLW1AXx43ouvry/mzJmDK1euIDY2FkeOHFG4Ll1dXWRkZOS6nf7+/vj9998RGRmJe/fuoWfPntK0WrVq4fr163B0dMy2rUZGRrkum4iI/ocJAxFREWVhYYGSJUti1apVuHPnDo4cOSL3TBRFJk6ciJ07d+LOnTu4fv06du/eLV3Y+/v7w8rKCh06dMCJEycQExOD8PBwDBs2DA8fPsxxuVOnTsXhw4dx7do1BAYGwsrKCh07dgQAfPfddzh8+DCmTZuGW7duYd26dVi6dCm+//57AMDu3buxePFiXLp0Cffv38f69euRmZmJSpUqKVyXo6Mjjh8/jkePHuU4qlHnzp3x8uVLfPvtt2jSpIncwA2DBw/G8+fP0atXL5w9exZ3797F/v370bdvX5WSESIi+h8mDERERZSWlhY2b96M8+fPo2rVqhgxYgTmzp2b4zy6uroYN24cqlWrhkaNGkFbWxubN28GABgaGuL48eMoW7YsOnfuDDc3N/Tr1w9v376Ve16LIrNmzcLw4cPh4eGBhIQE/PXXX9DV1QXw4df8P/74A5s3b0bVqlUxceJETJ06FYGBgQAAc3NzbN++HU2bNoWbmxtWrFiB3377DVWqVFG4rqlTpyI2NhbOzs45dpUyMTFBu3btcPnyZfj7+8tNK1WqFCIiIpCRkYEWLVrA3d0dwcHBMDc3h5YWv/qIiPJCJlS5S5boIykpKTAzM0NycnKuFxlEhe3t27eIiYmBk5MT9PX1NR1OsRceHo4mTZrgxYsX+b7fgIjkvfnkOSaKGCgY1EAVOZ0D+f1N+cWfWYiIiIiISCkmDEREREREpBSHVSUiIqVye/YLERF9/tjCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREVmrVr16r0xGiZTIYdO3aoPR4iIsodn8NARF8M93Xuhbq+qwFX81S/cePGqFGjBhYuXKiegIqAHj16oHXr1tLryZMnY8eOHbh06ZJcvfj4eFhYWBRydEREpAgTBiKiYkQIgYyMDJQoUTxP3wYGBjAwMMi1np2dXSFEQ0REqmCXJCKiIiAwMBDHjh3DokWLIJPJIJPJEBsbi/DwcMhkMuzbtw8eHh7Q09PDyZMnERgYiI4dO8otIzg4GI0bN5ZeZ2ZmIiQkBE5OTjAwMED16tWxdevWHONwdHTEtGnT0KtXLxgZGaF06dJYtmyZXJ24uDh06NABxsbGMDU1Rf
fu3ZGYmChNv3z5Mpo0aQITExOYmprCw8MD586dAyDfJWnt2rWYMmUKLl++LG3z2rVrAch3SWrQoAHGjBkjF8OTJ0+go6OD48ePAwDS0tLw/fffo3Tp0jAyMkK9evUQHh6uwp4nIqLcMGEgIioCFi1aBE9PTwwYMADx8fGIj4+Hg4ODNH3s2LGYNWsWoqKiUK1aNZWWGRISgvXr12PFihW4fv06RowYga+//hrHjh3Lcb65c+eievXquHjxIsaOHYvhw4fj4MGDAD4kIR06dMDz589x7NgxHDx4EPfu3UOPHj2k+f39/VGmTBmcPXsW58+fx9ixY6Gjo5NtPT169MB3332HKlWqSNv88XI+Xt7mzZshhJDKfv/9d5QqVQre3t4AgCFDhiAyMhKbN2/GlStX0K1bN7Rs2RK3b99WaV8REZFyxbNNm4joM2NmZgZdXV0YGhoq7I4zdepUNG/eXOXlpaWlYebMmTh06BA8PT0BAOXLl8fJkyexcuVK+Pj4KJ3Xy8sLY8eOBQBUrFgRERERWLBgAZo3b47Dhw/j6tWriImJkRKa9evXo0qVKjh79izq1KmDuLg4jBo1Cq6urgAAFxcXhesxMDCAsbExSpQokWMXpO7duyM4OBgnT56UEoRNmzahV69ekMlkiIuLQ2hoKOLi4lCqVCkAwPfff4+wsDCEhoZi5syZKu83IiLKji0MRETFQO3atfNU/86dO3j9+jWaN28OY2Nj6W/9+vW4e/dujvNmJRgfv46KigIAREVFwcHBQa71o3LlyjA3N5fqjBw5Ev3794evry9mzZqV6/pyY21tjRYtWmDjxo0AgJiYGERGRsLf3x8AcPXqVWRkZKBixYpy23rs2LF/vW4iImILAxFRsWBkZCT3WktLS66LDgC8f/9e+n9qaioAYM+ePShdurRcPT09PTVF+cHkyZPx1VdfYc+ePdi3bx8mTZqEzZs3o1OnTvlepr+/P4YNG4YlS5Zg06ZNcHd3h7v7h1GvUlNToa2tjfPnz0NbW1tuPmNj43+1LURExISBiKjI0NXVRUZGhkp1ra2tce3aNbmyS5cuSfcKVK5cGXp6eoiLi8ux+5Eip06dyvbazc0NAODm5oYHDx7gwYMHUivDjRs3kJSUhMqVK0vzVKxYERUrVsSIESPQq1cvhIaGKkwYVN3mDh06ICgoCGFhYdi0aRP69OkjTatZsyYyMjLw+PFjqcsSEREVHHZJIiIqIhwdHXH69GnExsbi6dOnyMzMVFq3adOmOHfuHNavX4/bt29j0qRJcgmEiYkJvv/+e4wYMQLr1q3D3bt3ceHCBSxZsgTr1q3LMY6IiAjMmTMHt27dwrJly7BlyxYMHz4cAODr6wt3d3f4+/vjwoULOHPmDPr06QMfHx/Url0bb968wZAhQxAeHo779+8jIiICZ8+elRIORdscExODS5cu4enTp0hLS1NYz8jICB07dsSECRMQFRWFXr16SdMqVqwIf39/9OnTB9u3b0dMTAzOnDmDkJAQ7NmzJ8dtJSKi3DFhICIqIr7//ntoa2ujcuXKsLa2RlxcnNK6fn5+mDBhAkaPHo06derg5cuXcr+6A8C0adMwYcIEhISEwM3NDS1btsSePXvg5OSUYxzfffcdzp07h5o1a2L69OmYP38+/Pz8AHwY7nTnzp2wsLBAo0aN4Ovri/Lly+P3338HAGhra+PZs2fo06cPKlasiO7du6NVq1aYMmWKwnV16dIFLVu2RJMmTWBtbY3ffvtNaVz+/v64fPkyvL29UbZsWblpoaGh6NOnD7777jtUqlQJHTt2xNmzZ7PVIyoK7tnJcv0jKkpk4tNOsES5SElJgZmZGZKTk2FqaqrpcIjkvH37FjExMXBycoK+vr6mwyl2HB0dERwcjODgYE2HQvTZuv70eq51qlhVydeyczoH8vub8ostDEREREREpBQTBiIiIiIiUoqjJBERkSQ2NlbTIRARURHDFgYiIiIiIlKKCQMRER
ERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiISCWTJ09GjRo1VK4fGxsLmUyGS5cuKa3j6OiIhQsXqrzMtWvXwtzcXOX6yshkMuzYsUPpdCEEgoKCYGlpmes2FCWq7M+8vo8A0Lhx4yLz9O/w8HDIZDIkJSWpPE9BxF9Qxx5RccTnMBDRFyPK1a1Q1+d2MypP9Y8fP465c+fi/PnziI+Px59//omOHTuqJzjKUVhYGNauXYvw8HCUL18eVlZWmg4pX2QyWbbj6Pvvv8fQoUM1F5QSsbGxcHJywsWLF/Oc0BCRerGFgYioiHj16hWqV6+OZcuWaTqUL97du3dhb2+PBg0awM7ODiVK5P33NSEE0tPT1RDdv2NsbIySJUtqOgwiKkaYMBARFRGtWrXC9OnT0alTJ5Xnyepe8ssvv6Bs2bIwNjbGoEGDkJGRgTlz5sDOzg42NjaYMWOG3HxxcXHo0KEDjI2NYWpqiu7duyMxMVGuzqxZs2BrawsTExP069cPb9++zbb+NWvWwM3NDfr6+nB1dcVPP/2Uv43/f/Pnz4e7uzuMjIzg4OCAQYMGITU1NVu9HTt2wMXFBfr6+vDz88ODBw/kpu/cuRO1atWCvr4+ypcvjylTpqh88R4YGIihQ4ciLi4OMpkMjo6OAIC0tDQMGzYMNjY20NfXR8OGDXH27FlpvqyuMvv27YOHhwf09PRw8uTJbMvP6qr1xx9/wNvbGwYGBqhTpw5u3bqFs2fPonbt2jA2NkarVq3w5MkTaT5F3Wo6duyIwMBAhduRFXenTp3ktuPTLkmBgYHo2LEjpkyZAmtra5iamuI///kP3r17p3QfpaWl4fvvv0fp0qVhZGSEevXqITw8XGl9ALh58yYaNmwIfX19VK5cGYcOHZLrGubk5AQAqFmzJmQyGRo3bpzj8rI8e/YMvXr1QunSpWFoaAh3d3f89ttv2eqlp6djyJAhMDMzg5WVFSZMmAAhRL636fLly2jSpAlMTExgamoKDw8PnDt3TqWYiYobJgxERMXc3bt3sW/fPoSFheG3337Dzz//jDZt2uDhw4c4duwYZs+ejfHjx+P06dMAgMzMTHTo0AHPnz/HsWPHcPDgQdy7dw89evSQlvnHH39g8uTJmDlzJs6dOwd7e/tsycDGjRsxceJEzJgxA1FRUZg5cyYmTJiAdevW5XtbtLS0sHjxYly/fh3r1q3DkSNHMHr0aLk6r1+/xowZM7B+/XpEREQgKSkJPXv2lKafOHECffr0wfDhw3Hjxg2sXLkSa9euzZY0KbNo0SJMnToVZcqUQXx8vJQUjB49Gtu2bcO6detw4cIFVKhQAX5+fnj+/Lnc/GPHjsWsWbMQFRWFatWqKV3PpEmTMH78eFy4cAElSpTAV199hdGjR2PRokU4ceIE7ty5g4kTJ6q667LJijs0NFRuOxQ5fPgwoqKiEB4ejt9++w3bt2/HlClTlNYfMmQIIiMjsXnzZly5cgXdunVDy5Ytcfv2bYX1MzIy0LFjRxgaGuL06dNYtWoVfvjhB7k6Z86cAQAcOnQI8fHx2L59u0rb+fbtW3h4eGDPnj24du0agoKC0Lt3b2l5WdatW4cSJUrgzJkzWLRoEebPn481a9bke5v8/f1RpkwZnD17FufPn8fYsWOho6OjUsxExY4gyqPk5GQBQCQnJ2s6FKJs3rx5I27cuCHevHmTbdqNSq6F+vdvABB//vlnrvUmTZokDA0NRUpKilTm5+cnHB0dRUZGhlRWqVIlERISIoQQ4sCBA0JbW1vExcVJ069fvy4AiDNnzgghhPD09BSDBg2SW1e9evVE9erVpdfOzs5i06ZNcnWmTZsmPD09hRBCxMTECADi4sWLSuMvV66cWLBggdLpW7ZsESVLlpReh4aGCgDi1KlTUllUVJQAIE6fPi2EEKJZs2Zi5syZcsvZsGGDsLe3l17ntn8XLFggypUrJ71OTU0VOjo6Yu
PGjVLZu3fvRKlSpcScOXOEEEIcPXpUABA7duxQulwh/rdf1qxZI5X99ttvAoA4fPiwVBYSEiIqVaokvfbx8RHDhw+XW1aHDh1EQECA9PrT/aloOydNmiT3PgYEBAhLS0vx6tUrqWz58uXC2NhYOoY+Xvf9+/eFtra2ePTokdxymzVrJsaNG6dwm/ft2ydKlCgh4uPjpbKDBw/KxafK8SLE//bzixcvlNZp06aN+O6776TXPj4+ws3NTWRmZkplY8aMEW5ubipvU2hoqDAzM5OmmZiYiLVr1+YYqzLXnlzL9S+/cjoH8vub8os3PRMRFXOOjo4wMTGRXtva2kJbWxtaWlpyZY8fPwYAREVFwcHBAQ4ODtL0ypUrw9zcHFFRUahTpw6ioqLwn//8R249np6eOHr0KIAP91vcvXsX/fr1w4ABA6Q66enpMDMzy/e2HDp0CCEhIbh58yZSUlKQnp6Ot2/f4vXr1zA0NAQAlChRAnXq1JHmcXV1lWKvW7cuLl++jIiICLkWhYyMjGzLyYu7d+/i/fv38PLyksp0dHRQt25dREXJ39xeu3ZtlZb5ceuDra0tAMDd3V2uLOs9U7fq1avL7RdPT0+kpqbiwYMHKFeunFzdq1evIiMjAxUrVpQrT0tLU3pvRHR0NBwcHGBnZyeV1a1bt0Biz8jIwMyZM/HHH3/g0aNHePfuHdLS0rK9z/Xr14dMJpNee3p6Yt68ecjIyMjXNo0cORL9+/fHhg0b4Ovri27dusHZ2blAtomoqGHCQERUzH3aDUImkyksy8zMLLB1Zt1XsHr1atSrV09umra2dr6WGRsbi7Zt2+Lbb7/FjBkzYGlpiZMnT6Jfv3549+6dyhf6qampmDJlCjp37pxtmr6+fr5iywsjIyOV6n38HmVdyH5a9vF7pqWlJdfnHgDev3//b0LNl9TUVGhra+P8+fPZ3mtjY+NCj2fu3LlYtGgRFi5cKN3/EhwcnOM9GJ/KzzZNnjwZX331Ffbs2YN9+/Zh0qRJ2Lx5c57uQSIqLpgwEBF9Ydzc3PDgwQM8ePBAamW4ceMGkpKSULlyZanO6dOn0adPH2m+U6dOSf+3tbVFqVKlcO/ePfj7+xdIXOfPn0dmZibmzZsntY788ccf2eqlp6fj3Llz0i/U0dHRSEpKgpvbh2Fza9WqhejoaFSoUKFA4gIAZ2dn6OrqIiIiQvrF/f379zh79myhPZ/A2toa8fHx0uuMjAxcu3YNTZo0UTqPjo4OMjIycl325cuX8ebNGxgYGAD48F4bGxvLtUJlqVmzJjIyMvD48WN4e3urFHulSpXw4MEDJCYmSq0pn95ToaurK21XXkRERKBDhw74+uuvAXy4R+fWrVvSsZwl6x6eLKdOnYKLiwu0tbXztU0AULFiRVSsWBEjRoxAr169EBoayoSBPktMGIiIiojU1FTcuXNHeh0TE4NLly7B0tISZcuWLbD1+Pr6wt3dHf7+/li4cCHS09MxaNAg+Pj4SN1phg8fjsDAQNSuXRteXl7YuHEjrl+/jvLly0vLmTJlCoYNGwYzMzO0bNkSaWlpOHfuHF68eIGRI0fmOa4KFSrg/fv3WLJkCdq1a4eIiAisWLEiWz0dHR0MHToUixcvRokSJTBkyBDUr19fSiAmTpyItm3bomzZsujatSu0tLRw+fJlXLt2DdOnT8/XPjMyMsK3336LUaNGSe/HnDlz8Pr1a/Tr1y9fy8yrpk2bYuTIkdizZw+cnZ0xf/78XB9e5ujoiMOHD8PLywt6enqwsLBQWO/du3fo168fxo8fj9jYWEyaNAlDhgyR69aWpWLFivD390efPn0wb9481KxZE0+ePMHhw4dRrVo1tGnTJts8zZs3h7OzMwICAjBnzhy8fPkS48ePB/C/1hUbGxsYGBggLCwMZcqUgb6+vkrd21xcXLB161b8/fffsLCwwPz585GYmJgtYYiLi8PIkSMxcOBAXLhwAUuWLMG8efPytU1v3rzBqF
Gj0LVrVzg5OeHhw4c4e/YsunTpkmu8RMURE4ZiJrcHOwkhMGnSJKxevRpJSUnw8vLC8uXL4eLiItV5/vw5hg4dir/++gtaWlro0qULFi1apJGmZKLClNcHqRW2c+fOyf1anHXRHRAQgLVr1xbYemQyGXbu3ImhQ4eiUaNG0NLSQsuWLbFkyRKpTo8ePXD37l2MHj0ab9++RZcuXfDtt99i//79Up3+/fvD0NAQc+fOxahRo2BkZAR3d/d8/+JevXp1zJ8/H7Nnz8a4cePQqFEjhISEyLVyAIChoSHGjBmDr776Co8ePYK3tzd+/vlnabqfnx92796NqVOnYvbs2dDR0YGrqyv69++fr7iyzJo1C5mZmejduzdevnyJ2rVrY//+/UovwgvaN998g8uXL6NPnz4oUaIERowYkWPrAgDMmzcPI0eOxOrVq1G6dGnExsYqrNesWTO4uLigUaNGSEtLQ69evTB58mSlyw0NDcX06dPx3Xff4dGjR7CyskL9+vXRtm1bhfW1tbWxY8cO9O/fH3Xq1EH58uUxd+5ctGvXTuomVqJECSxevBhTp07FxIkT4e3tnetQrQAwfvx43Lt3D35+fjA0NERQUBA6duyI5ORkuXp9+vTBmzdvULduXWhra2P48OEICgrK1zZpa2vj2bNn6NOnDxITE2FlZYXOnTvnOLIUUXEmE592iKQibd++fYiIiICHhwc6d+6cLWGYPXs2QkJCsG7dOjg5OWHChAm4evUqbty4IZ2UW7Vqhfj4eKxcuRLv379H3759UadOHWzatEmlGFJSUmBmZobk5GSYmpqqYzOJ8u3t27eIiYmBk5NTofRXJyruAgMDkZSUJD0PobBERESgYcOGuHPnzhd3s/D1p9dzrVPFqkq+lp3TOZDf35RfbGEoZlq1aoVWrVopnCaEwMKFCzF+/Hh06NABALB+/XrY2tpix44d6NmzJ6KiohAWFiY9HAgAlixZgtatW+PHH39EqVKlCm1biIjoy/Hnn3/C2NgYLi4uuHPnDoYPHw4vL68vLlkgKo744LbPSExMDBISEuDr6yuVmZmZoV69eoiMjAQAREZGwtzcXG7YP19fX2hpaWW7ISxLWloaUlJS5P6IiIjy4uXLlxg8eDBcXV0RGBiIOnXqYOfOnZoOi4hUwBaGz0hCQgKA/43nncXW1laalpCQABsbG7npJUqUgKWlpVTnUyEhIeyXSUT0mSrI+2Ny0qdPn2z3oxBR8cAWBsrVuHHjkJycLP09ePBA0yERERERUSFhwvAZyXqCZmJiolx5YmKiNM3Ozi7bk0PT09Px/PlzuSdwfkxPTw+mpqZyf0RFHcdzIKIvEc99pA5MGD4jTk5OsLOzw+HDh6WylJQUnD59Gp6engAAT09PJCUl4fz581KdI0eOIDMzM9vTWomKo6wn5b5+/VrDkRARFb6sc9+nT3sn+jd4D0Mxk9uDnYKDgzF9+nS4uLhIw6qWKlVKGnrVzc0NLVu2xIABA7BixQq8f/8eQ4YMQc+ePTlCEn0WtLW1YW5uLrWkGRoaSg+GIiIqCjLfZ+Za5+3bt3laphACr1+/xuPHj2Fubg5tbe38hkeUDROGYia3BzuNHj0ar169QlBQEJKSktCwYUOEhYXJjcW8ceNGDBkyBM2aNZMe3LZ48eJC3xYidcnqXvdp9zsioqLgcWru56YSSfm7RDM3N1faxZgov/jgNsozPviFiouMjAy8f/9e02EQEclp/2f7XOvs6rQrz8vV0dHJsWWB39+UX2xhIKLPlra2NpvliajIiX8Xn2sdPqmeihLe9FzIMjIycOnSJbx48ULToRARERER5YoJg5oFBwfj559/BvAhWfDx8UGtWrXg4OCA8PBwzQZHRERERJQLJgxqtnXrVlSvXh0A8NdffyEmJgY3b97EiBEj8MMPP2g4OiIiIiKinDFhULOnT59KoxXs3bsX3bp1Q8WKFfHNN9/g6tWrGo6OiIiIiChnTBjUzNbWFj
du3EBGRgbCwsLQvHlzAB8erMKbMYmIiIioqOMoSWrWt29fdO/eHfb29pDJZPD19QUAnD59Gq6urhqOjoiIiIgoZ0wY1Gzy5MmoWrUqHjx4gG7dukFPTw/Ah+Eex44dq+HoiIiIiIhyxoShEHTt2jVbWUBAgAYiISIiIiLKGyYMarB48WKV6w4bNkyNkRARERER/TtMGNRgwYIFKtWTyWRMGIiIiIioSGPCoAYxMTGaDoGIiIiIqEBwWNVC8u7dO0RHRyM9PV3ToRARERERqYwJg5q9fv0a/fr1g6GhIapUqYK4uDgAwNChQzFr1iwNR0dERERElDMmDGo2btw4XL58GeHh4dDX15fKfX198fvvv2swMiIiIiKi3PEeBjXbsWMHfv/9d9SvXx8ymUwqr1KlCu7evavByIiIiIiIcscWBjV78uQJbGxsspW/evVKLoEgIiIiIiqKmDCoWe3atbFnzx7pdVaSsGbNGnh6emoqLCIiIiIilbBLkprNnDkTrVq1wo0bN5Ceno5Fixbhxo0b+Pvvv3Hs2DFNh0dERERElCO2MKhZw4YNcenSJaSnp8Pd3R0HDhyAjY0NIiMj4eHhoenwiIiIiIhyxBaGQuDs7IzVq1drOgwiIiIiojxjwlAIMjIy8OeffyIqKgoAULlyZXTo0AElSnD3ExEREVHRxitWNbt+/Trat2+PhIQEVKpUCQAwe/ZsWFtb46+//kLVqlU1HCERERERkXK8h0HN+vfvjypVquDhw4e4cOECLly4gAcPHqBatWoICgrSdHhERERERDliC4OaXbp0CefOnYOFhYVUZmFhgRkzZqBOnToajIyIiIiIKHdsYVCzihUrIjExMVv548ePUaFCBQ1ERERERESkOiYMapCSkiL9hYSEYNiwYdi6dSsePnyIhw8fYuvWrQgODsbs2bM1HSoRERERUY7YJUkNzM3NpSc6A4AQAt27d5fKhBAAgHbt2iEjI0MjMRIRERERqYIJgxocPXpU0yEQERERERUIJgxq4OPjo+kQiIiIiIgKBBOGQvL69WvExcXh3bt3cuXVqlXTUERERERERLljwqBmT548Qd++fbFv3z6F03kPAxEREREVZRwlSc2Cg4ORlJSE06dPw8DAAGFhYVi3bh1cXFywa9cuTYdHRERERJQjtjCo2ZEjR7Bz507Url0bWlpaKFeuHJo3bw5TU1OEhISgTZs2mg6RiIiIiEgptjCo2atXr2BjYwPgwxOenzx5AgBwd3fHhQsXNBkaEREREVGumDCoWaVKlRAdHQ0AqF69OlauXIlHjx5hxYoVsLe313B0REREREQ5Y5ckNRs+fDji4+MBAJMmTULLli2xceNG6OrqYu3atZoNjoiIiIgoF0wY1Ozrr7+W/u/h4YH79+/j5s2bKFu2LKysrDQYGRERERFR7pgwFDJDQ0PUqlVL02EQEREREamECYMajBw5UuW68+fPV2MkRERERET/DhMGNbh48aJK9WQymZojISIiIiL6d5gwqMHRo0c1HQIRERERUYHgsKpERERERKQUEwYiIiIiIlKKCQMRERERESnFhOEzk5GRgQkTJsDJyQkGBgZwdnbGtGnTIISQ6gghMHHiRNjb28PAwAC+vr64ffu2BqMmIiIioqKKCcNnZvbs2Vi+fDmWLl2KqKgozJ49G3PmzMGSJUukOnPmzMHixYuxYsUKnD59GkZGRvDz88Pbt281GDkRERERFUVMGNRs3bp12LNnj/R69OjRMDc3R4MGDXD//v0CX9/ff/+NDh06oE2bNnB0dETXrl3RokULnDlzBsCH1oWFCxdi/Pjx6NChA6pVq4b169fjn3/+wY4dOwo8HiIiIiIq3pgwqNnMmTNhYGAAAIiMjMSyZcswZ84cWFlZYcSIEQW+vgYNGuDw4cO4desWAODy5cs4efIkWrVqBQCIiYlBQkICfH19pXnMzMxQr149REZGKlxmWloaUlJS5P6IiIiI6MvA5zCo2YMHD1
ChQgUAwI4dO9ClSxcEBQXBy8sLjRs3LvD1jR07FikpKXB1dYW2tjYyMjIwY8YM+Pv7AwASEhIAALa2tnLz2draStM+FRISgilTphR4rERERERU9LGFQc2MjY3x7NkzAMCBAwfQvHlzAIC+vj7evHlT4Ov7448/sHHjRmzatAkXLlzAunXr8OOPP2LdunX5Xua4ceOQnJws/T148KAAIyYiIiKioowtDGrWvHlz9O/fHzVr1sStW7fQunVrAMD169fh6OhY4OsbNWoUxo4di549ewIA3N3dcf/+fYSEhCAgIAB2dnYAgMTERNjb20vzJSYmokaNGgqXqaenBz09vQKPlYiIiIiKPrYwqNmyZcvg6emJJ0+eYNu2bShZsiQA4Pz58+jVq1eBr+/169fQ0pJ/W7W1tZGZmQkAcHJygp2dHQ4fPixNT0lJwenTp+Hp6Vng8RARERFR8cYWBjUzNzfH0qVLs5Wr656Adu3aYcaMGShbtiyqVKmCixcvYv78+fjmm28AADKZDMHBwZg+fTpcXFzg5OSECRMmoFSpUujYsaNaYiIiIiKi4osJg5rExcXJvS5btmyhrHfJkiWYMGECBg0ahMePH6NUqVIYOHAgJk6cKNUZPXo0Xr16haCgICQlJaFhw4YICwuDvr5+ocRIRERERMWHTHz8CGAqMFpaWpDJZBBCQCaTISMjQ9MhFZiUlBSYmZkhOTkZpqammg6HiIioWHFf555rnasBVwt8vfz+pvxiC4OaZN0zQERERERUnPGmZyIiIiIiUootDGqwa9culeu2b99ejZEQEREREf07TBjUQNXRhj63exuIiIiI6PPDhEENeP8CEREREX0ueA9DIXr79q2mQyAiIiIiyhMmDGqWkZGBadOmoXTp0jA2Nsa9e/cAABMmTMDPP/+s4eiIiIiIiHLGhEHNZsyYgbVr12LOnDnQ1dWVyqtWrYo1a9ZoMDIiIiIiotwxYVCz9evXY9WqVfD394e2trZUXr16ddy8eVODkRERERER5Y4Jg5o9evQIFSpUyFaemZmJ9+/fayAiIiIiIiLVMWFQs8qVK+PEiRPZyrdu3YqaNWtqICIiIiIiItVxWFU1mzhxIgICAvDo0SNkZmZi+/btiI6Oxvr167F7925Nh0dERERElCO2MKhZhw4d8Ndff+HQoUMwMjLCxIkTERUVhb/++gvNmzfXdHhERERERDliC0Mh8Pb2xsGDBzUdBhERERFRnrGFgYiIiIiIlGILgxpYWFhAJpOpVPf58+dqjoaIiIiIKP+YMKjBwoULpf8/e/YM06dPh5+fHzw9PQEAkZGR2L9/PyZMmKChCImIiIiIVCMTQghNB/E569KlC5o0aYIhQ4bIlS9duhSHDh3Cjh07NBPYv5CSkgIzMzMkJyfD1NRU0+EQEREVK+7r3HOtczXgaoGvl9/flF+8h0HN9u/fj5YtW2Yrb9myJQ4dOqSBiIiIiIiIVMeEQc1KliyJnTt3ZivfuXMnSpYsqYGIiIiIiIhUx3sY1GzKlCno378/wsPDUa9ePQDA6dOnERYWhtWrV2s4OiIiIiKinDFhULPAwEC4ublh8eLF2L59OwDAzc0NJ0+elBIIIiIiIqKiiglDIahXrx42btyo6TCIiIiIiPKMCUMhyMjIwI4dOxAVFQUAqFKlCtq3bw9tbW0NR0ZERERElDMmDGp2584dtGnTBg8fPkSlSpUAACEhIXBwcMCePXvg7Oys4QiJiIiIiJTjKElqNmzYMJQvXx4PHjzAhQsXcOHCBcTFxcHJyQnDhg3TdHhERERERDliC4OaHTt2DKdOnYKlpaVUVrJkScyaNQteXl4ajIyIiIiIKHdsYVAzPT09vHz5Mlt5amoqdHV1NRAREREREZHqmDCoWdu2bREUFITTp09DCAEhBE6dOoX//Oc/aN++vabDIyIiIiLKERMGNVu8eDGcnZ3h6ekJfX196Ovrw8vLCxUqVMCiRYs0HR4RERERUY54D4
OamZubY+fOnbh9+zZu3rwJ4MOD2ypUqKDhyIiIiIiIcseEoZC4uLjAxcVF02EQEREREeUJEwY1E0Jg69atOHr0KB4/fozMzEy56du3b9dQZEREREREuWPCoGbBwcFYuXIlmjRpAltbW8hkMk2HRERERESkMiYMarZhwwZs374drVu31nQoRERERER5xlGS1MzMzAzly5fXdBhERERERPnChEHNJk+ejClTpuDNmzeaDoWIiIiIKM/YJUnNunfvjt9++w02NjZwdHSEjo6O3PQLFy5oKDIiIiIiotwxYVCzgIAAnD9/Hl9//TVveiYiIiKiYocJg5rt2bMH+/fvR8OGDTUdChERERFRnvEeBjVzcHCAqamppsMgIiIiIsoXJgxqNm/ePIwePRqxsbGaDoWIiIiIKM/YJUnNvv76a7x+/RrOzs4wNDTMdtPz8+fPNRQZEREREVHumDCo2cKFCzUdAhERERFRvjFhULOAgABNh0BERERElG+8h+Ez9OjRI3z99dcoWbIkDAwM4O7ujnPnzknThRCYOHEi7O3tYWBgAF9fX9y+fVuDERMRERFRUcWE4TPz4sULeHl5QUdHB/v27cONGzcwb948WFhYSHXmzJmDxYsXY8WKFTh9+jSMjIzg5+eHt2/fajByIiIiIiqK2CXpMzN79mw4ODggNDRUKnNycpL+L4TAwoULMX78eHTo0AEAsH79etja2mLHjh3o2bNnocdMREREREUXWxjU4MqVK8jMzNTIunft2oXatWujW7dusLGxQc2aNbF69WppekxMDBISEuDr6yuVmZmZoV69eoiMjFS4zLS0NKSkpMj9EREREdGXgQmDGtSsWRNPnz4FAJQvXx7Pnj0rtHXfu3cPy5cvh4uLC/bv349vv/0Ww4YNw7p16wAACQkJAABbW1u5+WxtbaVpnwoJCYGZmZn05+DgoN6NICIiIqIigwmDGpibmyMmJgYAEBsbW6itDZmZmahVqxZmzpyJmjVrIigoCAMGDMCKFSvyvcxx48YhOTlZ+nvw4EEBRkxERERERRnvYVCDLl26wMfHB/b29pDJZKhduza0tbUV1r13716Brtve3h6VK1eWK3Nzc8O2bdsAAHZ2dgCAxMRE2NvbS3USExNRo0YNhcvU09ODnp5egcZJRERERMUDEwY1WLVqFTp37ow7d+5g2LBhGDBgAExMTApl3V5eXoiOjpYru3XrFsqVKwfgww3QdnZ2OHz4sJQgpKSk4PTp0/j2228LJUYiIiIiKj6YMKhJy5YtAQDnz5/H8OHDCy1hGDFiBBo0aICZM2eie/fuOHPmDFatWoVVq1YBAGQyGYKDgzF9+nS4uLjAyckJEyZMQKlSpdCxY8dCiZGIiIiIig8mDGr28fCmDx8+BACUKVNGbeurU6cO/vzzT4wbNw5Tp06Fk5MTFi5cCH9/f6nO6NGj8erVKwQFBSEpKQkNGzZEWFgY9PX11RYXERERERVPMiGE0HQQn7PMzExMnz4d8+bNQ2pqKgDAxMQE3333HX744QdoaRW/+85TUlJgZmaG5ORkmJqaajocIiKiYsV9nXuuda4GXC3w9fL7m/KLLQxq9sMPP+Dnn3/GrFmz4OXlBQA4efIkJk+ejLdv32LGjBkajpCIiIiISDkmDGq2bt06rFmzBu3bt5fKqlWrhtKlS2PQoEFMGIiIiIioSCt+/WGKmefPn8PV1TVbuaurK54/f66BiIiIiIiIVMeEQc2qV6+OpUuXZitfunQpqlevroGIiIiIiIhUxy5JajZnzhy0adMGhw4dgqenJwAgMjISDx48wN69ezUcHRERERFRztjCoGY+Pj64desWOnXqhKSkJCQlJaFz586Ijo6Gt7e3psMjIiIiIsoRWxgKQalSpXhzMxEREREVS2xhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMaiSEQFxcHN6+favpUIiIiIiI8oUJgxoJIVChQgU8ePBA06EQEREREeULEw
Y10tLSgouLC549e6bpUIiIiIiI8oUJg5rNmjULo0aNwrVr1zQdChERERFRnvE5DGrWp08fvH79GtWrV4euri4MDAzkpj9//lxDkRERERER5Y4Jg5otXLhQ0yEQEREREeUbEwY1CwgI0HQIRERERET5xnsYCsHdu3cxfvx49OrVC48fPwYA7Nu3D9evX9dwZEREREREOWPCoGbHjh2Du7s7Tp8+je3btyM1NRUAcPnyZUyaNEnD0RERERER5YwJg5qNHTsW06dPx8GDB6GrqyuVN23aFKdOndJgZEREREREuWPCoGZXr15Fp06dspXb2Njg6dOnGoiIiIiIiEh1TBjUzNzcHPHx8dnKL168iNKlS2sgIiIiIiIi1TFhULOePXtizJgxSEhIgEwmQ2ZmJiIiIvD999+jT58+mg6PiIiIiChHTBjUbObMmXB1dYWDgwNSU1NRuXJlNGrUCA0aNMD48eM1HR4RERERUY74HAY109XVxerVqzFhwgRcu3YNqampqFmzJlxcXDQdGhERERFRrpgwFJKyZcvCwcEBACCTyTQcDRERERGRatglqRD8/PPPqFq1KvT19aGvr4+qVatizZo1mg6LiIiIiChXbGFQs4kTJ2L+/PkYOnQoPD09AQCRkZEYMWIE4uLiMHXqVA1HSERERESkHBMGNVu+fDlWr16NXr16SWXt27dHtWrVMHToUCYMRERERFSksUuSmr1//x61a9fOVu7h4YH09HQNREREREREpDomDGrWu3dvLF++PFv5qlWr4O/vr4GIiIiIiIhUxy5JajBy5Ejp/zKZDGvWrMGBAwdQv359AMDp06cRFxfHB7cRERERUZHHhEENLl68KPfaw8MDAHD37l0AgJWVFaysrHD9+vVCj42IiIiIKC+YMKjB0aNHNR0CEREREVGB4D0MRERERESkFFsY1Ozt27dYsmQJjh49isePHyMzM1Nu+oULFzQUGRERERFR7pgwqFm/fv1w4MABdO3aFXXr1oVMJtN0SEREREREKmPCoGa7d+/G3r174eXlpelQiIiIiIjyjPcwqFnp0qVhYmKi6TCIiIiIiPKFCYOazZs3D2PGjMH9+/c1HQoRERERUZ6xS5Ka1a5dG2/fvkX58uVhaGgIHR0duenPnz/XUGRERERERLljwqBmvXr1wqNHjzBz5kzY2trypmciIiIiKlaYMKjZ33//jcjISFSvXl3ToRARERER5RnvYVAzV1dXvHnzRtNhEBERERHlCxMGNZs1axa+++47hIeH49mzZ0hJSZH7IyIiIiIqypgwqFnLli0RGRmJZs2awcbGBhYWFrCwsIC5uTksLCzUvv5Zs2ZBJpMhODhYKnv79i0GDx6MkiVLwtjYGF26dEFiYqLaYyEiIiKi4of3MKjZ0aNHNbbus2fPYuXKlahWrZpc+YgRI7Bnzx5s2bIFZmZmGDJkCDp37oyIiAgNRUpERERERRUTBjXz8fHRyHpTU1Ph7++P1atXY/r06VJ5cnIyfv75Z2zatAlNmzYFAISGhsLNzQ2nTp1C/fr1NRIvERERERVNTBjU7Pjx4zlOb9SokVrWO3jwYLRp0wa+vr5yCcP58+fx/v17+Pr6SmWurq4oW7YsIiMjFSYMaWlpSEtLk17z3gsiIiKiLwcTBjVr3LhxtrKPn8WQkZFR4OvcvHkzLly4gLNnz2ablpCQAF1dXZibm8uV29raIiEhQeHyQkJCMGXKlAKPk4iIiIiKPt70rGYvXryQ+3v8+DHCwsJQp04dHDhwoMDX9+DBAwwfPhwbN26Evr5+gSxz3LhxSE5Olv4ePHhQIMslIiIioqKPLQxqZmZmlq2sefPm0NXVxciRI3H+/PkCXd/58+fx+PFj1KpVSyrLyMjA8ePHsXTpUuzfvx/v3r1DUlKSXCtDYmIi7OzsFC5TT08Penp6BRonERERERUPTBg0xNbWFtHR0QW+3GbNmuHq1atyZX379oWrqyvGjBkDBwcH6Ojo4P
Dhw+jSpQsAIDo6GnFxcfD09CzweIiIiL4kUa5uuVcax8svKl54xKrZlStX5F4LIRAfH49Zs2ahRo0aBb4+ExMTVK1aVa7MyMgIJUuWlMr79euHkSNHwtLSEqamphg6dCg8PT05QhIRERERZcOEQc1q1KgBmUwGIYRcef369fHLL79oJKYFCxZAS0sLXbp0QVpaGvz8/PDTTz9pJBYiIiIiKtqYMKhZTEyM3GstLS1YW1sX2A3JqggPD5d7ra+vj2XLlmHZsmWFFgMRERERFU9MGNSsXLlymg6BiIiIiCjfmDAUgsOHD+Pw4cN4/PgxMjMz5aZpqlsSEREREZEqmDCo2ZQpUzB16lTUrl0b9vb2cg9tIyIiIiIq6pgwqNmKFSuwdu1a9O7dW9OhEBERERHlGZ/0rGbv3r1DgwYNNB0GEREREVG+MGFQs/79+2PTpk2aDoOIiIiIKF/YJUnN3r59i1WrVuHQoUOoVq0adHR05KbPnz9fQ5EREREREeWOCYOaXblyRXqi87Vr1+Sm8QZoIiIiIirqmDCo2dGjRzUdAhERERFRvvEeBiIiIiIiUootDEREREQqiHJ1y7VO93G8tKLPD1sYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREREREpxTtziIiI6Iunyg3NRF8qtjAQEREREZFSTBiIiIiIiEgpdkkiIiKiLx6fn0CkHFsYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiIiIiIiUqqEpgMgIiIi+lSUq1uuddxuRhVCJETEFgYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBERERERErxpmciIiIqcrqPy/0S5WohxEFEbGEgIiIiIqIcMGH4zISEhKBOnTowMTGBjY0NOnbsiOjoaLk6b9++xeDBg1GyZEkYGxujS5cuSExM1FDERERERFSUMWH4zBw7dgyDBw/GqVOncPDgQbx//x4tWrTAq1evpDojRozAX3/9hS1btuDYsWP4559/0LlzZw1GTURERERFFe9h+MyEhYXJvV67di1sbGxw/vx5NGrUCMnJyfj555+xadMmNG3aFAAQGhoKNzc3nDp1CvXr19dE2ERERERURLGF4TOXnJwMALC0tAQAnD9/Hu/fv4evr69Ux9XVFWXLlkVkZKTCZaSlpSElJUXuj4iIiIi+DEwYPmOZmZkIDg6Gl5cXqlatCgBISEiArq4uzM3N5era2toiISFB4XJCQkJgZmYm/Tk4OKg7dCIiIiIqIpgwfMYGDx6Ma9euYfPmzf9qOePGjUNycrL09+DBgwKKkIiIiIiKOt7D8JkaMmQIdu/ejePHj6NMmTJSuZ2dHd69e4ekpCS5VobExETY2dkpXJaenh709PTUHTIRERERFUFsYfjMCCEwZMgQ/Pnnnzhy5AicnJzkpnt4eEBHRweHDx+WyqKjoxEXFwdPT8/CDpeIiIiIiji2MHxmBg8ejE2bNmHnzp0wMTGR7kswMzODgYEBzMzM0K9fP4wcORKWlpYwNTXF0KFD4enpyRGSiIiIiCgbJgyfmeXLlwMAGjduLFceGhqKwMBAAMCCBQugpaWFLl26IC0tDX5+fvjpp58KOVIiIiL1i3J1U63iOF4SESnDT8dnRgiRax19fX0sW7YMy5YtK4SIiIiIiKg44z0MRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiU4k3PRERE9NnqztGPiP41tjAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFoQOIiIioUEW5uuVeiaMbER
UZbGEgIiIiIiKlmL4TERFRoeKzEYiKF7YwEBERERGRUkwYiIiIiIhIKbYJEhERkUo3IrvdjMq1jvs694IIh4iKELYwEBERERGRUkwYiIiIiIhIKXZJIiIiomKJ3Z+ICgdbGIiIiIiISCkmDEREREREpBS7JBEREZFqD1NjFyCiLxJbGIiIiIiISCkmDEREREREpBS7JBERERWwgnoIGhFRUcAWBiIiIiIiUootDFT0TDZToU6y+uMgIsonVW4gvlpA62JrBhGpG1sYiIiIiIhIKSYMRERERESkFLskERHRF8FdhWcIXA0oqI5Chacwuz8R0ZeJLQxERERERKQUEwYiIiIiIlKKXZKIiEhjilo3oaIWjyojIEGFLklERP8GWxiIiIiIiEgpJgxERERERKQU2zGJiIiKKFVGQFKFKl2tiIiUYQsDEREREREpxYSBiIiIiIiUYpckIiINcxy7J9c6sbPaFLvlfK4KqnsPuwkRUXHBFgYiIiIiIlKKLQxERArwV/acqfJ8gIK6YZeIiDSLLQxERERERKQUEwYiIiIiIlKK7cVEuSiOXVOKY8xUvBRmdyN2fyIi0iy2MHzBli1bBkdHR+jr66NevXo4c+aMpkMiIiIioiKGCcMX6vfff8fIkSMxadIkXLhwAdWrV4efnx8eP36s6dCIiIiIqAiRCSGEpoOgwlevXj3UqVMHS5cuBQBkZmbCwcEBQ4cOxdixY3OcNyUlBWZmZkhOToapqWnBBzfZTIU6yblWUaVbTkEpzO49RW67CvH9itX/qkDWpRJVtkul5agQTxE75k3ccj4HEJH6XQ24WuDLVPv3N3222MLwBXr37h3Onz8PX19fqUxLSwu+vr6IjIzUYGREREREVNTwLrEv0NOnT5GRkQFbW1u5cltbW9y8eTNb/bS0NKSlpUmvk5M//NKZkpKingDTVGj0UmHdmWmvCyAY1ahtXyhQ5LarEN+vFFnBrEslqmyXKorYPlRFxpuMAlkOEeWfOr5XspbJziWUV0wYKFchISGYMmVKtnIHBwcNRPP/ZhVQd5ECYrZQ0xGoR4FtVwG9XyotpYgdGwUWT1HbLiJSK7Nv1feZf/nyJczMeE4h1TFh+AJZWVlBW1sbiYmJcuWJiYmws7PLVn/cuHEYOXKk9DozMxPPnz9HyZIlIZPJCjS2lJQUODg44MGDB+xfqUbcz4WD+7lwcD8XDu7nwqOufS2EwMuXL1GqVKkCWyZ9GZgwfIF0dXXh4eGBw4cPo2PHjgA+JAGHDx/GkCFDstXX09ODnp6eXJm5ublaYzQ1NeUXUiHgfi4c3M+Fg/u5cHA/Fx517Gu2LFB+MGH4Qo0cORIBAQGoXbs26tati4ULF+LVq1fo27evpkMjIiIioiKECcMXqkePHnjy5AkmTpyIhIQE1KhRA2FhYdluhCYiIiKiLxsThi/YkCFDFHZB0iQ9PT1MmjQpWxcoKljcz4WD+7lwcD8XDu7nwsN9TUUNH9xGRERERERK8cFtRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJAhW7ZsmVwdHSEvr4+6tWrhzNnzuRYf8uWLXB1dYW+vj7c3d2xd+/eQoq0eMvLfl69ejW8vb1hYWEBCwsL+Pr65vq+0Ad5PZ6zbN68GTKZTHp4IuUsr/s5KSkJgwcPhr29PfT09FCxYkWeO1SQ1/28cOFCVKpUCQYGBnBwcMCIESPw9u3bQoq2eDp+/DjatWuHUqVKQSaTYceOHbnOEx4ejlq1akFPTw8VKlTA2rVr1R4nkRxBVIg2b94sdHV1xS+//CKuX78uBgwYIMzNzUViYqLC+hEREUJbW1vMmTNH3LhxQ4wfP17o6OiIq1evFnLkxUte9/NXX30lli1bJi5evCiioqJEYGCgMDMzEw8fPizkyIuXvO7nLDExMaJ06dLC29tbdOjQoXCCLcbyup/T0tJE7dq1RevWrcXJkydFTEyMCA8PF5cuXSrkyI
uXvO7njRs3Cj09PbFx40YRExMj9u/fL+zt7cWIESMKOfLiZe/eveKHH34Q27dvFwDEn3/+mWP9e/fuCUNDQzFy5Ehx48YNsWTJEqGtrS3CwsIKJ2AiIQQTBipUdevWFYMHD5ZeZ2RkiFKlSomQkBCF9bt37y7atGkjV1avXj0xcOBAtcZZ3OV1P38qPT1dmJiYiHXr1qkrxM9CfvZzenq6aNCggVizZo0ICAhgwqCCvO7n5cuXi/Lly4t3794VVoifhbzu58GDB4umTZvKlY0cOVJ4eXmpNc7PiSoJw+jRo0WVKlXkynr06CH8/PzUGBmRPHZJokLz7t07nD9/Hr6+vlKZlpYWfH19ERkZqXCeyMhIufoA4Ofnp7Q+5W8/f+r169d4//49LC0t1RVmsZff/Tx16lTY2NigX79+hRFmsZef/bxr1y54enpi8ODBsLW1RdWqVTFz5kxkZGQUVtjFTn72c4MGDXD+/Hmp29K9e/ewd+9etG7dulBi/lLwe5CKAj7pmQrN06dPkZGRAVtbW7lyW1tb3Lx5U+E8CQkJCusnJCSoLc7iLj/7+VNjxoxBqVKlsn1J0f/kZz+fPHkSP//8My5dulQIEX4e8rOf7927hyNHjsDf3x979+7FnTt3MGjQILx//x6TJk0qjLCLnfzs56+++gpPnz5Fw4YNIYRAeno6/vOf/+C///1vYYT8xVD2PZiSkoI3b97AwMBAQ5HRl4QtDEQkZ9asWdi8eTP+/PNP6Ovrazqcz8bLly/Ru3dvrF69GlZWVpoO57OWmZkJGxsbrFq1Ch4eHujRowd++OEHrFixQtOhfVbCw8Mxc+ZM/PTTT7hw4QK2b9+OPXv2YNq0aZoOjYgKGFsYqNBYWVlBW1sbiYmJcuWJiYmws7NTOI+dnV2e6lP+9nOWH3/8EbNmzcKhQ4dQrVo1dYZZ7OV1P9+9exexsbFo166dVJaZmQkAKFGiBKKjo+Hs7KzeoIuh/BzP9vb20NHRgba2tlTm5uaGhIQEvHv3Drq6umqNuTjKz36eMGECevfujf79+wMA3N3d8erVKwQFBeGHH36AlhZ/kywIyr4HTU1N2bpAhYafZio0urq68PDwwOHDh6WyzMxMHD58GJ6engrn8fT0lKsPAAcPHlRan/K3nwFgzpw5mDZtGsLCwlC7du3CCLVYy+t+dnV1xdWrV3Hp0iXpr3379mjSpAkuXboEBweHwgy/2MjP8ezl5YU7d+5ICRkA3Lp1C/b29kwWlMjPfn79+nW2pCArSRNCqC/YLwy/B6lI0PRd1/Rl2bx5s9DT0xNr164VN27cEEFBQcLc3FwkJCQIIYTo3bu3GDt2rFQ/IiJClChRQvz4448iKipKTJo0icOqqiCv+3nWrFlCV1dXbN26VcTHx0t/L1++1NQmFAt53c+f4ihJqsnrfo6LixMmJiZiyJAhIjo6WuzevVvY2NiI6dOna2oTioW87udJkyYJExMT8dtvv4l79+6JAwcOCGdnZ9G9e3dNbUKx8PLlS3Hx4kVx8eJFAUDMnz9fXLx4Udy/f18IIcTYsWNF7969pfpZw6qOGjVKREVFiWXLlnFYVSp0TBio0C1ZskSULVtW6Orqirp164pTp05J03x8fERAQIBc/T/++ENUrFhR6OrqiipVqog9e/YUcsTFU172c7ly5QSAbH+TJk0q/MCLmbwezx9jwqC6vO7nv//+W9SrV0/o6emJ8uXLixkzZoj09PRCjrr4yct+fv/+vZg8ebJwdnYW/9fOvYVE1e5xHP+NlY1OhGl2UMyaEhIcNZu6iEIZLwQx0i6KiSQJqYjAMrECbS4isCww8EKSMsK0iCgMoQPW1dhBS0NpKLMGMaLD5EUHPNDMvtjsYfvm0l7qVTf7+7lb83+e9fx5LgZ+PGsts9kciIuLC+zduzcwMDAw+Y3/D7l///6Y/7f/2dsdO3YE0tPTf5qTmpoaCA0NDVit1kBdXd2k943/b6ZAgHNDAAAAAG
PjHQYAAAAAhggMAAAAAAwRGAAAAAAYIjAAAAAAMERgAAAAAGCIwAAAAADAEIEBAAAAgCECAwAAAABDBAYAAAAAhggMAIBfNjIyMtUtAAAmGYEBAKaxW7duaf369YqIiFBUVJRycnLU29sbrPf398vpdCoyMlIWi0V2u12PHj0K1m/evKk1a9bIbDZr/vz5ysvLC9ZMJpNu3Lgxar2IiAhduHBBkuT1emUymXTlyhWlp6fLbDbr0qVL8vl8cjqdio2NVXh4uGw2mxobG0fdx+/36+TJk1qxYoVmz56tJUuW6Pjx45Ikh8Ohffv2jRr/8eNHhYaGqqWl5U9sGwDgDyIwAMA09u3bNxUXF6u9vV0tLS0KCQlRXl6e/H6/vn79qvT0dL19+1ZNTU169uyZSktL5ff7JUnNzc3Ky8tTdna2Ojo61NLSorVr1/7tHg4fPqyioiJ5PB5lZWVpcHBQq1evVnNzs7q7u7Vr1y7l5+fr8ePHwTlHjhxRRUWFysvL9fz5czU0NGjhwoWSpMLCQjU0NGhoaCg4vr6+XrGxsXI4HL+5YwCAP80UCAQCU90EAODXfPr0SdHR0erq6lJra6tKSkrk9XoVGRn509h169bJarWqvr5+zHuZTCZdv35dubm5wd8iIiJUVVWlgoICeb1eLVu2TFVVVSoqKhq3r5ycHK1cuVKnTp3Sly9fFB0drerqahUWFv40dnBwUDExMaqpqdGWLVskSSkpKdq8ebNcLtff2A0AwGTghAEAprGenh45nU5ZrVbNnTtXS5culST19fWps7NTq1atGjMsSFJnZ6cyMzN/uwe73T7q+sePHzp27JhsNpsiIyM1Z84c3b59W319fZIkj8ejoaEhw7XNZrPy8/N1/vx5SdLTp0/V3d2tgoKC3+4VAPDnzZzqBgAAxjZu3Kj4+HjV1tYqJiZGfr9fSUlJGh4eVlhY2LhzJ6qbTCb99ZB5rJeaLRbLqOvKykqdOXNGVVVVstlsslgs2r9/v4aHh39pXenfjyWlpqaqv79fdXV1cjgcio+Pn3AeAGDyccIAANOUz+fTixcvVFZWpszMTCUmJmpgYCBYT05OVmdnpz5//jzm/OTk5HFfIo6Ojta7d++C1z09Pfr+/fuEfbndbm3atEnbt29XSkqKrFarXr58GawnJCQoLCxs3LVtNpvsdrtqa2vV0NCgnTt3TrguAGBqEBgAYJqaN2+eoqKidPbsWb169Ur37t1TcXFxsO50OrVo0SLl5ubK7Xbr9evXunbtmh48eCBJcrlcamxslMvlksfjUVdXl06cOBGc73A4VF1drY6ODrW3t2vPnj2aNWvWhH0lJCTo7t27am1tlcfj0e7du/X+/ftg3Ww269ChQyotLdXFixfV29urhw8f6ty5c6PuU1hYqIqKCgUCgVFfbwIATC8EBgCYpkJCQnT58mU9efJESUlJOnDggCorK4P10NBQ3blzRwsWLFB2drZsNpsqKio0Y8YMSVJGRoauXr2qpqYmpaamyuFwjPqS0enTpxUXF6cNGzZo27ZtKikpUXh4+IR9lZWVKS0tTVlZWcrIyAiGlv9WXl6ugwcP6ujRo0pMTNTWrVv14cOHUWOcTqdmzpwpp9Mps9n8GzsFAPgn8ZUkAMCU8Hq9Wr58udra2pSWljbV7QAADBAYAACTamRkRD6fTyUlJXrz5o3cbvdUtwQAGAePJAEAJpXb7dbixYvV1tammpqaqW4HADABThgAAAAAGOKEAQAAAIAhAgMAAAAAQwQGAAAAAIYIDAAAAAAMERgAAAAAGCIwAAAAADBEYAAAAABgiMAAAAAAwBCBAQAAAIChfwHkjzRZw4CAhQAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/instance_pred_WNet.tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACFbElEQVR4nOzdd1gU1/s28HtBehVpFgRBlCI2bKiIHWvshRAFo9HELrF+YwO7xhZ7C/Zo7MaCLWJBVCxYAFERRA32igUEzvuHL/NzhYUFkV3l/lwXl+6ZM2eemZ2dnWfnzBmZEEKAiIiIiIgoGxqqDoCIiIiIiNQXEwYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREREREpFCBJAwTJ06ETCaTK7Ozs4O/v39BNJ+jhIQEyGQyrF69Wirz9/eHoaHhF192JplMhokTJxba8vIjIiICdevWhYGBAWQyGSIjI/Pchp2dHdq0aVPwwZHKhISEoGrVqtDV1YVMJsPz58/z3IZMJsPAgQMLPrhvyJc4RjRs2BANGzYs0DaVsXr1ashkMpw7d67Ql/0tatiwISpVqqTqML64zP0mISEhz/Nmd45RUEJDQyGTyRAaGprneb/EuYaqPtefw9/fH3Z2dvme/1s4puT3GP8l1v1LfV7U6grDvn371PbEW51jy8379+/RpUsXPH36FHPnzsW6detga2ubbd3o6GhMnDgxXwd1+ro8efIEXbt2hZ6eHhYtWoR169bBwMAg27qnTp3CxIkT85VQUP4Vlc/j13x8VcZ///2HiRMn5uuHGsqenZ3dN73PfIsWL14s9+OuOvuaYi0sxb5Uw7GxsdDQyFs+sm/fPixatChPBwFbW1u8ffsWWlpaeYwwb3KK7e3btyhW7Ittys8WFxeH27dvY8WKFejTp0+OdaOjoxEYGIiGDRt+1i8GpP4iIiLw6tUrTJo0CU2bNs2x7qlTpxAYGAh/f3+YmpoWToDfkPweI3L6PB48eLCAolO9/Bz7vyb//fcfAgMDYWdnh6pVq6o6HCKVWLx4MczNzQul98nn+ppiLSxf7CxXR0fnSzUNAEhLS0NGRga0tbWhq6v7RZeVG1UvPzcPHz4EAJ7ofQEZGRlITU1V+30gO9wvCs+X2D+0tbULvE0iIqLs5LlL0smTJ1GzZk3o6urCwcEBy5Yty7bep/cwvH//HoGBgXB0dISuri5KlCiB+vXr49ChQwA+9IFbtGgRgA99wTL/gP+7T+H333/HvHnz4ODgAB0dHURHR2d7D0OmW7duwdvbGwYGBihVqhSCgoIghJCmK+q7+GmbOcWWWfbpL2MXL15Ey5YtYWxsDENDQzRp0gSnT5+Wq5PZdy0sLAwBAQGwsLCAgYEBOnTogEePHmX/Bnzi33//haenJwwMDGBqaop27dohJiZGmu7v7w8
vLy8AQJcuXSCTyRT2j1y9ejW6dOkCAGjUqJG0np9un5MnT6JWrVrQ1dWFvb091q5dm6Wt58+fY+jQobCxsYGOjg7Kly+PGTNmICMjI9d12rVrF1q3bo1SpUpBR0cHDg4OmDRpEtLT07PUPXPmDFq1aoXixYvDwMAAlStXxvz58+XqXLt2DV27doWFhQX09PRQsWJF/Pbbb3LbKLurKdn1A8zsr79hwwa4urpCR0cHISEhAIDff/8ddevWRYkSJaCnpwd3d3ds3bo123Vcv349atWqBX19fRQvXhwNGjSQfjH28/ODubk53r9/n2W+5s2bo2LFijlvQABbtmyBu7s79PT0YG5ujh9++AH37t2Tpjds2BB+fn4AgJo1a0Imkyn8JWXixIkYMWIEAKBcuXLSfvFpN5mdO3eiUqVK0NHRgaurq7RdPnbv3j38+OOPsLKykur9+eefua4PAAQHB6Nx48awtLSEjo4OXFxcsGTJkiz1zp07B29vb5ibm0NPTw/lypXDjz/+KFdn06ZNcHd3h5GREYyNjeHm5pZlv7l16xa6dOkCMzMz6Ovro06dOti7d2+W5b179w4TJ05EhQoVoKuri5IlS6Jjx46Ii4uT6nx6jLh9+zb69++PihUrQk9PDyVKlECXLl3ktmlun8fs+jo/fPgQvXv3hpWVFXR1dVGlShWsWbNGrs7Hx9Ply5dLx9OaNWsiIiJC4fb/1Js3b9CvXz+UKFECxsbG6NmzJ549e5al3v79+6VjlJGREVq3bo2oqChpek7H1+rVq6Njx45y7bm5uUEmk+Hy5ctS2ebNmyGTyeSOfcruaykpKZgwYQLKly8PHR0d2NjYYOTIkUhJSZGrl/nZV2Y//1hoaChq1qwJAOjVq5e0fp9+Z0VHR6NRo0bQ19dH6dKlMXPmzHzHmp3MeyUuX74MLy8v6Ovro3z58tIx6tixY6hdu7Z0jDx8+HCWNpT5XgOAqKgoNG7cGHp6eihTpgwmT56s8Nif2/6hrNzOMfLixIkT6NKlC8qWLStt52HDhuHt27fZ1s/tXAP48OPSvHnz4OrqCl1dXVhZWaFfv37ZfmY+tWDBAri6ukrfFzVq1MDGjRtznCc1NRXjx4+Hu7s7TExMYGBgAE9PTxw9elSuXl6PB5n7v66uLipVqoQdO3bkGj/w4ZwwKioKx44dkz4Dnx6/UlJSlDoX+px9JnP//3jfDA4OlvtOUybW3ChzjP9YQR1PFTl06BDq168PU1NTGBoaomLFivjf//6Xp3XK0xWGK1euoHnz5rCwsMDEiRORlpaGCRMmwMrKKtd5J06ciGnTpqFPnz6oVasWXr58iXPnzuHChQto1qwZ+vXrh//++w+HDh3CunXrsm0jODgY7969Q9++faGjowMzMzOFB6H09HS0aNECderUwcyZMxESEoIJEyYgLS0NQUFBeVltpWL7WFRUFDw9PWFsbIyRI0dCS0sLy5YtQ8OGDaWD8scGDRqE4sWLY8KECUhISMC8efMwcOBAbN68OcflHD58GC1btoS9vT0mTpyIt2/fYsGCBahXrx4uXLgAOzs79OvXD6VLl8bUqVMxePBg1KxZU+H71aBBAwwePBh//PEH/ve//8HZ2RkApH8B4ObNm+jcuTN69+4NPz8//Pnnn/D394e7uztcXV0BfNjxvby8cO/ePfTr1w9ly5bFqVOnMGbMGCQlJWHevHk5rtfq1athaGiIgIAAGBoa4t9//8X48ePx8uVLzJo1S6p36NAhtGnTBiVLlsSQIUNgbW2NmJgY7NmzB0OGDAHw4eDg6ekJLS0t9O3bF3Z2doiLi8M///yDKVOm5BiHIv/++y/+/vtvDBw4EObm5lKyMX/+fHz33Xfw9fVFamoqNm3ahC5dumDPnj1o3bq1NH9gYCAmTpyIunXrIigoCNra2jhz5gz+/fdfNG/eHD169MDatWtx4MABuZvM79+/j3///RcTJkzIdfv16tULNWvWxLRp0/DgwQP
Mnz8fYWFhuHjxIkxNTfHbb7+hYsWKWL58OYKCglCuXDk4ODhk217Hjh1x/fp1/PXXX5g7dy7Mzc0BABYWFlKdkydPYvv27ejfvz+MjIzwxx9/oFOnTkhMTESJEiUAAA8ePECdOnWkEy8LCwvs378fvXv3xsuXLzF06NAc12vJkiVwdXXFd999h2LFiuGff/5B//79kZGRgQEDBgD4cMKceYwaPXo0TE1NkZCQgO3bt0vtHDp0CD4+PmjSpAlmzJgBAIiJiUFYWJi03zx48AB169bFmzdvMHjwYJQoUQJr1qzBd999h61bt6JDhw4APhxn2rRpgyNHjqB79+4YMmQIXr16hUOHDuHq1asKt2lERAROnTqF7t27o0yZMkhISMCSJUvQsGFDREdHQ19fX6nP48fevn2Lhg0b4ubNmxg4cCDKlSuHLVu2wN/fH8+fP5fWLdPGjRvx6tUr9OvXDzKZDDNnzkTHjh1x69Ytpbp4Dhw4EKamppg4cSJiY2OxZMkS3L59W/ohBgDWrVsHPz8/eHt7Y8aMGXjz5g2WLFmC+vXr4+LFi9IxStHx1dPTE3/99Zf0+unTp4iKioKGhgZOnDiBypUrA/hwgmdhYSFtG2X3tYyMDHz33Xc4efIk+vbtC2dnZ1y5cgVz587F9evXsXPnTrl4lNnPP+Xs7IygoCCMHz8effv2haenJwCgbt26Up1nz56hRYsW6NixI7p27YqtW7di1KhRcHNzQ8uWLfMVa3aePXuGNm3aoHv37ujSpQuWLFmC7t27Y8OGDRg6dCh+/vlnfP/995g1axY6d+6MO3fuwMjICIDy32v3799Ho0aNkJaWhtGjR8PAwADLly+Hnp5elniU2T+Ulds5Rl5s2bIFb968wS+//IISJUrg7NmzWLBgAe7evYstW7bI1VX2XKNfv37SsXnw4MGIj4/HwoULcfHiRYSFhSn8zK1YsQKDBw9G586dMWTIELx79w6XL1/GmTNn8P333ytch5cvX2LlypXw8fHBTz/9hFevXmHVqlXw9vbG2bNns3SNU+Z4cPDgQXTq1AkuLi6YNm0anjx5gl69eqFMmTK5btN58+Zh0KBBMDQ0lH6w+/RcRJlzoc/ZZ+7duyf9+DJmzBgYGBhg5cqVWXrEKBNrbpQ5xn+soI6n2YmKikKbNm1QuXJlBAUFQUdHBzdv3kRYWFie1gkiD9q3by90dXXF7du3pbLo6GihqakpPm3K1tZW+Pn5Sa+rVKkiWrdunWP7AwYMyNKOEELEx8cLAMLY2Fg8fPgw22nBwcFSmZ+fnwAgBg0aJJVlZGSI1q1bC21tbfHo0SMhhBBHjx4VAMTRo0dzbVNRbEIIAUBMmDBBet2+fXuhra0t4uLipLL//vtPGBkZiQYNGkhlwcHBAoBo2rSpyMjIkMqHDRsmNDU1xfPnz7NdXqaqVasKS0tL8eTJE6ns0qVLQkNDQ/Ts2VMqy1zPLVu25NieEEJs2bIl220ixIf3FIA4fvy4VPbw4UOho6Mjfv31V6ls0qRJwsDAQFy/fl1u/tGjRwtNTU2RmJiYYwxv3rzJUtavXz+hr68v3r17J4QQIi0tTZQrV07Y2tqKZ8+eydX9eFs2aNBAGBkZye2zn9bx8/MTtra2WZY5YcKELO85AKGhoSGioqJyjTs1NVVUqlRJNG7cWCq7ceOG0NDQEB06dBDp6enZxpSeni7KlCkjunXrJjd9zpw5QiaTiVu3bmVZ9sfLtLS0FJUqVRJv376Vyvfs2SMAiPHjx0tlmftfRESEwvYyzZo1SwAQ8fHxWaYBENra2uLmzZtS2aVLlwQAsWDBAqmsd+/eomTJkuLx48dy83fv3l2YmJhk+75/LLvp3t7ewt7eXnq9Y8eOXNdpyJAhwtjYWKSlpSmsM3ToUAFAnDhxQip79eqVKFeunLCzs5Peuz///FMAEHPmzMnSxsf72KfHiOzWJTw8XAAQa9eulcpy+jx6eXkJLy8v6fW8efMEALF
+/XqpLDU1VXh4eAhDQ0Px8uVLIcT/Hd9KlCghnj59KtXdtWuXACD++ecfhdtFiP/bb9zd3UVqaqpUPnPmTAFA7Nq1SwjxYXuZmpqKn376SW7++/fvCxMTE7lyRcfXzPWPjo4WQgixe/duoaOjI7777ju5z0flypVFhw4dpNfK7mvr1q0TGhoacu+zEEIsXbpUABBhYWFSmbL7eXYiIiKyfKdk8vLyyvK+p6SkCGtra9GpUyepLC+xZidzORs3bpTKrl27Jh3TTp8+LZUfOHAgS7zKfq9lfnbOnDkjlT18+FCYmJjIHUPysn9kdyz+lDLnGNnJ7jwgu8/ntGnThEwmk/suUfZc48SJEwKA2LBhg1ybISEhWco//Vy3a9dOuLq65nm90tLSREpKilzZs2fPhJWVlfjxxx+lsrwcD6pWrSpKliwpd25y8OBBASDb79BPubq6yq1bJmXPhfKyz2Rn0KBBQiaTiYsXL0plT548EWZmZlm+3xTFqkh+j/Ff4nj66edl7ty5AoC0P+aX0l2S0tPTceDAAbRv3x5ly5aVyp2dneHt7Z3r/KampoiKisKNGzeUXWQWnTp1kvtVMzcfD/WY+UtTampqtpdaC0p6ejoOHjyI9u3bw97eXiovWbIkvv/+e5w8eRIvX76Um6dv375yXV88PT2Rnp6O27dvK1xOUlISIiMj4e/vDzMzM6m8cuXKaNasGfbt21eAa/V/XFxcpF/IgA+/MlesWBG3bt2SyrZs2QJPT08UL14cjx8/lv6aNm2K9PR0HD9+PMdlfPxL1KtXr/D48WN4enrizZs3uHbtGoAPl8bj4+MxdOjQLH3wM7flo0ePcPz4cfz4449y++zHdfLDy8sLLi4uOcb97NkzvHjxAp6enrhw4YJUvnPnTmRkZGD8+PFZBgXIjElDQwO+vr7YvXs3Xr16JU3fsGED6tati3LlyimM7dy5c3j48CH69+8v12++devWcHJyyrZLTUFo2rSp3K/plStXhrGxsbRfCCGwbds2tG3bFkIIuf3C29sbL168kNtO2fl4+7548QKPHz+Gl5cXbt26hRcvXgD4v/sx9uzZk22Xrsw6r1+/zrGrwr59+1CrVi3Ur19fKjM0NETfvn2RkJCA6OhoAMC2bdtgbm6OQYMGZWkjp33s43V5//49njx5gvLly8PU1DTX7ZBTzNbW1vDx8ZHKtLS0MHjwYCQnJ+PYsWNy9bt164bixYtLrzM/1x9/lnPSt29fuV9Ff/nlFxQrVkw69hw6dAjPnz+Hj4+P3PutqamJ2rVrZ+kakZ3MmDKPGSdOnEDNmjXRrFkznDhxAsCH7o9Xr16V6uZlX9uyZQucnZ3h5OQkV69x48YAkCXG3Pbz/DI0NMQPP/wgvdbW1katWrWyHFfzEqui5XTv3l16XbFiRZiamsLZ2Vnuynfm/zOXn5fvtX379qFOnTqoVauWVM/CwgK+vr5ysRTE/vGxgjjHyPTx5/P169d4/Pgx6tatCyEELl68mKV+bucaW7ZsgYmJCZo1aya3ru7u7jA0NMxxXU1NTXH37t08dRcEAE1NTek+p4yMDDx9+hRpaWmoUaNGtseY3I4Hmeccfn5+MDExkeo1a9Ys2+/D/MjtXOhz95mQkBB4eHjIXV0xMzPLsm8WhLwe47/k8TTze3HXrl1KdQtXROmE4dGjR3j79i0cHR2zTFOmT3VQUBCeP3+OChUqwM3NDSNGjJDrg6qMnE6UPqWhoSF3YAOAChUqAMAXHaLw0aNHePPmTbbbxNnZGRkZGbhz545c+acns5kf2pz6NmZ+gBQt5/Hjx3j9+nWe48/Np7ECH+L9ONYbN24gJCQEFhYWcn+ZI/Fk3myrSFRUFDp06AATExMYGxvDwsJC+jLNPDHM7B+e0/jlmQe6gh7jXNF+uGfPHtSpUwe6urowMzODhYUFlixZIsUMfIhbQ0Mj1wNsz5498fbtW6l/aGxsLM6fP48ePXrkOF9O+4WTk1OOSej
nyG2/ePToEZ4/f47ly5dn2S969eoFIPf9IiwsDE2bNpXu17GwsJD6YGZuYy8vL3Tq1AmBgYEwNzdHu3btEBwcLNfHu3///qhQoQJatmyJMmXK4Mcff8zSD/327dsKP1uZ04EP72fFihXzPALS27dvMX78eOkeH3Nzc1hYWOD58+dy+0te3L59G46OjlkS0U9jzpSf487HPv0uMDQ0RMmSJaXja+aJW+PGjbO85wcPHsz1/QY+dANwdHSUkoMTJ07A09MTDRo0wH///Ydbt24hLCwMGRkZ0glOXva1GzduICoqKku9zO+KT2NU5viXH2XKlMmSYGZ3XM1LrMoux8TEBDY2NlnKAMh9fpX9XsvcDz/16bwFsX98rCDOMTIlJiZKP8YZGhrCwsJCuhfw08+nMucaN27cwIsXL2BpaZllXZOTk3Nc11GjRsHQ0BC1atWCo6MjBgwYoHRXkjVr1qBy5crSPR0WFhbYu3dvtseY3I4HmceP/J4DKiO3GD53n7l9+zbKly+fpTy7ss+V12P8lzyeduvWDfXq1UOfPn1gZWWF7t274++//85z8lBoY4E2aNAAcXFx2LVrFw4ePIiVK1di7ty5WLp0aa5DfWbKrg/k51D0C2B2N9d+SZqamtmWi09umlIHysSakZGBZs2aYeTIkdnWzTyYZuf58+fw8vKCsbExgoKC4ODgAF1dXVy4cAGjRo36rOxYkbzuB9nthydOnMB3332HBg0aYPHixShZsiS0tLQQHByc681p2XFxcYG7uzvWr1+Pnj17Yv369dDW1kbXrl3z3FZhyG2/yHzffvjhB+lm609l9kfPTlxcHJo0aQInJyfMmTMHNjY20NbWxr59+zB37lypfZlMhq1bt+L06dP4559/cODAAfz444+YPXs2Tp8+DUNDQ1haWiIyMhIHDhzA/v37sX//fgQHB6Nnz55ZbhD+UgYNGoTg4GAMHToUHh4eMDExgUwmQ/fu3b/IPp6dL33cyVyPdevWwdraOst0ZZOs+vXr48iRI3j79i3Onz+P8ePHo1KlSjA1NcWJEycQExMDQ0NDVKtWTW65yuxrGRkZcHNzw5w5c7Kt9+mJ9JfaZsoeV/MSa16Wo4rvoILaPzIVxDkG8OG436xZMzx9+hSjRo2Ck5MTDAwMcO/ePfj7++fr85mRkQFLS0ts2LAh2+k59ZxwdnZGbGws9uzZg5CQEGzbtg2LFy/G+PHjERgYqHC+9evXw9/fH+3bt8eIESNgaWkJTU1NTJs2TW5AhkzqcB6i7PdIQe0zX1JBH+M/Z9319PRw/PhxHD16FHv37kVISAg2b96Mxo0b4+DBgwq3e5ZlKBts5ggz2V3ui42NVaoNMzMz9OrVC7169UJycjIaNGiAiRMnSh/mgnwyXUZGBm7duiV3cnr9+nUAkG4MycxeP30YVXa/wiobm4WFBfT19bPdJteuXYOGhoZSB/bcZD54TdFyzM3NFT6EKycF8R44ODggOTk517H9sxMaGoonT55g+/btaNCggVQeHx+fZRkAcPXqVYXLyfzV5+rVqzkus3jx4tk+kCwvv8Zv27YNurq6OHDggNwNVMHBwVnizsjIQHR0dK7jsffs2RMBAQFISkrCxo0b0bp1a7lLxtn5eL/I7KqQKTY2VuED+3LzufuFhYUFjIyMkJ6enq/94p9//kFKSgp2794t9yuUosuwderUQZ06dTBlyhRs3LgRvr6+2LRpk3Ss0dbWRtu2bdG2bVtkZGSgf//+WLZsGcaNG4fy5cvD1tZW4WcL+L/t7ODggDNnzuD9+/d5ehbM1q1b4efnh9mzZ0tl7969y7If5mW729ra4vLly8jIyJC7yvBpzAXlxo0baNSokfQ6OTkZSUlJaNWqFYD/+4xaWlrm+p7ntJ6enp4IDg7Gpk2bkJ6ejrp160JDQwP169eXEoa6detKX3p52dccHBxw6dIlNGnS5Is9SRgouONqYcSanbx8r9na2ip1npCX/UNZuZ1
jKOPKlSu4fv061qxZg549e0rlirowKnOu4eDggMOHD6NevXr5+uHTwMAA3bp1Q7du3ZCamoqOHTtiypQpGDNmjMIhm7du3Qp7e3ts375dbn/JbdAMRTKPH59zDvi5++3n7jO2tra4efNmlvLsyj43VmWP8ZkK8niaHQ0NDTRp0gRNmjTBnDlzMHXqVPz22284evSo0u0p3SVJU1MT3t7e2LlzJxITE6XymJgYHDhwINf5nzx5Ivfa0NAQ5cuXl+sqkHmCW1BPk124cKH0fyEEFi5cCC0tLTRp0gTAh51HU1MzS5/6xYsXZ2lL2dg0NTXRvHlz7Nq1S67r04MHD7Bx40bUr18fxsbG+Vyj/1OyZElUrVoVa9askYvp6tWrOHjwoLST5VVBvAddu3ZFeHh4tvvF8+fPkZaWpnDezC/9j3/VSE1NzfKeVK9eHeXKlcO8efOyxJo5r4WFBRo0aIA///xTbp/9tH0HBwe8ePFC7vJ1UlKS0sPFZcYtk8nkrkokJCRkGbmkffv20NDQQFBQUJZfGT79JcfHxwcymQxDhgzBrVu35Po4K1KjRg1YWlpi6dKlcp+t/fv3IyYmRm60prz43P1CU1MTnTp1wrZt27JN4HIbRji7/eLFixdZErJnz55l2Y6ZiVnm9vj0WKShoSH94pxZp1WrVjh79izCw8Oleq9fv8by5cthZ2cndSnr1KkTHj9+LHesyZTTL3OamppZpi9YsCDLVa28bPdWrVrh/v37ciOKpKWlYcGCBTA0NJS6VBSU5cuXy90nsmTJEqSlpUmj+nh7e8PY2BhTp07N9n6Sj9/znNYzs6vRjBkzULlyZam7jKenJ44cOYJz587J3VeVl32ta9euuHfvHlasWJGl3tu3bwusW2dBHVcLI9bs5OV7rVWrVjh9+jTOnj0r1Xv06FGWX9fzsn8oQ5lzDGVkd6wRQmQZdvljuZ1rdO3aFenp6Zg0aVKWedPS0nLcLz5dL21tbbi4uEAIofA+LUXrcebMGbljWl58fM7xcZeaQ4cOSfd05cbAwOCzPgOfu894e3sjPDxc7onrT58+zfbKz+fGquwxPlNBHk8/9fTp0yxln34vKiNP128CAwMREhICT09P9O/fX/oycnV1zbWvoIuLCxo2bAh3d3eYmZnh3Llz2Lp1q9zNQu7u7gCAwYMHw9vbG5qamnI3aOWFrq4uQkJC4Ofnh9q1a2P//v3Yu3cv/ve//0mX/0xMTNClSxcsWLAAMpkMDg4O2LNnT7Z9wfIS2+TJk6Uxb/v3749ixYph2bJlSElJyXZs7fyaNWsWWrZsCQ8PD/Tu3VsaVtXExCTfT0ytWrUqNDU1MWPGDLx48QI6OjrS2PfKGjFiBHbv3o02bdpIQ66+fv0aV65cwdatW5GQkCANzfmpunXronjx4vDz88PgwYMhk8mwbt26LB88DQ0NLFmyBG3btkXVqlXRq1cvlCxZEteuXUNUVJSUrPzxxx+oX78+qlevjr59+6JcuXJISEjA3r17pYNG9+7dMWrUKHTo0AGDBw+WhiqrUKGC0jegtm7dGnPmzEGLFi3w/fff4+HDh1i0aBHKly8v99koX748fvvtN0yaNAmenp7o2LEjdHR0EBERgVKlSmHatGlSXQsLC7Ro0QJbtmyBqampUif7WlpamDFjBnr16gUvLy/4+PhIw6ra2dlh2LBhSq3PpzL3/99++w3du3eHlpYW2rZtm6erWNOnT8fRo0dRu3Zt/PTTT3BxccHTp09x4cIFHD58ONuDWqbmzZtLVwX69euH5ORkrFixApaWlkhKSpLqrVmzBosXL0aHDh3g4OCAV69eYcWKFTA2NpaS6D59+uDp06do3LgxypQpg9u3b2PBggWoWrWq1N9/9OjR+Ouvv9CyZUsMHjwYZmZmWLNmDeLj47Ft2zbpF/yePXti7dq1CAgIwNmzZ+Hp6YnXr1/j8OHD6N+/P9q1a5ft+rRp0wbr1q2DiYkJXFxcEB4ejsO
HD2cZmjMvn8e+ffti2bJl8Pf3x/nz52FnZ4etW7ciLCwM8+bNk4bHLCipqalo0qQJunbtitjYWCxevBj169fHd999BwAwNjbGkiVL0KNHD1SvXh3du3eHhYUFEhMTsXfvXtSrV0860crp+Fq+fHlYW1sjNjZW7ubyBg0aYNSoUQAglzAAyu9rPXr0wN9//42ff/4ZR48eRb169ZCeno5r167h77//xoEDB1CjRo3P3lYODg4wNTXF0qVLYWRkBAMDA9SuXTtP9+UVVqyKKPu9NnLkSKxbtw4tWrTAkCFDpGFVM6+AZcrL/qEMZc4xlOHk5AQHBwcMHz4c9+7dg7GxMbZt26bwPhVlzjW8vLzQr18/TJs2DZGRkWjevDm0tLRw48YNbNmyBfPnz0fnzp2zbb958+awtrZGvXr1YGVlhZiYGCxcuBCtW7fO8TPdpk0bbN++HR06dEDr1q0RHx+PpUuXwsXFBcnJyXnaJpmmTZuG1q1bo379+vjxxx/x9OlT6RxQmTbd3d2xZMkSTJ48GeXLl4elpWWWK+E5+dx9ZuTIkVi/fj2aNWuGQYMGScOqli1bFk+fPpW7qvC5sSp7jM9UkMfTTwUFBeH48eNo3bo1bG1t8fDhQyxevBhlypSRG9gjV3kdVunYsWPC3d1daGtrC3t7e7F06dJshzz7dFjVyZMni1q1aglTU1Ohp6cnnJycxJQpU+SGkUpLSxODBg0SFhYWQiaTSW1mDvs1a9asLPEoGlbVwMBAxMXFiebNmwt9fX1hZWUlJkyYkGUoy0ePHolOnToJfX19Ubx4cdGvXz9x9erVLG0qik2IrMNpCSHEhQsXhLe3tzA0NBT6+vqiUaNG4tSpU3J1FA1rqWi41+wcPnxY1KtXT+jp6QljY2PRtm1baQjCT9tTZlhVIYRYsWKFsLe3l4bLzYzD1tY222HrPh0GTogPQ4CNGTNGlC9fXmhrawtzc3NRt25d8fvvv8u959kJCwsTderUEXp6eqJUqVJi5MiR0jB/n26TkydPimbNmgkjIyNhYGAgKleunGWIw6tXr4oOHToIU1NToaurKypWrCjGjRsnV+fgwYOiUqVKQltbW1SsWFGsX79e4bCqAwYMyDbuVatWCUdHR6GjoyOcnJxEcHCwwuEA//zzT1GtWjWho6MjihcvLry8vMShQ4ey1Pv7778FANG3b98ct9mnNm/eLLVvZmYmfH19xd27d+Xq5GVYVSE+DJdbunRpoaGhITcEnaJt8ukxQAghHjx4IAYMGCBsbGyElpaWsLa2Fk2aNBHLly/Pdfm7d+8WlStXFrq6usLOzk7MmDFDGtY0M5YLFy4IHx8fUbZsWaGjoyMsLS1FmzZtxLlz56R2tm7dKpo3by4sLS2Ftra2KFu2rOjXr59ISkqSW15cXJzo3LmztN/UqlVL7NmzJ0tcb968Eb/99psoV66ctE6dO3eWG37y02PEs2fPRK9evYS5ubkwNDQU3t7e4tq1a9luM0Wfx+w+dw8ePJDa1dbWFm5ublmG8szpeJrdsexTmfvNsWPHRN++fUXx4sWFoaGh8PX1lRviOdPRo0eFt7e3MDExEbq6usLBwUH4+/vLvSc5HV+FEKJLly4CgNi8ebNUlpqaKvT19YW2trbcEMIfbwtl9rXU1FQxY8YM4erqKn0e3d3dRWBgoHjx4oXctlF2P8/Orl27hIuLiyhWrJjc94uXl1e2w2ZmN9yzsrFmR9FyFB3Xs1tfZb7XhBDi8uXLwsvLS+jq6orSpUuLSZMmiVWrVmUZulII5fYPZYZVVeYcIzvZfd9GR0eLpk2bCkNDQ2Fubi5++uknaQjd/J5rCCHE8uXLhbu7u9DT0xNGRkbCzc1NjBw5Uvz3339SnU8/18uWLRMNGjQQJUqUEDo6OsLBwUGMGDEi1/c7IyNDTJ06Vdja2godHR1RrVo1sWfPniz7VV6PB9u2bRPOzs5CR0dHuLi4iO3btyscmvxT9+/fF61btxZ
GRkYCgLSeeT0XUmafUeTixYvC09NT6OjoiDJlyohp06aJP/74QwAQ9+/fzzVWRfJ7jP8Sx9NPPy9HjhwR7dq1E6VKlRLa2tqiVKlSwsfHJ8vQ97mR/f8VJSI1tGvXLrRv3x7Hjx/P8isqERERfZ6hQ4di2bJlSE5OVvoG4KKICQORGmvTpg1iYmJw8+bNQr/RkYiI6Fvy9u1buRvPnzx5ggoVKqB69eo5PpuHCnFYVSJS3qZNm3D58mXs3bsX8+fPZ7JARET0mTw8PNCwYUM4OzvjwYMHWLVqFV6+fIlx48apOjS1xysMRGpIJpPB0NAQ3bp1w9KlS9VqfGkiIqKv0f/+9z9s3boVd+/ehUwmQ/Xq1TFhwoQCG9r3W8aEgYiIiIiIFFL6OQxERERERFT0MGEgIiIiIiKF2DGa8iwjIwP//fcfjIyMeDMuERHRV0IIgVevXqFUqVLSAyiJlMGEgfLsv//+g42NjarDICIiony4c+cOypQpo+ow6CvChIHyLPNx9Hfu3IGxsbGKoyEiIiJlvHz5EjY2NtL3OJGymDBQnmV2QzI2NmbCQERE9JVhd2LKK3ZgIyIiIiIihZgwEBERERGRQkwYiIiIiIhIId7DQETfrPT0dLx//17VYRARFQotLS1oamqqOgz6BjFhIKJvjhAC9+/fx/Pnz1UdChFRoTI1NYW1tTVvbKYCxYSBiL45mcmCpaUl9PX1+cVJRN88IQTevHmDhw8fAgBKliyp4ojoW8KEgYi+Kenp6VKyUKJECVWHQ0RUaPT09AAADx8+hKWlJbsnUYHhTc9E9E3JvGdBX19fxZEQERW+zGMf79+igsSEgYi+SeyGRERFEY999CUwYSAiIiIiIoWYMBARqYmGDRti6NChqg5Dra1evRqmpqZq005u3rx5g06dOsHY2BgymeyrG7lr4sSJqFq1qqrDUCg/76NMJsPOnTsLZPl5/cyGhoYWyH5gZ2eHefPmfVYbRHnBm56JqMiwG723UJeXML11oS4vN6GhoWjUqBGePXtWKCfLX0K3bt3QqlWrPM1jZ2eHoUOHyp3Y5aed/FizZg1OnDiBU6dOwdzcHCYmJl98mZQ3MpkM8fHxsLOzU3UoRGqLCQMREX019PT0pJFg1KGd3MTFxcHZ2RmVKlXKdxvp6emQyWTQ0GCnACJSDR59iIjUSFpaGgYOHAgTExOYm5tj3LhxEEJI01NSUjB8+HCULl0aBgYGqF27NkJDQ6Xpt2/fRtu2bVG8eHEYGBjA1dUV+/btQ0JCAho1agQAKF68OGQyGfz9/bON4cmTJ/Dx8UHp0qWhr68PNzc3/PXXX3J1tm7dCjc3N+jp6aFEiRJo2rQpXr9+DeDDlYxatWrBwMAApqamqFevHm7fvi3Nu2TJEjg4OEBbWxsVK1bEunXr5Np+/vw5+vXrBysrK+jq6qJSpUrYs2cPgKxdUOLi4tCuXTtYWVnB0NAQNWvWxOHDh6XpDRs2xO3btzFs2DDIZDLphtDsurLkFpdMJsPKlSvRoUMH6Ovrw9HREbt37852G2Yue/bs2Th+/DhkMhkaNmwIAHj27Bl69uyJ4sWLQ19fHy1btsSNGzek+TJj2717N1xcXKCjo4PExMRsl3H16lW0bNkShoaGsLKyQo8ePfD48WNpekhICOrXrw9TU1OUKFECbdq0QVxcnFwbd+/ehY+PD8zMzGBgYIAaNWrgzJkzcnXWrVsHOzs7mJiYoHv37nj16pXC9c6Mf8+ePahYsSL09fXRuXNnvHnzBmvWrIGdnR2KFy+OwYMHIz09XZovt+2S2XbZsmWhr6+PDh064MmTJ1mWv2vXLlSvXh26urqwt7dHYGAg0tLSFMb7sWfPnsHX1xcWFhbQ09ODo6MjgoODlZoX+LCdatSoASMjI1hbW+P777+XnovwsbCwMFSuXBm6urqoU6cOrl69Kjf95MmT8PT0hJ6eHmxsbDB48GDp80WkCkwYiIjUyJo1a1CsWDGcPXs
W8+fPx5w5c7By5Upp+sCBAxEeHo5Nmzbh8uXL6NKlC1q0aCGdWA0YMAApKSk4fvw4rly5ghkzZsDQ0BA2NjbYtm0bACA2NhZJSUmYP39+tjG8e/cO7u7u2Lt3L65evYq+ffuiR48eOHv2LAAgKSkJPj4++PHHHxETE4PQ0FB07NgRQgikpaWhffv28PLywuXLlxEeHo6+fftKJ+o7duzAkCFD8Ouvv+Lq1avo168fevXqhaNHjwIAMjIy0LJlS4SFhWH9+vWIjo7G9OnTFY4nn5ycjFatWuHIkSO4ePEiWrRogbZt20on2Nu3b0eZMmUQFBSEpKQkJCUlZdtObnFlCgwMRNeuXXH58mW0atUKvr6+ePr0abZtbt++HT/99BM8PDyQlJSE7du3AwD8/f1x7tw57N69G+Hh4RBCoFWrVnLDYL558wYzZszAypUrERUVBUtLyyztP3/+HI0bN0a1atVw7tw5hISE4MGDB+jatatU5/Xr1wgICMC5c+dw5MgRaGhooEOHDsjIyJC2n5eXF+7du4fdu3fj0qVLGDlypDQd+JCU7dy5E3v27MGePXtw7NgxTJ8+Pdt1/jj+P/74A5s2bUJISAhCQ0PRoUMH7Nu3D/v27cO6deuwbNkybN26VZont+1y5swZ9O7dGwMHDkRkZCQaNWqEyZMnyy33xIkT6NmzJ4YMGYLo6GgsW7YMq1evxpQpU3KMN9O4ceMQHR2N/fv3IyYmBkuWLIG5ublS8wIfhjKdNGkSLl26hJ07dyIhISHbxHzEiBGYPXs2IiIiYGFhgbZt20rrGRcXhxYtWqBTp064fPkyNm/ejJMnT2LgwIFKx0FU4ARRHr148UIAEC9evFB1KERZvH37VkRHR4u3b99mmWY7ak+h/uWVl5eXcHZ2FhkZGVLZqFGjhLOzsxBCiNu3bwtNTU1x7949ufmaNGkixowZI4QQws3NTUycODHb9o8ePSoAiGfPnuU5ttatW4tff/1VCCHE+fPnBQCRkJCQpd6TJ08EABEaGpptO3Xr1hU//fSTXFmXLl1Eq1athBBCHDhwQGhoaIjY2Nhs5w8ODhYmJiY5xurq6ioWLFggvba1tRVz587NsZ3c4hJCCABi7Nix0uvk5GQBQOzfv19hLEOGDBFeXl7S6+vXrwsAIiwsTCp7/Pix0NPTE3///bcUGwARGRmZ43pOmjRJNG/eXK7szp07AoDC7ffo0SMBQFy5ckUIIcSyZcuEkZGRePLkSbb1J0yYIPT19cXLly+lshEjRojatWsrjCsz/ps3b0pl/fr1E/r6+uLVq1dSmbe3t+jXr58QQrnt4uPjI/d+CCFEt27d5N7HJk2aiKlTp8rVWbdunShZsqT0GoDYsWNHtrG3bdtW9OrVS+G6fcrLy0sMGTJE4fSIiAgBQFrvzM/grOWzxNVHV8XVR1dF2PUwoaunK35f8bu4+uiq6OjbUXTp2UWunRMnTggNDQ3puJbdPp0pp2Mgv78pv3iFgYhIjdSpU0duHHUPDw/cuHED6enpuHLlCtLT01GhQgUYGhpKf8eOHZO6mQwePBiTJ09GvXr1MGHCBFy+fDnPMaSnp2PSpElwc3ODmZkZDA0NceDAAelX+ypVqqBJkyZwc3NDly5dsGLFCjx79gwAYGZmBn9/f3h7e6Nt27aYP3++3K/6MTExqFevntzy6tWrh5iYGABAZGQkypQpgwoVKigVa3JyMoYPHw5nZ2eYmprC0NAQMTExCrvwKJJbXJkqV64s/d/AwADGxsbZdjnJaTnFihVD7dq1pbISJUqgYsWKcsvS1taWW1Z2Ll26hKNHj8rtC05OTgAg7Q83btyAj48P7O3tYWxsLN3Ym7l9IiMjUa1aNZiZmSlcjp2dHYyMjKTXJUuWzHWd9fX14eDgIL22srKCnZ0dDA0N5coy21Fmu8TExMhNBz58Pj7dJkFBQXLb5KeffkJSUhLevHmTY8wA8Msvv2DTpk2oWrUqRo4ciVOnTuU
6z8fOnz+Ptm3bomzZsjAyMoKXlxcAZNkfq9asKv3fpLgJ7BzscOvGLQBAbFQsdm7aKbcO3t7eyMjIQHx8fJ7iISoovOmZiOgrkZycDE1NTZw/fz5LF53ME7E+ffrA29sbe/fuxcGDBzFt2jTMnj0bgwYNUno5s2bNwvz58zFv3jy4ubnBwMAAQ4cORWpqKgBAU1MThw4dwqlTp3Dw4EEsWLAAv/32G86cOYNy5cohODgYgwcPRkhICDZv3oyxY8fi0KFDqFOnTq7LzuuNyMOHD8ehQ4fw+++/o3z58tDT00Pnzp2lWAualpaW3GuZTCbXfaeg6Onp5foAruTkZLRt2xYzZszIMq1kyZIAgLZt28LW1hYrVqxAqVKlkJGRgUqVKknbR5ntnZ91zm6ewth2ycnJCAwMRMeOHbNM09XVzXX+li1b4vbt29i3bx8OHTqEJk2aYMCAAfj9999znff169fw9vaGt7c3NmzYAAsLCyQmJsLb2ztP++Ob12/QpWcXBI4KzDKtbNmySrdDVJB4hYGISI18erPp6dOn4ejoCE1NTVSrVg3p6el4+PAhypcvL/dnbW0tzWNjY4Off/4Z27dvx6+//ooVK1YA+PCrNQC5G02zExYWhnbt2uGHH35AlSpVYG9vj+vXr8vVkclkqFevHgIDA3Hx4kVoa2tjx44d0vRq1aphzJgxOHXqFCpVqoSNGzcCAJydnREWFpZleS4uLgA+/IJ/9+7dLMvLKVZ/f3906NABbm5usLa2RkJCglwdbW3tXNc5t7gKirOzM9LS0uTe5ydPniA2NjbPy6pevTqioqJgZ2eXZX8wMDCQ2h07diyaNGkCZ2dn6UpQpsqVKyMyMlLhfRiFRZnt4uzsnO3n42PVq1dHbGxslu1Rvnx5pUeZsrCwgJ+fH9avX4958+Zh+fLlSs137do1PHnyBNOnT4enpyecnJwUXom5dO6S9P8Xz1/g9q3bsHe0BwC4VHbBreu3sl2HzM8wUWFjwkBEpEYSExMREBCA2NhY/PXXX1iwYAGGDBkCAKhQoQJ8fX3Rs2dPbN++HfHx8Th79iymTZuGvXs/PGNi6NChOHDgAOLj43HhwgUcPXoUzs7OAABbW1vIZDLs2bMHjx49QnJycrYxODo6SlcQYmJi0K9fPzx48ECafubMGUydOhXnzp1DYmIitm/fjkePHsHZ2Rnx8fEYM2YMwsPDcfv2bRw8eBA3btyQYhgxYgRWr16NJUuW4MaNG5gzZw62b9+O4cOHAwC8vLzQoEEDdOrUCYcOHUJ8fDz279+PkJAQhbFu374dkZGRuHTpEr7//vssv1rb2dnh+PHjuHfvntwIQh/LLa6C4ujoiHbt2uGnn37CyZMncenSJfzwww8oXbo02rVrl6e2BgwYgKdPn8LHxwcRERGIi4vDgQMH0KtXL6Snp6N48eIoUaIEli9fjps3b+Lff/9FQECAXBs+Pj6wtrZG+/btERYWhlu3bmHbtm0IDw8vyNXOlTLbJfOq1e+//44bN25g4cKFWfaL8ePHY+3atQgMDERUVBRiYmKwadMmjB07Vqk4xo8fj127duHmzZuIiorCnj17pH03N2XLloW2tjYWLFiAW7duYffu3Zg0aVK2dZfOXorTx0/jRswNjB00FqZmpmjSqgkA4MdBPyIyIlK6ufvGjRvYtWsXb3omlWLCQESkRnr27Im3b9+iVq1aGDBgAIYMGYK+fftK04ODg9GzZ0/8+uuvqFixItq3b4+IiAipq0J6ejoGDBgAZ2dntGjRAhUqVMDixYsBAKVLl0ZgYCBGjx4NKysrhScgY8eORfXq1eHt7Y2GDRtKJ5SZjI2Ncfz4cbRq1QoVKlTA2LFjMXv2bLRs2RL6+vq4du0aOnXqhAoVKqBv374YMGAA+vXrBwBo37495s+fj99//x2urq5YtmwZgoODpSFHAWDbtm2oWbMmfHx84OLigpEjRyq8QjBnzhwUL14cdevWRdu2beHt7Y3
q1avL1QkKCkJCQgIcHBxgYWGRbTvKxFVQgoOD4e7ujjZt2sDDwwNCCOzbty9Ll53clCpVCmFhYUhPT0fz5s3h5uaGoUOHwtTUFBoaGtDQ0MCmTZtw/vx5VKpUCcOGDcOsWbPk2tDW1sbBgwdhaWmJVq1awc3NLcdRqb6k3LZLnTp1sGLFCsyfPx9VqlTBwYMHsyQC3t7e2LNnDw4ePIiaNWuiTp06mDt3LmxtbZWKQVtbG2PGjEHlypXRoEEDaGpqYtOmTUrNa2FhgdWrV2PLli1wcXHB9OnTFXZlGjpuKKb/Nh1dm3bF44ePsXD9Qmhpf1jPiq4VEbwrGNevX4enpyeqVauG8ePHo1SpUkrFQfQlyIT4aIBvIiW8fPkSJiYmePHiBYyNjVUdDpGcd+/eIT4+HuXKlVOqzzIRUWGLehyVax1Xc9d8tZ3TMZDf35RfvMJAREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiEghJgxERERERKQQEwYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiUhNCCPTt2xdmZmaQyWSIjIzMdZ6EhASl66qrhg0bYujQoTnWWb16NUxNTQslHiIikldM1QEQERWaiSaFvLwXeaoeEhKC1atXIzQ0FPb29jA3N/9CgamX7du3Q0tLS3ptZ2eHoUOHyiUR3bp1Q6tWrVQQHRERMWEgIlITcXFxKFmyJOrWravqUAqVmZlZrnX09PSgp6dXCNEQEdGn2CWJiEgN+Pv7Y9CgQUhMTIRMJoOdnR2AD1cd6tevD1NTU5QoUQJt2rRBXFycwnaePXsGX19fWFhYQE9PD46OjggODpam37lzB127doWpqSnMzMzQrl07JCQkKGwvNDQUMpkMe/fuReXKlaGrq4s6derg6tWrcvW2bdsGV1dX6OjowM7ODrNnz5abvnjxYjg6OkJXVxdWVlbo3LmzNO3jLkkNGzbE7du3MWzYMMhkMshkMgDyXZKuX78OmUyGa9euyS1j7ty5cHBwkF5fvXoVLVu2hKGhIaysrNCjRw88fvxY4boSEVH2mDAQEamB+fPnIygoCGXKlEFSUhIiIiIAAK9fv0ZAQADOnTuHI0eOQENDAx06dEBGRka27YwbNw7R0dHYv38/YmJisGTJEqlr0/v37+Ht7Q0jIyOcOHECYWFhMDQ0RIsWLZCamppjfCNGjMDs2bMREREBCwsLtG3bFu/fvwcAnD9/Hl27dkX37t1x5coVTJw4EePGjcPq1asBAOfOncPgwYMRFBSE2NhYhISEoEGDBtkuZ/v27ShTpgyCgoKQlJSEpKSkLHUqVKiAGjVqYMOGDXLlGzZswPfffw8AeP78ORo3boxq1arh3LlzCAkJwYMHD9C1a9cc15OIiLJilyQiIjVgYmICIyMjaGpqwtraWirv1KmTXL0///wTFhYWiI6ORqVKlbK0k5iYiGrVqqFGjRoAIF2pAIDNmzcjIyMDK1eulH65Dw4OhqmpKUJDQ9G8eXOF8U2YMAHNmjUDAKxZswZlypTBjh070LVrV8yZMwdNmjTBuHHjAHw4oY+OjsasWbPg7++PxMREGBgYoE2bNjAyMoKtrS2qVauW7XLMzMygqakJIyMjue3wKV9fXyxcuBCTJk0C8OGqw/nz57F+/XoAwMKFC1GtWjVMnTpVbtvZ2Njg+vXrqFChgsK2iYhIHq8wEBGpsRs3bsDHxwf29vYwNjaWEoDExMRs6//yyy/YtGkTqlatipEjR+LUqVPStEuXLuHmzZswMjKCoaEhDA0NYWZmhnfv3uXYzQkAPDw8pP+bmZmhYsWKiImJAQDExMSgXr16cvXr1auHGzduID09Hc2aNYOtrS3s7e3Ro0cPbNiwAW/evMnP5pB0794dCQkJOH36NIAPVxeqV68OJycnaV2PHj0qraehoaE0Lbd1JSIiebzCQESkxtq2bQtbW1usWLECpUqVQkZGBipVqqSwC1H
Lli1x+/Zt7Nu3D4cOHUKTJk0wYMAA/P7770hOToa7u3uWrjwAYGFh8cXWwcjICBcuXEBoaCgOHjyI8ePHY+LEiYiIiMj3UKnW1tZo3LgxNm7ciDp16mDjxo345ZdfpOnJyclo27YtZsyYkWXekiVL5ndViIiKJF5hICJSU0+ePEFsbCzGjh2LJk2awNnZGc+ePct1PgsLC/j5+WH9+vWYN28eli9fDgCoXr06bty4AUtLS5QvX17uz8Qk5yFnM3/JBz7cWH39+nU4OzsDAJydnREWFiZXPywsDBUqVICmpiYAoFixYmjatClmzpyJy5cvIyEhAf/++2+2y9LW1kZ6enqu6+nr64vNmzcjPDwct27dQvfu3aVp1atXR1RUFOzs7LKsq4GBQa5tExHR/2HCQESkpooXL44SJUpg+fLluHnzJv79918EBATkOM/48eOxa9cu3Lx5E1FRUdizZ490Yu/r6wtzc3O0a9cOJ06cQHx8PEJDQzF48GDcvXs3x3aDgoJw5MgRXL16Ff7+/jA3N0f79u0BAL/++iuOHDmCSZMm4fr161izZg0WLlyI4cOHAwD27NmDP/74A5GRkbh9+zbWrl2LjIwMVKxYMdtl2dnZ4fjx47h3716Ooxp17NgRr169wi+//IJGjRqhVKlS0rQBAwbg6dOn8PHxQUREBOLi4nDgwAH06tVLqWSEiIj+DxMGIiI1paGhgU2bNuH8+fOoVKkShg0bhlmzZuU4j7a2NsaMGYPKlSujQYMG0NTUxKZNmwAA+vr6OH78OMqWLYuOHTvC2dkZvXv3xrt372BsbJxju9OnT8eQIUPg7u6O+/fv459//oG2tjaAD7/m//3339i0aRMqVaqE8ePHIygoCP7+/gAAU1NTbN++HY0bN4azszOWLl2Kv/76C66urtkuKygoCAkJCXBwcMixq5SRkRHatm2LS5cuwdfXV25aqVKlEBYWhvT0dDRv3hxubm4YOnQoTE1NoaHBrz4ioryQCSGEqoOgr8vLly9hYmKCFy9e5HqSQVTY3r17h/j4eJQrVw66urqqDuerFxoaikaNGuHZs2f5vt+AiORFPY7KtY6refYJdW5yOgby+5vyiz+zEBERERGRQkwYiIiIiIhIIQ6rSkRECjVs2BDsuUpEVLTxCgMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREVGhWr16t1BOjZTIZdu7c+cXjISKi3PE5DERUZLitcSvU5V3xu5Kn+g0bNkTVqlUxb968LxOQGujWrRtatWolvZ44cSJ27tyJyMhIuXpJSUkoXrx4IUdHRETZYcJARPQVEUIgPT0dxYp9nYdvPT096Onp5VrP2tq6EKIhIiJlsEsSEZEa8Pf3x7FjxzB//nzIZDLIZDIkJCQgNDQUMpkM+/fvh7u7O3R0dHDy5En4+/ujffv2cm0MHToUDRs2lF5nZGRg2rRpKFeuHPT09FClShVs3bo1xzjs7OwwadIk+Pj4wMDAAKVLl8aiRYvk6iQmJqJdu3YwNDSEsbExunbtigcPHkjTL126hEaNGsHIyAjGxsZwd3fHuXPnAMh3SVq9ejUCAwNx6dIlaZ1Xr14NQL5LUt26dTFq1Ci5GB49egQtLS0cP34cAJCSkoLhw4ejdOnSMDAwQO3atREaGqrEliciotwwYfjGTZ8+HTKZDEOHDpXK3r17hwEDBqBEiRIwNDREp06d5L7siajwzZ8/Hx4eHvjpp5+QlJSEpKQk2NjYSNNHjx6N6dOnIyYmBpUrV1aqzWnTpmHt2rVYunQpoqKiMGzYMPzwww84duxYjvPNmjULVapUwcWLFzF69GgMGTIEhw4dAvAhCWnXrh2ePn2KY8eO4dChQ7h16xa6desmze/r64syZcogIiIC58+fx+jRo6GlpZVlOd26dcOvv/4KV1dXaZ0/bufj9jZt2gQhhFS2efNmlCpVCp6engC
AgQMHIjw8HJs2bcLly5fRpUsXtGjRAjdu3FBqWxERkWJf5zVtUkpERASWLVuW5eRi2LBh2Lt3L7Zs2QITExMMHDgQHTt2RFhYmIoiJSITExNoa2tDX18/2+44QUFBaNasmdLtpaSkYOrUqTh8+DA8PDwAAPb29jh58iSWLVsGLy8vhfPWq1cPo0ePBgBUqFABYWFhmDt3Lpo1a4YjR47gypUriI+PlxKatWvXwtXVFREREahZsyYSExMxYsQIODk5AQAcHR2zXY6enh4MDQ1RrFixHLsgde3aFUOHDsXJkyelBGHjxo3w8fGBTCZDYmIigoODkZiYiFKlSgEAhg8fjpCQEAQHB2Pq1KlKbzciIsqKVxi+UcnJyfD19cWKFSvkbhx88eIFVq1ahTlz5qBx48Zwd3dHcHAwTp06hdOnT6swYiLKSY0aNfJU/+bNm3jz5g2aNWsGQ0ND6W/t2rWIi4vLcd7MBOPj1zExMQCAmJgY2NjYyF39cHFxgampqVQnICAAffr0QdOmTTF9+vRcl5cbCwsLNG/eHBs2bAAAxMfHIzw8HL6+vgCAK1euID09HRUqVJBb12PHjn32somIiAnDN2vAgAFo3bo1mjZtKld+/vx5vH//Xq7cyckJZcuWRXh4eGGHSURKMjAwkHutoaEh10UHAN6/fy/9Pzk5GQCwd+9eREZGSn/R0dG53sfwuSZOnIioqCi0bt0a//77L1xcXLBjx47PatPX1xdbt27F+/fvsXHjRri5ucHN7cOoV8nJydDU1MT58+fl1jUmJgbz588viFUiIirS2CXpG7Rp0yZcuHABERERWabdv38f2traWcZBt7Kywv3797NtLyUlBSkpKdLrly9fFmi8RPSBtrY20tPTlaprYWGBq1evypVFRkZK9wq4uLhAR0cHiYmJOXY/ys6nVxtPnz4NZ2dnAICzszPu3LmDO3fuSFcZoqOj8fz5c7i4uEjzVKhQARUqVMCwYcPg4+OD4OBgdOjQIcuylF3ndu3aoW/fvggJCcHGjRvRs2dPaVq1atWQnp6Ohw8fSl2WiIio4DBh+MbcuXNHukFRV1e3QNqcNm0aAgMDC6QtIlLMzs4OZ86cQUJCAgwNDWFmZqawbuPGjTFr1iysXbsWHh4eWL9+Pa5evYpq1aoBAIyMjDB8+HAMGzYMGRkZqF+/Pl68eIGwsDAYGxvDz89PYdthYWGYOXMm2rdvj0OHDmHLli3Yu3cvAKBp06Zwc3ODr68v5s2bh7S0NPTv3x9eXl6oUaMG3r59ixEjRqBz584oV64c7t69i4iICHTq1EnhOsfHxyMyMhJlypSBkZERdHR0stQzMDBA+/btMW7cOMTExMDHx0eaVqFCBfj6+qJnz56YPXs2qlWrhkePHuHIkSOoXLkyWrdurdT2p69P1OOoXOu4mrsWQiRE3zZ2SfrGnD9/Hg8fPkT16tVRrFgxFCtWDMeOHcMff/yBYsWKwcrKCqmpqXj+/LncfA8ePFB40+GYMWPw4sUL6e/OnTuFsCZERc/w4cOhqakJFxcXWFhYIDExUWFdb29vjBs3DiNHjkTNmjXx6tUruV/dAWDSpEkYN24cpk2bBmdnZ7Ro0QJ79+5FuXLlcozj119/xblz51CtWjVMnjwZc+bMgbe3N4APw53u2rULxYsXR4MGDdC0aVPY29tj8+bNAABNTU08efIEPXv2RIUKFdC1a1e0bNlS4Y8OnTp1QosWLdCoUSNYWFjgr7/+UhiXr68vLl26BE9PT5QtW1ZuWnBwMHr27Ilff/0VFStWRPv27REREZGlHhER5Z1MfNoJlr5qr169wu3bt+XKevXqBScnJ4waNQo2NjbSl3LmL36xsbFwcnJCeHg46tSpk+syXr58CRMTE7x48QLGxsZfZD2I8uvdu3eIj49HuXLlCuwqW1FiZ2eHoUOHyg3FTKSuvtYrDF8y7pyOgfz+pvxil6RvjJGRESpVqiRXZmBggBIlSkjlvXv3RkB
AAMzMzGBsbIxBgwbBw8NDqWSBiIiIiIoWJgxF0Ny5c6GhoYFOnTohJSUF3t7eWLx4sarDIiIiIiI1xIShCAgNDZV7rauri0WLFmHRokWqCYiI1FZCQoKqQyAiIjXDm56JiIiIiEghJgxERERERKQQEwYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIqVMnDgRVatWVbp+QkICZDIZIiMjFdaxs7PDvHnzlG5z9erVMDU1Vbq+IjKZDDt37lQ4XQiBvn37wszMLNd1UCfKbM+8vo8A0LBhQ7V5+ndoaChkMhmeP3+u9DwFEX9B7XtEXyM+h4GIiowYJ+dCXZ7ztZg81T9+/DhmzZqF8+fPIykpCTt27ED79u2/THCUo5CQEKxevRqhoaGwt7eHubm5qkPKF5lMlmU/Gj58OAYNGqS6oBRISEhAuXLlcPHixTwnNET0ZfEKAxGRmnj9+jWqVKnChyqqgbi4OJQsWRJ169aFtbU1ihXL++9rQgikpaV9geg+j6GhIUqUKKHqMIjoK8KEgYhITbRs2RKTJ09Ghw4dlJ4ns3vJn3/+ibJly8LQ0BD9+/dHeno6Zs6cCWtra1haWmLKlCly8yUmJqJdu3YwNDSEsbExunbtigcPHsjVmT59OqysrGBkZITevXvj3bt3WZa/cuVKODs7Q1dXF05OTli8eHH+Vv7/mzNnDtzc3GBgYAAbGxv0798fycnJWert3LkTjo6O0NXVhbe3N+7cuSM3fdeuXahevTp0dXVhb2+PwMBApU/e/f39MWjQICQmJkImk8HOzg4AkJKSgsGDB8PS0hK6urqoX78+IiIipPkyu8rs378f7u7u0NHRwcmTJ7O0n9lV6++//4anpyf09PRQs2ZNXL9+HREREahRowYMDQ3RsmVLPHr0SJovu2417du3h7+/f7brkRl3hw4d5Nbj0y5J/v7+aN++PQIDA2FhYQFjY2P8/PPPSE1NVbiNUlJSMHz4cJQuXRoGBgaoXbs2QkNDFdYHgGvXrqF+/frQ1dWFi4sLDh8+LNc1rFy5cgCAatWqQSaToWHDhjm2l+n50+cY0XcEGrs1Ro2yNdChQQfs274vS720tDQMHDgQJiYmMDc3x7hx4yCEyPc6Xbp0CY0aNYKRkRGMjY3h7u6Oc+fOKRUz0deGCQMR0VcuLi4O+/fvR0hICP766y+sWrUKrVu3xt27d3Hs2DHMmDEDY8eOxZkzZwAAGRkZaNeuHZ4+fYpjx47h0KFDuHXrFrp16ya1+ffff2PixImYOnUqzp07h5IlS2ZJBjZs2IDx48djypQpiImJwdSpUzFu3DisWbMm3+uioaGBP/74A1FRUVizZg3+/fdfjBw5Uq7OmzdvMGXKFKxduxZhYWF4/vw5unfvLk0/ceIEevbsiSFDhiA6OhrLli3D6tWrsyRNisyfPx9BQUEoU6YMkpKSpKRg5MiR2LZtG9asWYMLFy6gfPny8Pb2xtOnT+XmHz16NKZPn46YmBhUrlxZ4XImTJiAsWPH4sKFCyhWrBi+//57jBw5EvPnz8eJEydw8+ZNjB8/XtlNl0Vm3MHBwXLrkZ0jR44gJiYGoaGh+Ouvv7B9+3YEBgYqrD9w4ECEh4dj06ZNuHz5Mrp06YIWLVrgxo0b2dZPT09H+/btoa+vjzNnzmD58uX47bff5OqcPXsWAHD48GEkJSVh+/btSq1nSkoKXKq4YPHGxdhxfAc69+iMMf3H4MqFK3L11qxZg2LFiuHs2bOYP38+5syZg5UrV+Z7nXx9fVGmTBlERETg/PnzGD16NLS0tJSKmehrw3sYiIi+chkZGfjzzz9hZGQEFxcXNGrUCLGxsdi3bx80NDRQsWJFzJgxA0ePHkXt2rVx5MgRXLlyBfHx8bCxsQEArF27Fq6uroiIiEDNmjUxb9489O7dG7179wYATJ48GYcPH5a7yjBhwgTMnj0bHTt2BPDhF+LME3Q/P798rcvHv6D
b2dlh8uTJ+Pnnn+WSlffv32PhwoWoXbs2gA8ngs7Ozjh79ixq1aqFwMBAjB49WorB3t4ekyZNwsiRIzFhwoRcYzAxMYGRkRE0NTVhbW0N4EN3sSVLlmD16tVo2bIlAGDFihU4dOgQVq1ahREjRkjzBwUFoVmzZrkuZ/jw4fD29gYADBkyBD4+Pjhy5Ajq1asHAOjduzdWr16dazuKWFhYAABMTU2l9VBEW1sbf/75J/T19eHq6oqgoCCMGDECkyZNgoaG/G+LiYmJCA4ORmJiIkqVKiWtS0hICIKDgzF16tQs7R86dAhxcXEIDQ2VYpkyZYrcdsqMt0SJErnG+zGrklboNaCX9Nr3J1+EHQ1DyK4QuFV3k8ptbGwwd+5cyGQyVKxYEVeuXMHcuXPx008/5WudEhMTMWLECDg5OQEAHB0dlY6Z6GvDhIGI6CtnZ2cHIyMj6bWVlRU0NTXlTvSsrKzw8OFDAEBMTAxsbGykZAEAXFxcYGpqipiYGNSsWRMxMTH4+eef5Zbj4eGBo0ePAvhwAh0XF4fevXvjp59+kuqkpaXBxMQk3+ty+PBhTJs2DdeuXcPLly+RlpaGd+/e4c2bN9DX1wcAFCtWDDVr1pTmcXJykmKvVasWLl26hLCwMLkrCunp6VnayYu4uDi8f/9eOpkHAC0tLdSqVQsxMfI3t9eoUUOpNj+++mBlZQUAcHNzkyvLfM++tCpVqshtFw8PDyQnJ+POnTuwtbWVq3vlyhWkp6ejQoUKcuUpKSkK742IjY2FjY2NXCJQq1atAok9PT0dK+atwIFdB/Ag6QHep77H+9T30NPTk6tXp04dyGQy6bWHhwdmz56N9PT0fK1TQEAA+vTpg3Xr1qFp06bo0qULHBwcCmSdiNQNEwYioq/cp90gZDJZtmUZGRkFtszM+wpWrFgh/dKfSVNTM19tJiQkoE2bNvjll18wZcoUmJmZ4eTJk+jduzdSU1OVPtFPTk5GYGCgdOXjY7q6uvmKLS8MDAyUqvfxe5R5Ivtp2cfvmYaGhlyfe+DD1ZbClpycDE1NTZw/fz7Le21oaFjo8QQvDMb65esxavIoODo7Ql9fH9PHTs/TtsnPOk2cOBHff/899u7di/3792PChAnYtGlTnu5BIvpaMGEgIipinJ2dcefOHdy5c0e6yhAdHY3nz5/DxcVFqnPmzBn07NlTmu/06dPS/62srFCqVCncunULvr6+BRLX+fPnkZGRgdmzZ0tXR/7+++8s9dLS0nDu3DnpF+rY2Fg8f/4czs4fhs2tXr06YmNjUb58+QKJCwAcHBygra2NsLAw6Rf39+/fIyIiotCeT2BhYYGkpCTpdXp6Oq5evYpGjRopnEdLSwvp6em5tn3p0iW8fftW+lX+9OnTMDQ0lLsKlalatWpIT0/Hw4cP4enpqVTsFStWxJ07d/DgwQPpasqn91Roa2tL65UXF89eRKMWjdC2S1sAH7ro3Y67DYeK8r/2Z97Dk+n06dNwdHSEpqZmvtYJACpUqIAKFSpg2LBh8PHxQXBwMBMG+iYxYSAiUhPJycm4efOm9Do+Ph6RkZEwMzND2bJlC2w5TZs2hZubG3x9fTFv3jykpaWhf//+8PLykrrTDBkyBP7+/qhRowbq1auHDRs2ICoqCvb29lI7gYGBGDx4MExMTNCiRQukpKTg3LlzePbsGQICAvIcV/ny5fH+/XssWLAAbdu2RVhYGJYuXZqlnpaWFgYNGoQ//vgDxYoVw8CBA1GnTh0pgRg/fjzatGmDsmXLonPnztDQ0MClS5dw9epVTJ48OV/bzMDAAL/88gtGjBghvR8zZ87EmzdvpPs8vrTGjRsjICAAe/fuhYODA+bMmZPrw8vs7Oyk+yJ0dHRQvHjxbOulpqaid+/eGDt2LBISEjBhwgQMHDgwy/0LwIeTZF9fX/Ts2ROzZ89GtWrV8OjRIxw5cgSVK1dG69ats8zTrFkzODg4wM/PDzNnzsSrV68wduxYAP9
3dcXS0hJ6enoICQlBmTJloKurq1T3Nlt7Wxz85yAunr0IY1NjrF2yFk8ePcmSMCQmJiIgIAD9+vXDhQsXsGDBAsyePTtf6/T27VuMGDECnTt3Rrly5XD37l1ERESgU6dOucZL9DViwkBERUZeH6RW2M6dOyf3a3HmSbefn99n3fz6KZlMhl27dmHQoEFo0KABNDQ00KJFCyxYsECq061bN8TFxWHkyJF49+4dOnXqhF9++QUHDhyQ6vTp0wf6+vqYNWsWRowYAQMDA7i5ueX7F/cqVapgzpw5mDFjBsaMGYMGDRpg2rRpclc5AEBfXx+jRo3C999/j3v37sHT0xOrVq2Spnt7e2PPnj0ICgrCjBkzoKWlBScnJ/Tp0ydfcWWaPn06MjIy0KNHD7x69Qo1atTAgQMHFJ6EF7Qff/wRly5dQs+ePVGsWDEMGzYsx6sLADB79mwEBARgxYoVKF26NBISErKt16RJEzg6OqJBgwZISUmBj48PJk6cqLDd4OBgTJ48Gb/++ivu3bsHc3Nz1KlTB23atMm2vqamJnbu3Ik+ffqgZs2asLe3x6xZs9C2bVupm1ixYsXwxx9/ICgoCOPHj4enp2euQ7UCQL+Afrh7+y76de0HXX1ddOnRBY1bNkbyK/nheHv27Im3b9+iVq1a0NTUxJAhQ9C3b998rZOmpiaePHmCnj174sGDBzA3N0fHjh1zHFmK6GsmE592iCTKxcuXL2FiYoIXL17A2NhY1eEQyXn37h3i4+NRrly5QumvTvS18/f3x/Pnz6XnIRSWsLAw1K9fHzdv3sz3zcJRj6NyreNq7pqvtr+kLxl3TsdAfn9TfvEKAxEREX1xO3bsgKGhIRwdHXHz5k0MGTIE9erV48hCRF8BJgxERET0xb169QqjRo1CYmIizM3N0bRpU+keAiJSb0wYiIiIirCCvD8mJz179sxyPwoRfR2yDn9ARERERET0/zFhIKJvEsdzIKKiiMc++hKYMBDRNyXzSblv3rxRcSRERIUv89j36dPeiT4H72Egom+KpqYmTE1N8fDhQwAfxuzPfDAUEX1bMt5n5FrnStIVpdpyLO74ueEoTZm43717l6c2hRB48+YNHj58CFNTU2hqauY3PKIsmDAQ0TfH2toaAKSkgYi+TQ+TC+4zXux54Z0SKRN3fuMxNTWVjoFEBYUJAxF9c2QyGUqWLAlLS0u8f/9e1eEQ0RcyZMeQAmtrd4fdBdZWbpSJOz/xaGlp8coCfRFMGIjom6WpqckvT6JvWFJqUoG1VZhPhlcmbj6pntQJb3pWQ+np6YiMjMSzZ89UHQoRERERFXFMGNTA0KFDsWrVKgAfkgUvLy9Ur14dNjY2CA0NVW1wRERERFSkMWFQA1u3bkWVKlUAAP/88w/i4+Nx7do1DBs2DL/99puKoyMiIiKioowJgxp4/PixNKLBvn370KVLF1SoUAE//vgjrlxRbjg4IiIiIqIvgQmDGrCyskJ0dDTS09MREhKCZs2aAfjw8BXesElEREREqsRRktRAr1690LVrV5QsWRIymQxNmzYFAJw5cwZOTk4qjo6IiIiIijImDGpg4sSJqFSpEu7cuYMuXbpAR0cHwIchIUePHq3i6IiIiIioKGPCoCY6d+6cpczPz08FkRARERER/R8mDCryxx9/KF138ODBXzASIiIiIiLFmDCoyNy5c5WqJ5PJmDAQERERkcowYVCR+Ph4VYdARERERJQrDquqRlJTUxEbG4u0tDRVh0JEREREBIAJg1p48+YNevfuDX19fbi6uiIxMREAMGjQIEyfPl3F0RERERFRUcaEQQ2MGTMGly5dQmhoKHR1daXypk2bYvPmzSqMjIiIiIiKOt7DoAZ27tyJzZs3o06dOpDJZFK5q6sr4uLiVBgZERERERV1vMKgBh49egRLS8ss5a9fv5ZLIIiIiIiIChsTBjVQo0YN7N27V3qdmSSsXLkSHh4eqgqLiIiIiIhdktTB1KlT0bJ
lS0RHRyMtLQ3z589HdHQ0Tp06hWPHjqk6PCIiIiIqwniFQQ3Ur18fkZGRSEtLg5ubGw4ePAhLS0uEh4fD3d1d1eERERERURHGKwxqwsHBAStWrFB1GEREREREcpgwqIn09HTs2LEDMTExAAAXFxe0a9cOxYrxLSIiIiIi1eHZqBqIiorCd999h/v376NixYoAgBkzZsDCwgL//PMPKlWqpOIIiYiIiKio4j0MaqBPnz5wdXXF3bt3ceHCBVy4cAF37txB5cqV0bdvX1WHR0RERERFGK8wqIHIyEicO3cOxYsXl8qKFy+OKVOmoGbNmiqMjIiIiIiKOl5hUAMVKlTAgwcPspQ/fPgQ5cuXV0FEREREREQfMGFQkZcvX0p/06ZNw+DBg7F161bcvXsXd+/exdatWzF06FDMmDFD1aESERERURHGLkkqYmpqKj3RGQCEEOjatatUJoQAALRt2xbp6ekqiZGIiIiIiAmDihw9elTVIRARERER5YoJg4p4eXmpOgQiIiIiolwxYVAjb968QWJiIlJTU+XKK1eurKKIiIiIiKioY8KgBh49eoRevXph//792U7nPQxEREREpCocJUkNDB06FM+fP8eZM2egp6eHkJAQrFmzBo6Ojti9e7eqwyMiIiKiIoxXGNTAv//+i127dqFGjRrQ0NCAra0tmjVrBmNjY0ybNg2tW7dWdYhEREREVETxCoMaeP36NSwtLQF8eMLzo0ePAABubm64cOGCKkMjIiIioiKOCYMaqFixImJjYwEAVapUwbJly3Dv3j0sXboUJUuWVHF0RERERFSUsUuSGhgyZAiSkpIAABMmTECLFi2wYcMGaGtrY/Xq1aoNjoiIiIiKNCYMauCHH36Q/u/u7o7bt2/j2rVrKFu2LMzNzVUYGREREREVdUwY1JC+vj6qV6+u6jCIiIiIiJgwqEpAQIDSdefMmaN03SVLlmDJkiVISEgAALi6umL8+PFo2bIlAODdu3f49ddfsWnTJqSkpMDb2xuLFy+GlZVVnuInIiIioqKBCYOKXLx4Ual6MpksT+2WKVMG06dPh6OjI4QQWLNmDdq1a4eLFy/C1dUVw4YNw969e7FlyxaYmJhg4MCB6NixI8LCwvKzGkRERET0jWPCoCJHjx79Iu22bdtW7vWUKVOwZMkSnD59GmXKlMGqVauwceNGNG7cGAAQHBwMZ2dnnD59GnXq1PkiMRERERHR14vDqn7D0tPTsWnTJrx+/RoeHh44f/483r9/j6ZNm0p1nJycULZsWYSHhytsJyUlBS9fvpT7IyIiIqKigQnDN+jKlSswNDSEjo4Ofv75Z+zYsQMuLi64f/8+tLW1YWpqKlffysoK9+/fV9jetGnTYGJiIv3Z2Nh84TUgIiIiInXBhOEbVLFiRURGRuLMmTP45Zdf4Ofnh+jo6Hy3N2bMGLx48UL6u3PnTgFGS0RERETqjPcwfIO0tbVRvnx5AB+e6xAREYH58+ejW7duSE1NxfPnz+WuMjx48ADW1tYK29PR0YGOjs6XDpuIiIiI1BCvMBQBGRkZSElJgbu7O7S0tHDkyBFpWmxsLBITE+Hh4aHCCImIiIhIXTFhUANr1qzB3r17pdcjR46Eqakp6tati9u3b+eprTFjxuD48eNISEjAlStXMGbMGISGhsLX1xcmJibo3bs3AgICcPToUZw/fx69evWCh4cHR0giIiIiomwxYVADU6dOhZ6eHgAgPDwcixYtwsyZM2Fubo5hw4blqa2HDx+iZ8+eqFixIpo0aYKIiAgcOHAAzZo1AwDMnTsXbdq0QadOndCgQQNYW1tj+/btBb5ORERERPRt4D0MauDOnTvSPQc7d+5Ep06d0LdvX9SrVw8NGzbMU1urVq3Kcbquri4WLVqERYsW5TdcIiIiIipCeIVBDRgaGuLJkycAgIMHD0pXA3R1dfH27VtVhkZERERERRyvMKiBZs2aoU+fPqhWrRquX7+OVq1aAQCioqJgZ2e
n2uCIiIiIqEjjFQY1sGjRInh4eODRo0fYtm0bSpQoAQA4f/48fHx8VBwdERERERVlvMKgBkxNTbFw4cIs5YGBgSqIhoiIiIjo/zBhUKHExES512XLllVRJERERERE2WPCoEJ2dnaQyWQQQkAmkyE9PV3VIRERERERyWHCoEIZGRmqDoGIiIiIKEe86ZmIiIiIiBTiFQYV2b17t9J1v/vuuy8YCRERfevc1rjlWueK35VCiISIvkZMGFSkffv2StXjvQ1EREREpEpMGFSE9y8QERER0deA9zComXfv3qk6BCIiIiIiCRMGNZCeno5JkyahdOnSMDQ0xK1btwAA48aNw6pVq1QcHREREREVZUwY1MCUKVOwevVqzJw5E9ra2lJ5pUqVsHLlShVGRkRERERFHRMGNbB27VosX74cvr6+0NTUlMqrVKmCa9euqTAyIiIiIirqmDCogXv37qF8+fJZyjMyMvD+/XsVRERERERE9AFHSVIDLi4uOHHiBGxtbeXKt27dimrVqqkoKiIiIsorZZ55QfS1YcKgBsaPHw8/Pz/cu3cPGRkZ2L59O2JjY7F27Vrs2bNH1eERERERURHGLklqoF27dvjnn39w+PBhGBgYYPz48YiJicE///yDZs2aqTo8IiIiIirCeIVBTXh6euLQoUOqDoOIiIiISA6vMBARERERkUK8wqAixYsXh0wmU6ru06dPv3A0RERERETZY8KgIvPmzZP+/+TJE0yePBne3t7w8PAAAISHh+PAgQMYN26ciiIkIiIiImLCoDJ+fn7S/zt16oSgoCAMHDhQKhs8eDAWLlyIw4cPY9iwYaoIkYiIiIiICYM6OHDgAGbMmJGlvEWLFhg9erQKIiIiIlItPs+ASH3wpmc1UKJECezatStL+a5du1CiRAkVRERERERE9AGvMKiBwMBA9OnTB6GhoahduzYA4MyZMwgJCcGKFStUHB0RERERFWVMGNSAv78/nJ2d8ccff2D79u0AAGdnZ5w8eVJKIIiIiIiIVIEJg5qoXbs2NmzYoOowiIiIiIjkMGFQE+np6di5cydiYmIAAK6urvjuu++gqamp4siIiIiIqChjwqAGbt68idatW+Pu3buoWLEiAGDatGmwsbHB3r174eDgoOIIiYiIiKio4ihJamDw4MGwt7fHnTt3cOHCBVy4cAGJiYkoV64cBg8erOrwiIiIiKgI4xUGNXDs2DGcPn0aZmZmUlmJEiUwffp01KtXT4WREREREVFRxysMakBHRwevXr3KUp6cnAxtbW0VRERERERE9AETBjXQpk0b9O3bF2fOnIEQAkIInD59Gj///DO+++47VYdHREREREUYEwY18Mcff8DBwQEeHh7Q1dWFrq4u6tWrh/Lly2P+/PmqDo+IiIiIijDew6AGTE1NsWvXLty4cQPXrl0D8OHBbeXLl1dxZERERERU1DFhUCOOjo5wdHRUdRhERERERBImDGpACIGtW7fi6NGjePjwITIyMuSmb9++XUWREREREVFRx4RBDQwdOhTLli1Do0aNYGVlBZlMpuqQiIiI8sVtjVuuda74XSmESAqeMutG9C1iwqAG1q1bh+3bt6NVq1aqDoWIiIiISA5HSVIDJiYmsLe3V3UYRERERERZMGFQAxMnTkRgYCDevn2r6lCIiIiIiOSwS5Ia6Nq1K/766y9YWlrCzs4OWlpactMvXLigosiIiIiIqKhjwqAG/Pz8cP78efzwww+86ZmIiIiI1AoTBjWwd+9eHDhwAPXr11d1KEREREREcngPgxqwsbGBsbGxqsMgIiIiIsqCVxjUwOzZszFy5EgsXboUdnZ2qg6HiIgoWzFOzrlXGvN1nlrwGQtEin2dn+pvzA8//IA3b97AwcEB+vr6WW56fvr0qYoiIyIiIqKijgmDGpg3b56qQyAiIiIiyhYTBjXg5+en6hCIiIiIiLLFm56JiIiIiEghJgxERERERKQQEwYiIiIiIlKI9zCoyOXLl1G
pUiVoaDBnIyKir0PXr3TIVCL6PDxbVZFq1arh8ePHAAB7e3s8efJExREREREREWXFhEFFTE1NER8fDwBISEhARkaGiiMiIiIiIsqK1xZVpFOnTvDy8kLJkiUhk8lQo0YNaGpqZlv31q1bhRwdEREREdEHTBhUZPny5ejYsSNu3ryJwYMH46effoKRkZGqwyIiIiIiksOEQYVatGgBADh//jyGDBnChIGIiIiI1A4TBjUQHBws/f/u3bsAgDJlyqgqHCIiIiIiCW96VgMZGRkICgqCiYkJbG1tYWtrC1NTU0yaNIk3QxMRERGRSvEKgxr47bffsGrVKkyfPh316tUDAJw8eRITJ07Eu3fvMGXKFBVHSERERERFFRMGNbBmzRqsXLkS3333nVRWuXJllC5dGv3792fCQEREREQqwy5JauDp06dwcnLKUu7k5ISnT5+qICIiIiIiog+YMKiBKlWqYOHChVnKFy5ciCpVquSprWnTpqFmzZowMjKCpaUl2rdvj9jYWLk67969w4ABA1CiRAkYGhqiU6dOePDgwWetAxERERF9m9glSQ3MnDkTrVu3xuHDh+Hh4QEACA8Px507d7Bv3748tXXs2DEMGDAANWvWRFpaGv73v/+hefPmiI6OhoGBAQBg2LBh2Lt3L7Zs2QITExMMHDgQHTt2RFhYWIGvGxERERF93ZgwqAEvLy9cv34dixYtwrVr1wAAHTt2RP/+/VGqVKk8tRUSEiL3evXq1bC0tMT58+fRoEEDvHjxAqtWrcLGjRvRuHFjAB+GdXV2dsbp06dRp06dglkpIiIiIvomMGFQE6VKlfoiNze/ePECAGBmZgbgw0Pi3r9/j6ZNm0p1nJycULZsWYSHhzNhICIiIiI5TBi+YRkZGRg6dCjq1auHSpUqAQDu378PbW1tmJqaytW1srLC/fv3s20nJSUFKSkp0uuXL19+sZiJiIiISL3wpudv2IABA3D16lVs2rTps9qZNm0aTExMpD8bG5sCipCIiIiI1B0Thm/UwIEDsWfPHhw9ehRlypSRyq2trZGamornz5/L1X/w4AGsra2zbWvMmDF48eKF9Hfnzp0vGToRERERqREmDComhEBiYiLevXtXYO0NHDgQO3bswL///oty5crJTXd3d4eWlhaOHDkilcXGxiIxMVEaoelTOjo6MDY2lvsjIiIioqKB9zComBAC5cuXR1RUFBwdHT+7vQEDBmDjxo3YtWsXjIyMpPsSTExMoKenBxMTE/Tu3RsBAQEwMzODsbExBg0aBA8PD97wTERERERZMGFQMQ0NDTg6OuLJkycFkjAsWbIEANCwYUO58uDgYPj7+wMA5s6dCw0NDXTq1AkpKSnw9vbG4sWLP3vZRERERPTtYcKgBqZPn44RI0ZgyZIl0mhG+SWEyLWOrq4uFi1ahEWLFn3WsoiIiIjo28eEQQ307NkTb968QZUqVaCtrQ09PT256U+fPlVRZERERERU1DFhUAPz5s1TdQhERESFxm2Nm6pDIKI8YMKgBvz8/FQdAhERERFRtjisqpqIi4vD2LFj4ePjg4cPHwIA9u/fj6ioKBVHRkRERERFGRMGNXDs2DG4ubnhzJkz2L59O5KTkwEAly5dwoQJE1QcHREREREVZUwY1MDo0aMxefJkHDp0CNra2lJ548aNcfr0aRVGRkRERERFHRMGNXDlyhV06NAhS7mlpSUeP36sgoiIiIiIiD5gwqAGTE1NkZSUlKX84sWLKF26tAoiIiIiIiL6gAmDGujevTtGjRqF+/fvQyaTISMjA2FhYRg+fDh69uyp6vCIiIiIqAhjwqAGpk6dCicnJ9jY2CA5ORkuLi5o0KAB6tati7Fjx6o6PCIiIiIqwvgcBjWgra2NFStWYNy4cbh69SqSk5NRrVo1ODo6qjo0IiIiIirimDCokbJly8LGxgYAIJPJVBwNERERERG7JKmNVatWoVKlStDV1YWuri4qVaqElStXqjosIiIiIir
ieIVBDYwfPx5z5szBoEGD4OHhAQAIDw/HsGHDkJiYiKCgIBVHSERERERFFRMGNbBkyRKsWLECPj4+Utl3332HypUrY9CgQUwYiIiIiEhl2CVJDbx//x41atTIUu7u7o60tDQVRERERERE9AETBjXQo0cPLFmyJEv58uXL4evrq4KIiIiIiIg+YJckFQkICJD+L5PJsHLlShw8eBB16tQBAJw5cwaJiYl8cBsRERERqRQTBhW5ePGi3Gt3d3cAQFxcHADA3Nwc5ubmiIqKKvTYiIiIiIgyMWFQkaNHj6o6BCIiIiKiXPEeBiIiIiIiUohXGNTAu3fvsGDBAhw9ehQPHz5ERkaG3PQLFy6oKDIiIiIiKuqYMKiB3r174+DBg+jcuTNq1aoFmUym6pCIiIiIiAAwYVALe/bswb59+1CvXj1Vh0JEREREJIf3MKiB0qVLw8jISNVhEBERERFlwYRBDcyePRujRo3C7du3VR0KEREREZEcdklSAzVq1MC7d+9gb28PfX19aGlpyU1/+vSpiiIjIiIioqKOCYMa8PHxwb179zB16lRYWVnxpmciIiIiUhtMGNTAqVOnEB4ejipVqqg6FCIiIiIiObyHQQ04OTnh7du3qg6DiIiIiCgLJgxqYPr06fj1118RGhqKJ0+e4OXLl3J/RERERESqwi5JaqBFixYAgCZNmsiVCyEgk8mQnp6uirCIiIiIiJgwqIOjR4+qOgQiIiIiomwxYVADXl5eqg6BiIiIiChbTBjUwPHjx3Oc3qBBg0KKhIiIiIhIHhMGNdCwYcMsZR8/i4H3MBARERGRqnCUJDXw7Nkzub+HDx8iJCQENWvWxMGDB1UdHhEREREVYbzCoAZMTEyylDVr1gza2toICAjA+fPnVRAVERERERGvMKg1KysrxMbGqjoMIiIiIirCeIVBDVy+fFnutRACSUlJmD59OqpWraqaoIiIiIiIwIRBLVStWhUymQxCCLnyOnXq4M8//1RRVERE9DVwW+Om6hCI6BvHhEENxMfHy73W0NCAhYUFdHV1VRQREREREdEHTBjUgK2trapDICIiIiLKFhMGNXHkyBEcOXIEDx8+REZGhtw0dksiIiIiIlVhwqAGAgMDERQUhBo1aqBkyZJyD20jIiIiIlIlJgxqYOnSpVi9ejV69Oih6lCIiIiIiOTwOQxqIDU1FXXr1lV1GEREREREWTBhUAN9+vTBxo0bVR0GEREREVEW7JKkBt69e4fly5fj8OHDqFy5MrS0tOSmz5kzR0WREREREVFRx4RBDVy+fFl6ovPVq1flpvEGaCIiIiJSJSYMauDo0aOqDoGIiIiIKFu8h4GIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUK86ZmIiIjgtsZN1SEQkZriFQYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCE+h4GIiEhNxTg5515pDL/KiejL4hUGIiIiIiJSiAkDEREREREpxIThG3P8+HG0bdsWpUqVgkwmw86dO+WmCyEwfvx4lCxZEnp6emjatClu3LihmmCJiIiISO0xYfjGvH79GlWqVMGiRYuynT5z5kz88ccfWLp0Kc6cOQMDAwN4e3vj3bt3hRwpEREREX0NeKfUN6Zly5Zo2bJlttOEEJg3bx7Gjh2Ldu3aAQDWrl0LKysr7Ny5E927dy/MUImIiIjoK8ArDEVIfHw87t+/j6ZNm0plJiYmqF27NsLDwxXOl5KSgpcvX8r9EREREVHRwCsMRcj9+/cBAFZWVnLlVlZW0rTsTJs2DYGBgV80NiIiyqorh0wlIjXAKwyUqzFjxuDFixfS3507d1QdEhEREREVEiYMRYi1tTUA4MGDB3LlDx48kKZlR0dHB8bGxnJ/RERERFQ0MGEoQsqVKwdra2scOXJEKnv58iXOnDkDDw8PFUZGREREROqKnSO/Mcn
Jybh586b0Oj4+HpGRkTAzM0PZsmUxdOhQTJ48GY6OjihXrhzGjRuHUqVKoX379qoLmoiIiIjUFhOGb8y5c+fQqFEj6XVAQAAAwM/PD6tXr8bIkSPx+vVr9O3bF8+fP0f9+vUREhICXV1dVYVMRERERGpMJoQQqg6Cvi4vX76EiYkJXrx4wfsZiIi+ILc1bqoOgVTkit+VAm+T39+UX7yHgYiIiIiIFGKXJCIiIhXg1QMi+lrwCgMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREREREpBCfw0BERJQHyjw/4Us8pZeISFV4hYGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREREREpBATBiIiIiIiUogJAxERERERKcSEgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiAkDEREREREpxISBiIiIiIgUYsJAREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiEihYqoOgIiIKCdua9xyrXPF70qhtUNEVNTwCgMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREREREpBCfw0BERF89ZZ6x8DW2Q0SkDniFgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiAkDEREREREpxISBiIiIiIgUYsJAREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiEghJgxERERERKRQMVUHQJTFRBMl6rz48nHQN8tu9N4CaSdheusCaUfduK1xK5B2rvhdKZB2iIhItXiFgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiM9hIFInfAbFZyuoZywU1LKUeVZDYbajDCPnAmmmwJ7nQEREqsUrDEREREREpBATBiIiIiIiUogJQxG2aNEi2NnZQVdXF7Vr18bZs2dVHRIRERERqRkmDEXU5s2bERAQgAkTJuDChQuoUqUKvL298fDhQ1WHRkRERERqhAlDETVnzhz89NNP6NWrF1xcXLB06VLo6+vjzz//VHVoRERERKRGmDAUQampqTh//jyaNm0qlWloaKBp06YIDw9XYWREREREpG44rGoR9PjxY6Snp8PKykqu3MrKCteuXctSPyUlBSkpKdLrFy8+DOv58uXLLxNgisi9zpdatqoV5XUvIBkpb1QdghxlPifKxFxQ7Sgj/W16gbRDRPn3Jb5jM9sUQonvGqKPMGGgXE2bNg2BgYFZym1sbFQQzf83XYnnFXyrivK6f4VM5qlXO0T0dTD55csd61+9egUTE36XkPKYMBRB5ubm0NTUxIMHD+TKHzx4AGtr6yz1x4wZg4CAAOl1RkYGnj59ihIlSkAmkxVobC9fvoSNjQ3u3LkDY2PjAm2b/g+3c+Hgdi4c3M6Fg9u58HypbS2EwKtXr1CqVKkCa5OKBiYMRZC2tjbc3d1x5MgRtG/fHsCHJODIkSMYOHBglvo6OjrQ0dGRKzM1Nf2iMRobG/MLqRBwOxcObufCwe1cOLidC8+X2Na8skD5wYShiAoICICfnx9q1KiBWrVqYd68eXj9+jV69eql6tCIiIiISI0wYSiiunXrhkePHmH8+PG4f/8+qlatipCQkCw3QhMRERFR0caEoQgbOHBgtl2QVElHRwcTJkzI0gWKCha3c+Hgdi4c3M6Fg9u58HBbk7qRCY6tRURERERECvDBbUREREREpBATBiIiIiIiUogJAxERERERKcSEgYiIiIiIFGLCQIVu0aJFsLOzg66uLmrXro2zZ8/mWH/Lli1
wcnKCrq4u3NzcsG/fvkKK9OuWl+28YsUKeHp6onjx4ihevDiaNm2a6/tCH+R1f860adMmyGQy6eGJlLO8bufnz59jwIABKFmyJHR0dFChQgUeO5SQ1+08b948VKxYEXp6erCxscGwYcPw7t27Qor263T8+HG0bdsWpUqVgkwmw86dO3OdJzQ0FNWrV4eOjg7Kly+P1atXf/E4ieQIokK0adMmoa2tLf78808RFRUlfvrpJ2FqaioePHiQbf2wsDChqakpZs6cKaKjo8XYsWOFlpaWuHLlSiFH/nXJ63b+/vvvxaJFi8TFixdFTEyM8Pf3FyYmJuLu3buFHPnXJa/bOVN8fLwoXbq08PT0FO3atSucYL9ied3OKSkpokaNGqJVq1bi5MmTIj4+XoSGhorIyMhCjvzrktftvGHDBqGjoyM2bNgg4uPjxYEDB0TJkiXFsGHDCjnyr8u+ffvEb7/9JrZv3y4AiB07duRY/9atW0JfX18EBASI6OhosWDBAqGpqSlCQkIKJ2AiIQQTBipUtWrVEgMGDJBep6eni1KlSolp06ZlW79r166idevWcmW1a9cW/fr1+6Jxfu3yup0/lZaWJoyMjMSaNWu+VIjfhPxs57S0NFG3bl2xcuVK4efnx4RBCXndzkuWLBH29vYiNTW1sEL8JuR1Ow8YMEA0btxYriwgIEDUq1fvi8b5LVEmYRg5cqRwdXWVK+vWrZvw9vb+gpERyWOXJCo0qampOH/+PJo2bSqVaWhooGnTpggPD892nvDwcLn6AODt7a2wPuVvO3/qzZs3eP/+PczMzL5UmF+9/G7noKAgWFpaonfv3oUR5lcvP9t59+7d8PDwwIABA2BlZYVKlSph6tSpSE9PL6ywvzr52c5169bF+fPnpW5Lt27dwr59+9CqVatCibmo4PcgqQM+6ZkKzePHj5Geng4rKyu5cisrK1y7di3bee7fv59t/fv373+xOL92+dnOnxo1ahRKlSqV5UuK/k9+tvPJkyexatUqREZGFkKE34b8bOdbt27h33//ha+vL/bt24ebN2+if//+eP/+PSZMmFAYYX918rOdv//+ezx+/Bj169eHEAJpaWn4+eef8b///a8wQi4yFH0Pvnz5Em/fvoWenp6KIqOihFcYiEjO9OnTsWnTJuzYsQO6urqqDueb8erVK/To0QMrVqyAubm5qsP5pmVkZMDS0hLLly+Hu7s7unXrht9++w1Lly5VdWjflNDQUEydOhWLFy/GhQsXsH37duzduxeTJk1SdWhEVMB4hYEKjbm5OTQ1NfHgwQO58gcPHsDa2jrbeaytrfNUn/K3nTP9/vvvmD59Og4fPozKlSt/yTC/enndznFxcUhISEDbtm2lsoyMDABAsWLFEBsbCwcHhy8b9FcoP/tzyZIloaWlBU1NTanM2dkZ9+/fR2pqKrS1tb9ozF+j/GzncePGoUePHujTpw8AwM3NDa9fv0bfvn3x22+/QUODv0kWBEXfg8bGxry6QIWGn2YqNNra2nB3d8eRI0eksoyMDBw5cgQeHh7ZzuPh4SFXHwAOHTqksD7lbzsDwMyZMzFp0iSEhISgRo0ahRHqVy2v29nJyQlXrlxBZGSk9Pfdd9+hUaNGiIyMhI2NTWGG/9XIz/5cr1493Lx5U0rIAOD69esoWbIkkwUF8rOd37x5kyUpyEzShBBfLtgiht+DpBZUfdc1FS2bNm0SOjo6YvXq1SI6Olr07dtXmJqaivv37wshhOjRo4cYPXq0VD8sLEwUK1ZM/P777yImJkZMmDCBw6oqIa/befr06UJbW1ts3bpVJCUlSX+vXr1S1Sp8FfK6nT/FUZKUk9ftnJiYKIyMjMTAgQNFbGys2LNnj7C0tBSTJ09W1Sp8FfK6nSdMmCCMjIzEX3/9JW7duiUOHjwoHBwcRNeuXVW1Cl+FV69eiYsXL4qLFy8KAGLOnDni4sWL4vbt20IIIUaPHi169Ogh1c8cVnXEiBEiJiZ
GLFq0iMOqUqFjwkCFbsGCBaJs2bJCW1tb1KpVS5w+fVqa5uXlJfz8/OTq//3336JChQpCW1tbuLq6ir179xZyxF+nvGxnW1tbASDL34QJEwo/8K9MXvfnjzFhUF5et/OpU6dE7dq1hY6OjrC3txdTpkwRaWlphRz11ycv2/n9+/di4sSJwsHBQejq6gobGxvRv39/8ezZs8IP/Cty9OjRbI+3mdvWz89PeHl5ZZmnatWqQltbW9jb24vg4OBCj5uKNpkQvG5IRERERETZ4z0MRERERESkEBMGIiIiIiJSiAkDEREREREpxISBiIiIiIgUYsJAREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiEghJgxERERERKQQEwYiIlLa+/fvVR0CEREVMiYMRERqLCQkBPXr14epqSlKlCiBNm3aIC4uTpp+9+5d+Pj4wMzMDAYGBqhRowbOnDkjTf/nn39Qs2ZN6OrqwtzcHB06dJCmyWQy7Ny5U255pqamWL16NQAgISEBMpkMmzdvhpeXF3R1dbFhwwY8efIEPj4+KF26NPT19eHm5oa//vpLrp2MjAzMnDkT5cuXh46ODsqWLYspU6YAABo3boyBAwfK1X/06BG0tbVx5MiRgthsRERUgJgwEBGpsdevXyMgIADnzp3DkSNHoKGhgQ4dOiAjIwPJycnw8vLCvXv3sHv3bly6dAkjR45ERkYGAGDv3r3o0KEDWrVqhYsXL+LIkSOoVatWnmMYPXo0hgwZgpiYGHh7e+Pdu3dwd3fH3r17cfXqVfTt2xc9evTA2bNnpXnGjBmD6dOnY9y4cYiOjsbGjRthZWUFAOjTpw82btyIlJQUqf769etRunRpNG7c+DO3GBERFTSZEEKoOggiIlLO48ePYWFhgStXruDUqVMYPnw4EhISYGZmlqVu3bp1YW9vj/Xr12fblkwmw44dO9C+fXupzNTUFPPmzYO/vz8SEhJQrlw5zJs3D0OGDMkxrjZt2sDJyQm///47Xr16BQsLCyxcuBB9+vTJUvfdu3coVaoUli5diq5duwIAqlSpgo4dO2LChAl52BpERFQYeIWBiEiN3bhxAz4+PrC3t4exsTHs7OwAAImJiYiMjES1atWyTRYAIDIyEk2aNPnsGGrUqCH3Oj09HZMmTYKbmxvMzMxgaGiIAwcOIDExEQAQExODlJQUhcvW1dVFjx498OeffwIALly4gKtXr8Lf3/+zYyUiooJXTNUBEBGRYm3btoWtrS1WrFiBUqVKISMjA5UqVUJqair09PRynDe36TKZDJ9eZM7upmYDAwO517NmzcL8+fMxb948uLm5wcDAAEOHDkVqaqpSywU+dEuqWrUq7t69i+DgYDRu3Bi2tra5zkdERIWPVxiIiNTUkydPEBsbi7Fjx6JJkyZwdnbGs2fPpOmVK1dGZGQknj59mu38lStXzvEmYgsLCyQlJUmvb9y4gTdv3uQaV1hYGNq1a4cffvgBVapUgb29Pa5fvy5Nd3R0hJ6eXo7LdnNzQ40aNbBixQps3LgRP/74Y67LJSIi1WDCQESkpooXL44SJUpg+fLluHnzJv79918EBARI0318fGBtbY327dsjLCwMt27dwrZt2xAeHg4AmDBhAv766y9MmDABMTExuHLlCmbMmCHN37hxYyxcuBAXL17EuXPn8PPPP0NLSyvXuBwdHXHo0CGcOnUKMTEx6NevHx48eCBN19XVxahRozBy5EisXbsWcXFxOH36NFatWiXXTp8+fTB9+nSI/9fOHaqoFkUBGF6oiPgAgsFkEhTkdEEsgi8gx7cQjWoWxO4jCDaj3XTwGTQLPoFhpg3XcO6Eyx0nfF/dOyxO+9mH9fHxsr0JgN9FMAD8UoVCIfb7fVwul2i32zGdTmOz2Xydl8vlOJ1OUavVYjQaRafTifV6HcViMSIi+v1+HA6HOB6P0e12YzAYvGwy2m630Wg0otfrxWQyifl8HtVq9du5FotFJEk
Sw+Ew+v3+V7T8ablcxmw2i9VqFa1WK8bjcdzv95c7aZpGqVSKNE2jUqn8w5cC4H+yJQmAt7jdbtFsNiPLskiS5N3jAJBDMADwo57PZzwej5jP53G9XuN8Pr97JAD+wi9JAPyo8/kc9Xo9siyL3W737nEA+IYXBgAAIJcXBgAAIJdgAAAAcgkGAAAgl2AAAAByCQYAACCXYAAAAHIJBgAAIJdgAAAAcgkGAAAg1ycFfSLmeKC8awAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/stardist_labels.tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACNnElEQVR4nOzdd1gU1/s28HtBehXpNhBFUbFhQ0UsKNbYCyECxqiJFTG2X2ygEUvsMdYESzQmGlss2CIWREUUC2IHUQM2FMSCAuf9w5f5urILC7IUvT/XxaV75syZZ2ZnZ+fZOXNGJoQQICIiIiIiUkCjuAMgIiIiIqKSiwkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKqUBKG6dOnQyaTyZXZ2dnBz8+vMJrPVXx8PGQyGdauXSuV+fn5wdDQUO3LziaTyTB9+vQiW15BREZGolmzZjAwMIBMJkN0dHS+27Czs0OXLl0KPzgqNqGhoahXrx50dXUhk8nw7NmzfLchk8kwYsSIwg/uE6KOY0SrVq3QqlWrQm1TFWvXroVMJsPZs2eLfNmfolatWqF27drFHYbaZe838fHx+Z5X0TlGYQkLC4NMJkNYWFi+51XHuUZxfa4/hp+fH+zs7Ao8/6dwTCnoMV4d666uz0uJusKwd+/eEnviXZJjy8vbt2/Rp08fJCcnY+HChdiwYQMqV66ssO6VK1cwffr0Ah3UqXR58uQJ+vbtCz09PSxbtgwbNmyAgYGBwronT57E9OnTC5RQUMF9Lp/H0nx8VcV///2H6dOnF+iHGlLMzs7uk95nPkW//PKL3I+7JVlpirWolFFXw9euXYOGRv7ykb1792LZsmX5OghUrlwZr169gpaWVj4jzJ/cYnv16hXKlFHbpvxot27dwp07d7B69Wp88803uda9cuUKAgMD0apVq4/6xYBKvsjISDx//hwzZsyAh4dHrnVPnjyJwMBA+Pn5wdTUtGgC/IQU9BiR2+fxwIEDhRRd8SvIsb80+e+//xAYGAg7OzvUq1evuMMhKha//PILzM3Ni6T3yccqTbEWFbWd5ero6KiraQBARkYGsrKyoK2tDV1dXbUuKy/Fvfy8PHz4EAB4oqcGWVlZePPmTYnfBxThflF01LF/aGtrF3qbREREiuS7S9KJEyfQqFEj6OrqwsHBAStXrlRY78N7GN6+fYvAwEBUq1YNurq6KFeuHFq0aIGDBw8CeNcHbtmyZQDe9QXL/gP+d5/CTz/9hEWLFsHBwQE6Ojq4cuWKwnsYst2+fRuenp4wMDCAra0tgoKCIISQpivru/hhm7nFll324S9j58+fR8eOHWFsbAxDQ0O0bdsWp06dkquT3XctPDwcAQEBsLCwgIGBAXr06IFHjx4pfgM+8O+//8LNzQ0GBgYwNTVFt27dEBsbK0338/
ODu7s7AKBPnz6QyWRK+0euXbsWffr0AQC0bt1aWs8Pt8+JEyfQuHFj6OrqokqVKli/fn2Otp49ewZ/f39UrFgROjo6qFq1KubMmYOsrKw812nnzp3o3LkzbG1toaOjAwcHB8yYMQOZmZk56p4+fRqdOnVC2bJlYWBggDp16mDx4sVyda5evYq+ffvCwsICenp6qF69On744Qe5baToaoqifoDZ/fU3btyIWrVqQUdHB6GhoQCAn376Cc2aNUO5cuWgp6cHFxcXbN26VeE6/v7772jcuDH09fVRtmxZtGzZUvrF2NfXF+bm5nj79m2O+dq3b4/q1avnvgEBbNmyBS4uLtDT04O5uTm++uor3L9/X5reqlUr+Pr6AgAaNWoEmUym9JeU6dOnY9y4cQAAe3t7ab/4sJvMjh07ULt2bejo6KBWrVrSdnnf/fv38fXXX8PKykqq99tvv+W5PgAQEhKCNm3awNLSEjo6OqhZsyaWL1+eo97Zs2fh6ekJc3Nz6Onpwd7eHl9//bVcnc2bN8PFxQVGRkYwNjaGs7Nzjv3m9u3b6NOnD8zMzKCvr4+mTZtiz549OZb3+vVrTJ8+HY6OjtDV1YWNjQ169uyJW7duSXU+PEbcuXMHw4YNQ/Xq1aGnp4dy5cqhT58+cts0r8+jor7ODx8+xKBBg2BlZQVdXV3UrVsX69atk6vz/vF01apV0vG0UaNGiIyMVLr9P/Ty5UsMHToU5cqVg7GxMXx8fPD06dMc9fbt2ycdo4yMjNC5c2fExMRI03M7vjZo0AA9e/aUa8/Z2RkymQwXL16Uyv7880/IZDK5Y5+q+1p6ejqmTZuGqlWrQkdHBxUrVsT48eORnp4uVy/7s6/Kfv6+sLAwNGrUCAAwcOBAaf0+/M66cuUKWrduDX19fZQvXx5z584tcKyKZN8rcfHiRbi7u0NfXx9Vq1aVjlFHjx5FkyZNpGPkoUOHcrShyvcaAMTExKBNmzbQ09NDhQoVMHPmTKXH/rz2D1XldY6RH8ePH0efPn1QqVIlaTuPGTMGr169Ulg/r3MN4N2PS4sWLUKtWrWgq6sLKysrDB06VOFn5kNLly5FrVq1pO+Lhg0bYtOmTbnO8+bNG0ydOhUuLi4wMTGBgYEB3NzccOTIEbl6+T0eZO//urq6qF27NrZv355n/MC7c8KYmBgcPXpU+gx8ePxKT09X6VzoY/aZ7P3//X0zJCRE7jtNlVjzosox/n2FdTxV5uDBg2jRogVMTU1haGiI6tWr4//+7//ytU75usJw6dIltG/fHhYWFpg+fToyMjIwbdo0WFlZ5Tnv9OnTERwcjG+++QaNGzdGamoqzp49i3PnzqFdu3YYOnQo/vvvPxw8eBAbNmxQ2EZISAhev36NIUOGQEdHB2ZmZkoPQpmZmejQoQOaNm2KuXPnIjQ0FNOmTUNGRgaCgoLys9oqxfa+mJgYuLm5wdjYGOPHj4eWlhZWrlyJVq1aSQfl940cORJly5bFtGnTEB8fj0WLFmHEiBH4888/c13OoUOH0LFjR1SpUgXTp0/Hq1evsHTpUjRv3hznzp2DnZ0dhg4divLly2PWrFkYNWoUGjVqpPT9atmyJUaNGoUlS5bg//7v/+Dk5AQA0r8AcPPmTfTu3RuDBg2Cr68vfvvtN/j5+cHFxQW1atUC8G7Hd3d3x/379zF06FBUqlQJJ0+exKRJk5CYmIhFixblul5r166FoaEhAgICYGhoiH///RdTp05Famoq5s2bJ9U7ePAgunTpAhsbG4wePRrW1taIjY3F7t27MXr0aADvDg5ubm7Q0tLCkCFDYGdnh1u3buGff/7Bjz/+mGscyvz777/466+/MGLECJibm0vJxuLFi/HFF1/A29sbb968webNm9GnTx/s3r0bnTt3luYPDAzE9OnT0axZMwQFBUFbWxunT5/Gv//+i/bt22PAgAFYv3499u/fL3eTeVJSEv79919MmzYtz+03cOBANGrUCMHBwX
jw4AEWL16M8PBwnD9/Hqampvjhhx9QvXp1rFq1CkFBQbC3t4eDg4PC9nr27Inr16/jjz/+wMKFC2Fubg4AsLCwkOqcOHEC27Ztw7Bhw2BkZIQlS5agV69eSEhIQLly5QAADx48QNOmTaUTLwsLC+zbtw+DBg1Camoq/P39c12v5cuXo1atWvjiiy9QpkwZ/PPPPxg2bBiysrIwfPhwAO9OmLOPURMnToSpqSni4+Oxbds2qZ2DBw/Cy8sLbdu2xZw5cwAAsbGxCA8Pl/abBw8eoFmzZnj58iVGjRqFcuXKYd26dfjiiy+wdetW9OjRA8C740yXLl1w+PBh9O/fH6NHj8bz589x8OBBXL58Wek2jYyMxMmTJ9G/f39UqFAB8fHxWL58OVq1aoUrV65AX19fpc/j+169eoVWrVrh5s2bGDFiBOzt7bFlyxb4+fnh2bNn0rpl27RpE54/f46hQ4dCJpNh7ty56NmzJ27fvq1SF88RI0bA1NQU06dPx7Vr17B8+XLcuXNH+iEGADZs2ABfX194enpizpw5ePnyJZYvX44WLVrg/Pnz0jFK2fHVzc0Nf/zxh/Q6OTkZMTEx0NDQwPHjx1GnTh0A707wLCwspG2j6r6WlZWFL774AidOnMCQIUPg5OSES5cuYeHChbh+/Tp27NghF48q+/mHnJycEBQUhKlTp2LIkCFwc3MDADRr1kyq8/TpU3To0AE9e/ZE3759sXXrVkyYMAHOzs7o2LFjgWJV5OnTp+jSpQv69++PPn36YPny5ejfvz82btwIf39/fPvtt/jyyy8xb9489O7dG3fv3oWRkREA1b/XkpKS0Lp1a2RkZGDixIkwMDDAqlWroKenlyMeVfYPVeV1jpEfW7ZswcuXL/Hdd9+hXLlyOHPmDJYuXYp79+5hy5YtcnVVPdcYOnSodGweNWoU4uLi8PPPP+P8+fMIDw9X+plbvXo1Ro0ahd69e2P06NF4/fo1Ll68iNOnT+PLL79Uug6pqalYs2YNvLy8MHjwYDx//hy//vorPD09cebMmRxd41Q5Hhw4cAC9evVCzZo1ERwcjCdPnmDgwIGoUKFCntt00aJFGDlyJAwNDaUf7D48F1HlXOhj9pn79+9LP75MmjQJBgYGWLNmTY4eMarEmhdVjvHvK6zjqSIxMTHo0qUL6tSpg6CgIOjo6ODmzZsIDw/P1zpB5EP37t2Frq6uuHPnjlR25coVoampKT5sqnLlysLX11d6XbduXdG5c+dc2x8+fHiOdoQQIi4uTgAQxsbG4uHDhwqnhYSESGW+vr4CgBg5cqRUlpWVJTp37iy0tbXFo0ePhBBCHDlyRAAQR44cybNNZbEJIQQAMW3aNOl19+7dhba2trh165ZU9t9//wkjIyPRsmVLqSwkJEQAEB4eHiIrK0sqHzNmjNDU1BTPnj1TuLxs9erVE5aWluLJkydS2YULF4SGhobw8fGRyrLXc8uWLbm2J4QQW7ZsUbhNhHj3ngIQx44dk8oePnwodHR0xNixY6WyGTNmCAMDA3H9+nW5+SdOnCg0NTVFQkJCrjG8fPkyR9nQoUOFvr6+eP36tRBCiIyMDGFvby8qV64snj59Klf3/W3ZsmVLYWRkJLfPfljH19dXVK5cOccyp02bluM9ByA0NDRETExMnnG/efNG1K5dW7Rp00Yqu3HjhtDQ0BA9evQQmZmZCmPKzMwUFSpUEP369ZObvmDBAiGTycTt27dzLPv9ZVpaWoratWuLV69eSeW7d+8WAMTUqVOlsuz9LzIyUml72ebNmycAiLi4uBzTAAhtbW1x8+ZNqezChQsCgFi6dKlUNmjQIGFjYyMeP34sN3///v2FiYmJwvf9fYqme3p6iipVqkivt2/fnuc6jR49WhgbG4uMjAyldfz9/QUAcfz4cans+fPnwt7eXtjZ2Unv3W+//SYAiAULFuRo4/197MNjhKJ1iYiIEADE+vXrpbLcPo/u7u7C3d1der1o0SIBQP
z+++9S2Zs3b4Srq6swNDQUqampQoj/Hd/KlSsnkpOTpbo7d+4UAMQ///yjdLsI8b/9xsXFRbx580Yqnzt3rgAgdu7cKYR4t71MTU3F4MGD5eZPSkoSJiYmcuXKjq/Z63/lyhUhhBC7du0SOjo64osvvpD7fNSpU0f06NFDeq3qvrZhwwahoaEh9z4LIcSKFSsEABEeHi6VqbqfKxIZGZnjOyWbu7t7jvc9PT1dWFtbi169ekll+YlVkezlbNq0SSq7evWqdEw7deqUVL5///4c8ar6vZb92Tl9+rRU9vDhQ2FiYiJ3DMnP/qHoWPwhVc4xFFF0HqDo8xkcHCxkMpncd4mq5xrHjx8XAMTGjRvl2gwNDc1R/uHnulu3bqJWrVr5Xq+MjAyRnp4uV/b06VNhZWUlvv76a6ksP8eDevXqCRsbG7lzkwMHDggACr9DP1SrVi25dcum6rlQfvYZRUaOHClkMpk4f/68VPbkyRNhZmaW4/tNWazKFPQYr47j6Yefl4ULFwoA0v5YUCp3ScrMzMT+/fvRvXt3VKpUSSp3cnKCp6dnnvObmpoiJiYGN27cUHWROfTq1UvuV828vD/UY/YvTW/evFF4qbWwZGZm4sCBA+jevTuqVKkildvY2ODLL7/EiRMnkJqaKjfPkCFD5Lq+uLm5ITMzE3fu3FG6nMTERERHR8PPzw9mZmZSeZ06ddCuXTvs3bu3ENfqf2rWrCn9Qga8+5W5evXquH37tlS2ZcsWuLm5oWzZsnj8+LH05+HhgczMTBw7dizXZbz/S9Tz58/x+PFjuLm54eXLl7h69SqAd5fG4+Li4O/vn6MPfva2fPToEY4dO4avv/5abp99v05BuLu7o2bNmrnG/fTpU6SkpMDNzQ3nzp2Tynfs2IGsrCxMnTo1x6AA2TFpaGjA29sbu3btwvPnz6XpGzduRLNmzWBvb680trNnz+Lhw4cYNmyYXL/5zp07o0aNGgq71BQGDw8PuV/T69SpA2NjY2m/EELg77//RteuXSGEkNsvPD09kZKSIredFHl/+6akpODx48dwd3fH7du3kZKSAuB/92Ps3r1bYZeu7DovXrzItavC3r170bhxY7Ro0UIqMzQ0xJAhQxAfH48rV64AAP7++2+Ym5tj5MiROdrIbR97f13evn2LJ0+eoGrVqjA1Nc1zO+QWs7W1Nby8vKQyLS0tjBo1CmlpaTh69Khc/X79+qFs2bLS6+zP9fuf5dwMGTJE7lfR7777DmXKlJGOPQcPHsSzZ8/g5eUl935ramqiSZMmObpGKJIdU/Yx4/jx42jUqBHatWuH48ePA3jX/fHy5ctS3fzsa1u2bIGTkxNq1KghV69NmzYAkCPGvPbzgjI0NMRXX30lvdbW1kbjxo1zHFfzE6uy5fTv3196Xb16dZiamsLJyUnuynf2/7OXn5/vtb1796Jp06Zo3LixVM/CwgLe3t5ysRTG/vG+wjjHyPb+5/PFixd4/PgxmjVrBiEEzp8/n6N+XucaW7ZsgYmJCdq1aye3ri4uLjA0NMx1XU1NTXHv3r18dRcEAE1NTek+p6ysLCQnJyMjIwMNGzZUeIzJ63iQfc7h6+sLExMTqV67du0Ufh8WRF7nQh+7z4SGhsLV1VXu6oqZmVmOfbMw5PcYr87jafb34s6dO1XqFq6MygnDo0eP8OrVK1SrVi3HNFX6VAcFBeHZs2dwdHSEs7Mzxo0bJ9cHVRW5nSh9SENDQ+7ABgCOjo4AoNYhCh89eoSXL18q3CZOTk7IysrC3bt35co/PJnN/tDm1rcx+wOkbDmPHz/Gixcv8h1/Xj6MFXgX7/ux3rhxA6GhobCwsJD7yx6JJ/tmW2ViYmLQo0cPmJiYwNjYGBYWFtKXafaJYXb/8NzGL88+0BX2GOfK9sPdu3ejadOm0NXVhZmZGSwsLLB8+XIpZuBd3BoaGnkeYH18fPDq1Supf+i1a9cQFRWFAQMG5DpfbvtFjRo1ck1CP0Ze+8
WjR4/w7NkzrFq1Ksd+MXDgQAB57xfh4eHw8PCQ7texsLCQ+mBmb2N3d3f06tULgYGBMDc3R7du3RASEiLXx3vYsGFwdHREx44dUaFCBXz99dc5+qHfuXNH6Wcrezrw7v2sXr16vkdAevXqFaZOnSrd42Nubg4LCws8e/ZMbn/Jjzt37qBatWo5EtEPY85WkOPO+z78LjA0NISNjY10fM0+cWvTpk2O9/zAgQN5vt/Au24A1apVk5KD48ePw83NDS1btsR///2H27dvIzw8HFlZWdIJTn72tRs3biAmJiZHvezvig9jVOX4VxAVKlTIkWAqOq7mJ1ZVl2NiYoKKFSvmKAMg9/lV9Xstez/80IfzFsb+8b7COMfIlpCQIP0YZ2hoCAsLC+lewA8/n6qca9y4cQMpKSmwtLTMsa5paWm5ruuECRNgaGiIxo0bo1q1ahg+fLjKXUnWrVuHOnXqSPd0WFhYYM+ePQqPMXkdD7KPHwU9B1RFXjF87D5z584dVK1aNUe5orKPld9jvDqPp/369UPz5s3xzTffwMrKCv3798dff/2V7+ShyMYCbdmyJW7duoWdO3fiwIEDWLNmDRYuXIgVK1bkOdRnNkV9ID+Gsl8AFd1cq06ampoKy8UHN02VBKrEmpWVhXbt2mH8+PEK62YfTBV59uwZ3N3dYWxsjKCgIDg4OEBXVxfnzp3DhAkTPio7Via/+4Gi/fD48eP44osv0LJlS/zyyy+wsbGBlpYWQkJC8rw5TZGaNWvCxcUFv//+O3x8fPD7779DW1sbffv2zXdbRSGv/SL7ffvqq6+km60/lN0fXZFbt26hbdu2qFGjBhYsWICKFStCW1sbe/fuxcKFC6X2ZTIZtm7dilOnTuGff/7B/v378fXXX2P+/Pk4deoUDA0NYWlpiejoaOzfvx/79u3Dvn37EBISAh8fnxw3CKvLyJEjERISAn9/f7i6usLExAQymQz9+/dXyz6uiLqPO9nrsWHDBlhbW+eYrmqS1aJFCxw+fBivXr1CVFQUpk6ditq1a8PU1BTHjx9HbGwsDA0NUb9+fbnlqrKvZWVlwdnZGQsWLFBY78MTaXVtM1WPq/mJNT/LKY7voMLaP7IVxjkG8O64365dOyQnJ2PChAmoUaMGDAwMcP/+ffj5+RXo85mVlQVLS0ts3LhR4fTcek44OTnh2rVr2L17N0JDQ/H333/jl19+wdSpUxEYGKh0vt9//x1+fn7o3r07xo0bB0tLS2hqaiI4OFhuQIZsJeE8RNXvkcLaZ9SpsI/xH7Puenp6OHbsGI4cOYI9e/YgNDQUf/75J9q0aYMDBw4o3e45lqFqsNkjzCi63Hft2jWV2jAzM8PAgQMxcOBApKWloWXLlpg+fbr0YS7MJ9NlZWXh9u3bcien169fBwDpxpDs7PXDh1Ep+hVW1dgsLCygr6+vcJtcvXoVGhoaKh3Y85L94DVlyzE3N1f6EK7cFMZ74ODggLS0tDzH9lckLCwMT548wbZt29CyZUupPC4uLscyAODy5ctKl5P9q8/ly5dzXWbZsmUVPpAsP7/G//3339DV1cX+/fvlbqAKCQnJEXdWVhauXLmS53jsPj4+CAgIQGJiIjZt2oTOnTvLXTJW5P39IrurQrZr164pfWBfXj52v7CwsICRkREyMzMLtF/8888/SE9Px65du+R+hVJ2GbZp06Zo2rQpfvzxR2zatAne3t7YvHmzdKzR1tZG165d0bVrV2RlZWHYsGFYuXIlpkyZgqpVq6Jy5cpKP1vA/7azg4MDTp8+jbdv3+brWTBbt26Fr68v5s+fL5W9fv06x36Yn+1euXJlXLx4EVlZWXJXGT6MubDcuHEDrVu3ll6npaUhMTERnTp1AvC/z6ilpWWe73lu6+nm5oaQkBBs3rwZmZmZaNasGTQ0NNCiRQspYWjWrJn0pZeffc3BwQEXLlxA27Zt1fYkYaDwjqtFEasi+fleq1y5skrnCfnZP1SV1zmGKi
5duoTr169j3bp18PHxkcqVdWFU5VzDwcEBhw4dQvPmzQv0w6eBgQH69euHfv364c2bN+jZsyd+/PFHTJo0SemQzVu3bkWVKlWwbds2uf0lr0EzlMk+fnzMOeDH7rcfu89UrlwZN2/ezFGuqOxjY1X1GJ+tMI+nimhoaKBt27Zo27YtFixYgFmzZuGHH37AkSNHVG5P5S5Jmpqa8PT0xI4dO5CQkCCVx8bGYv/+/XnO/+TJE7nXhoaGqFq1qlxXgewT3MJ6muzPP/8s/V8IgZ9//hlaWlpo27YtgHc7j6amZo4+9b/88kuOtlSNTVNTE+3bt8fOnTvluj49ePAAmzZtQosWLWBsbFzANfofGxsb1KtXD+vWrZOL6fLlyzhw4IC0k+VXYbwHffv2RUREhML94tmzZ8jIyFA6b/aX/vu/arx58ybHe9KgQQPY29tj0aJFOWLNntfCwgItW7bEb7/9JrfPfti+g4MDUlJS5C5fJyYmqjxcXHbcMplM7qpEfHx8jpFLunfvDg0NDQQFBeX4leHDX3K8vLwgk8kwevRo3L59W66PszINGzaEpaUlVqxYIffZ2rdvH2JjY+VGa8qPj90vNDU10atXL/z9998KE7i8hhFWtF+kpKTkSMiePn2aYztmJ2bZ2+PDY5GGhob0i3N2nU6dOuHMmTOIiIiQ6r148QKrVq2CnZ2d1KWsV69eePz4sdyxJltuv8xpamrmmL506dIcV7Xys907deqEpKQkuRFFMjIysHTpUhgaGkpdKgrLqlWr5O4TWb58OTIyMqRRfTw9PWFsbIxZs2YpvJ/k/fc8t/XM7mo0Z84c1KlTR+ou4+bmhsOHD+Ps2bNy91XlZ1/r27cv7t+/j9WrV+eo9+rVq0Lr1llYx9WiiFWR/HyvderUCadOncKZM2ekeo8ePcrx63p+9g9VqHKOoQpFxxohRI5hl9+X17lG3759kZmZiRkzZuSYNyMjI9f94sP10tbWRs2aNSGEUHqflrL1OH36tNwxLT/eP+d4v0vNwYMHpXu68mJgYPBRn4GP3Wc8PT0REREh98T15ORkhVd+PjZWVY/x2QrzePqh5OTkHGUffi+qIl/XbwIDAxEaGgo3NzcMGzZM+jKqVatWnn0Fa9asiVatWsHFxQVmZmY4e/Ystm7dKnezkIuLCwBg1KhR8PT0hKamptwNWvmhq6uL0NBQ+Pr6okmTJti3bx/27NmD//u//5Mu/5mYmKBPnz5YunQpZDIZHBwcsHv3boV9wfIT28yZM6Uxb4cNG4YyZcpg5cqVSE9PVzi2dkHNmzcPHTt2hKurKwYNGiQNq2piYlLgJ6bWq1cPmpqamDNnDlJSUqCjoyONfa+qcePGYdeuXejSpYs05OqLFy9w6dIlbN26FfHx8dLQnB9q1qwZypYtC19fX4waNQoymQwbNmzI8cHT0NDA8uXL0bVrV9SrVw8DBw6EjY0Nrl69ipiYGClZWbJkCVq0aIEGDRpgyJAhsLe3R3x8PPbs2SMdNPr3748JEyagR48eGDVqlDRUmaOjo8o3oHbu3BkLFixAhw4d8OWXX+Lhw4dYtmwZqlatKvfZqFq1Kn744QfMmDEDbm5u6NmzJ3R0dBAZGQlbW1sEBwdLdS0sLNChQwds2bIFpqamKp3sa2lpYc6cORg4cCDc3d3h5eUlDatqZ2eHMWPGqLQ+H8re/3/44Qf0798fWlpa6Nq1a76uYs2ePRtHjhxBkyZNMHjwYNSsWRPJyck4d+4cDh06pPCglq19+/bSVYGhQ4ciLS0Nq1evhqWlJRITE6V669atwy+//IIePXrAwcEBz58/x+rVq2FsbCwl0d988w2Sk5PRpk0bVKhQAXfu3MHSpUtRr149qb//xIkT8ccff6Bjx44YNWoUzMzMsG7dOsTFxeHvv/+WfsH38fHB+vXrERAQgDNnzsDNzQ0vXrzAoUOHMGzYMHTr1k3h+nTp0gUbNmyAiYkJatasiYiICBw6dCjH0J
z5+TwOGTIEK1euhJ+fH6KiomBnZ4etW7ciPDwcixYtkobHLCxv3rxB27Zt0bdvX1y7dg2//PILWrRogS+++AIAYGxsjOXLl2PAgAFo0KAB+vfvDwsLCyQkJGDPnj1o3ry5dKKV2/G1atWqsLa2xrVr1+RuLm/ZsiUmTJgAAHIJA6D6vjZgwAD89ddf+Pbbb3HkyBE0b94cmZmZuHr1Kv766y/s378fDRs2/Oht5eDgAFNTU6xYsQJGRkYwMDBAkyZN8nVfXlHFqoyq32vjx4/Hhg0b0KFDB4wePVoaVjX7Cli2/OwfqlDlHEMVNWrUgIODA77//nvcv38fxsbG+Pvvv5Xep6LKuYa7uzuGDh2K4OBgREdHo3379tDS0sKNGzewZcsWLF68GL1791bYfvv27WFtbY3mzZvDysoKsbGx+Pnnn9G5c+dcP9NdunTBtm3b0KNHD3Tu3BlxcXFYsWIFatasibS0tHxtk2zBwcHo3LkzWrRoga+//hrJycnSOaAqbbq4uGD58uWYOXMmqlatCktLyxxXwnPzsfvM+PHj8fvvv6Ndu3YYOXKkNKxqpUqVkJycLHdV4WNjVfUYn60wj6cfCgoKwrFjx9C5c2dUrlwZDx8+xC+//IIKFSrIDeyRp/wOq3T06FHh4uIitLW1RZUqVcSKFSsUDnn24bCqM2fOFI0bNxampqZCT09P1KhRQ/z4449yw0hlZGSIkSNHCgsLCyGTyaQ2s4f9mjdvXo54lA2ramBgIG7duiXat28v9PX1hZWVlZg2bVqOoSwfPXokevXqJfT19UXZsmXF0KFDxeXLl3O0qSw2IXIOpyWEEOfOnROenp7C0NBQ6Ovri9atW4uTJ0/K1VE2rKWy4V4VOXTokGjevLnQ09MTxsbGomvXrtIQhB+2p8qwqkIIsXr1alGlShVpuNzsOCpXrqxw2LoPh4ET4t0QYJMmTRJVq1YV2trawtzcXDRr1kz89NNPcu+5IuHh4aJp06ZCT09P2NraivHjx0vD/H24TU6cOCHatWsnjIyMhIGBgahTp06OIQ4vX74sevToIUxNTYWurq6oXr26mDJlilydAwcOiNq1awttbW1RvXp18fvvvysdVnX48OEK4/71119FtWrVhI6OjqhRo4YICQlROhzgb7/9JurXry90dHRE2bJlhbu7uzh48GCOen/99ZcAIIYMGZLrNvvQn3/+KbVvZmYmvL29xb179+Tq5GdYVSHeDZdbvnx5oaGhITcEnbJt8uExQAghHjx4IIYPHy4qVqwotLS0hLW1tWjbtq1YtWpVnsvftWuXqFOnjtDV1RV2dnZizpw50rCm2bGcO3dOeHl5iUqVKgkdHR1haWkpunTpIs6ePSu1s3XrVtG+fXthaWkptLW1RaVKlcTQoUNFYmKi3PJu3bolevfuLe03jRs3Frt3784R18uXL8UPP/wg7O3tpXXq3bu33PCTHx4jnj59KgYOHCjMzc2FoaGh8PT0FFevXlW4zZR9HhV97h48eCC1q62tLZydnXMM5Znb8VTRsexD2fvN0aNHxZAhQ0TZsmWFoaGh8Pb2lhviOduRI0eEp6enMDExEbq6usLBwUH4+fnJvSe5HV+FEKJPnz4CgPjzzz+lsjdv3gh9fX2hra0tN4Tw+9tClX3tzZs3Ys6cOaJWrVrS59HFxUUEBgaKlJQUuW2j6n6uyM6dO0XNmjVFmTJl5L5f3N3dFQ6bqWi4Z1VjVUTZcpQd1xWtryrfa0IIcfHiReHu7i50dXVF+fLlxYwZM8Svv/6aY+hKIVTbP1QZVlWVcwxFFH3fXrlyRXh4eAhDQ0Nhbm4uBg8eLA2hW9BzDSGEWLVqlXBxcRF6enrCyMhIODs7i/Hjx4v//vtPqvPh53rlypWiZcuWoly5ckJHR0c4ODiIcePG5fl+Z2VliVmzZonKlSsLHR0dUb9+fbF79+4c+1V+jwd///23cHJyEjo6OqJmzZpi27ZtSocm/1BSUpLo3L
mzMDIyEgCk9czvuZAq+4wy58+fF25ubkJHR0dUqFBBBAcHiyVLlggAIikpKc9YlSnoMV4dx9MPPy+HDx8W3bp1E7a2tkJbW1vY2toKLy+vHEPf50X2/1eUiEqgnTt3onv37jh27FiOX1GJiIjo4/j7+2PlypVIS0tT+QbgzxETBqISrEuXLoiNjcXNmzeL/EZHIiKiT8mrV6/kbjx/8uQJHB0d0aBBg1yfzUNFOKwqEalu8+bNuHjxIvbs2YPFixczWSAiIvpIrq6uaNWqFZycnPDgwQP8+uuvSE1NxZQpU4o7tBKPVxiISiCZTAZDQ0P069cPK1asKFHjSxMREZVG//d//4etW7fi3r17kMlkaNCgAaZNm1ZoQ/t+ypgwEBERERGRUio/h4GIiIiIiD4/TBiIiIiIiEgpdoymfMvKysJ///0HIyMj3oxLRERUSggh8Pz5c9ja2koPoCRSBRMGyrf//vsPFStWLO4wiIiIqADu3r2LChUqFHcYVIowYaB8y34c/d27d2FsbFzM0RAREZEqUlNTUbFiRel7nEhVTBgo37K7IRkbGzNhICIiKmXYnZjyix3YiIiIiIhIKSYMRERERESkFBMGIiIiIiJSivcwENEnKzMzE2/fvi3uMIiIioSWlhY0NTWLOwz6BDFhIKJPjhACSUlJePbsWXGHQkRUpExNTWFtbc0bm6lQMWEoZY4dO4Z58+YhKioKiYmJ2L59O7p3766w7rfffouVK1di4cKF8Pf3l8qTk5MxcuRI/PPPP9DQ0ECvXr2wePFiGBoaFs1KEKlZdrJgaWkJfX19fnES0SdPCIGXL1/i4cOHAAAbG5tijog+JUwYSpkXL16gbt26+Prrr9GzZ0+l9bZv345Tp07B1tY2xzRvb28kJibi4MGDePv2LQYOHIghQ4Zg06ZN6gydqEhkZmZKyUK5cuWKOxwioiKjp6cHAHj48CEsLS3ZPYkKDROGUqZjx47o2LFjrnXu37+PkSNHYv/+/ejcubPctNjYWISGhiIyMhINGzYEACxduhSdOnXCTz/9pDDBICpNsu9Z0NfXL+ZIiIiKXvax7+3bt0wYqNBwlKRPTFZWFgYMGIBx48ahVq1aOaZHRETA1NRUShYAwMPDAxoaGjh9+nRRhkqkVuyGRESfIx77SB14heETM2fOHJQpUwajRo1SOD0pKQmWlpZyZWXKlIGZmRmSkpIUzpOeno709HTpdWpqauEFTEREREQlGq8wfEKioqKwePFirF27tlB/YQgODoaJiYn0V7FixUJrm4j+p1WrVnIDFFBOa9euhampaYlpJy8vX75Er169YGxsDJlMVupG7po+fTrq1atX3GEoVZD3USaTYceOHYWy/Px+ZsPCwgplP7Czs8OiRYs+qg2i/OAVhk/I8ePH8fDhQ1SqVEkqy8zMxNixY7Fo0SLEx8fD2tpaGkEhW0ZGBpKTk2Ftba2w3UmTJiEgIEB6nZqayqSBSiW7iXuKdHnxszvnXakIhYWFoXXr1nj69GmRnCyrQ79+/dCpU6d8zWNnZwd/f3+5E7uCtFMQ69atw/Hjx3Hy5EmYm5vDxMRE7cuk/JHJZIiLi4OdnV1xh0JUYjFh+IQMGDAAHh4ecmWenp4YMGAABg4cCABwdXXFs2fPEBUVBRcXFwDAv//+i6ysLDRp0kRhuzo6OtDR0VFv8EREKtDT05NGgikJ7eTl1q1bcHJyQu3atQvcRmZmJmQyGTQ02CmAiIoHjz6lTFpaGqKjoxEdHQ0AiIuLQ3R0NBISElCuXDnUrl1b7k9LSwvW1taoXr06AMDJyQkdOnTA4MGDcebMGYSHh2PEiBHo378/R0giKgEyMjIwYsQImJiYwNzcHFOmTIEQQpqenp6O77//HuXLl4eBgQGaNGmCsLAwafqdO3fQtWtXlC1bFgYGBqhVqxb27t2L+Ph4tG7dGgBQtmxZyGQy+Pn5KYzhyZMn8PLyQvny5aGvrw9nZ2f88ccfcnW2bt
0KZ2dn6OnpoVy5cvDw8MCLFy8AvLuS0bhxYxgYGMDU1BTNmzfHnTt3pHmXL18OBwcHaGtro3r16tiwYYNc28+ePcPQoUNhZWUFXV1d1K5dG7t37waQswvKrVu30K1bN1hZWcHQ0BCNGjXCoUOHpOmtWrXCnTt3MGbMGMhkMqm7pqKuLHnFJZPJsGbNGvTo0QP6+vqoVq0adu3apXAbZi97/vz5OHbsGGQyGVq1agUAePr0KXx8fFC2bFno6+ujY8eOuHHjhjRfdmy7du1CzZo1oaOjg4SEBIXLuHz5Mjp27AhDQ0NYWVlhwIABePz4sTQ9NDQULVq0gKmpKcqVK4cuXbrg1q1bcm3cu3cPXl5eMDMzg4GBARo2bJhjEIwNGzbAzs4OJiYm6N+/P54/f650vbPj3717N6pXrw59fX307t0bL1++xLp162BnZ4eyZcti1KhRyMzMlObLa7tkt12pUiXo6+ujR48eePLkSY7l79y5Ew0aNICuri6qVKmCwMBAZGRkKI33fU+fPoW3tzcsLCygp6eHatWqISQkRKV5gXfbqWHDhjAyMoK1tTW+/PLLHFf1ASA8PBx16tSBrq4umjZtisuXL8tNP3HiBNzc3KCnp4eKFSti1KhR0ueLqDgwYShlzp49i/r166N+/foAgICAANSvXx9Tp05VuY2NGzeiRo0aaNu2LTp16oQWLVpg1apV6gqZiPJh3bp1KFOmDM6cOYPFixdjwYIFWLNmjTR9xIgRiIiIwObNm3Hx4kX06dMHHTp0kE6shg8fjvT0dBw7dgyXLl3CnDlzYGhoiIoVK+Lvv/8GAFy7dg2JiYlYvHixwhhev34NFxcX7NmzB5cvX8aQIUMwYMAAnDlzBgCQmJgILy8vfP3114iNjUVYWBh69uwJIQQyMjLQvXt3uLu74+LFi4iIiMCQIUOkE/Xt27dj9OjRGDt2LC5fvoyhQ4di4MCBOHLkCIB3I7117NgR4eHh+P3333HlyhXMnj1b6fCQaWlp6NSpEw4fPozz58+jQ4cO6Nq1q3SCvW3bNlSoUAFBQUFITExEYmKiwnbyiitbYGAg+vbti4sXL6JTp07w9vZGcnKywja3bduGwYMHw9XVFYmJidi2bRsAwM/PD2fPnsWuXbsQEREBIQQ6deokDQkMvLv3Yc6cOVizZg1iYmJyDFYBvEus2rRpg/r16+Ps2bMIDQ3FgwcP0LdvX6nOixcvEBAQgLNnz+Lw4cPQ0NBAjx49kJWVJW0/d3d33L9/H7t27cKFCxcwfvx4aTrwLinbsWMHdu/ejd27d+Po0aOYPXu2wnV+P/4lS5Zg8+bNCA0NRVhYGHr06IG9e/di79692LBhA1auXImtW7dK8+S1XU6fPo1BgwZhxIgRiI6ORuvWrTFz5ky55R4/fhw+Pj4YPXo0rly5gpUrV2Lt2rX48ccfc40325QpU3DlyhXs27cPsbGxWL58OczNzVWaF3g3lOmMGTNw4cIF7NixA/Hx8QoT83HjxmH+/PmIjIyEhYUFunbtKq3nrVu30KFDB/Tq1QsXL17En3/+iRMnTmDEiBEqx0FU6ARRPqWkpAgAIiUlpbhDIcrh1atX4sqVK+LVq1c5plWesLtI//LL3d1dODk5iaysLKlswoQJwsnJSQghxJ07d4Smpqa4f/++3Hxt27YVkyZNEkII4ezsLKZPn66w/SNHjggA4unTp/mOrXPnzmLs2LFCCCGioqIEABEfH5+j3pMnTwQAERYWprCdZs2aicGDB8uV9enTR3Tq1EkIIcT+/fuFhoaGuHbtmsL5Q0JChImJSa6x1qpVSyxdulR6XblyZbFw4cJc28krLiGEACAmT54svU5LSxMAxL59+5TGMnr0aOHu7i69vn79ugAgwsPDpbLHjx8LPT098ddff0mxARDR0dG5rueMGTNE+/bt5cru3r0rACjdfo8ePRIAxKVLl4QQQqxcuVIYGRmJJ0+eKKw/bdo0oa+vL1JTU6Wyce
PGiSZNmiiNKzv+mzdvSmVDhw4V+vr64vnz51KZp6enGDp0qBBCte3i5eUl934IIUS/fv3k3se2bduKWbNmydXZsGGDsLGxkV4DENu3b1cYe9euXcXAgQOVrtuH3N3dxejRo5VOj4yMFACk9c7+DK6fN0+8vHRJvLx0Sdw7cULo6eqKDf+/zLdnT/F1795y7Rw/flxoaGhIxzVF+3S23I6B/P6mguIVBiKiEqRp06Zyo5y5urrixo0byMzMxKVLl5CZmQlHR0cYGhpKf0ePHpW6mYwaNQozZ85E8+bNMW3aNFy8eDHfMWRmZmLGjBlwdnaGmZkZDA0NsX//fulX+7p166Jt27ZwdnZGnz59sHr1ajx9+hQAYGZmBj8/P3h6eqJr165YvHix3K/6sbGxaN68udzymjdvjtjYWABAdHQ0KlSoAEdHR5ViTUtLw/fffw8nJyeYmprC0NAQsbGxSrvwKJNXXNnq1Kkj/d/AwADGxsYKu5zktpwyZcrI3TNWrlw5VK9eXW5Z2tracstS5MKFCzhy5IjcvlCjRg0AkPaHGzduwMvLC1WqVIGxsbF0Y2/29omOjkb9+vVhZmamdDl2dnYwMjKSXtvY2OS5zvr6+nBwcJBeW1lZwc7ODoaGhnJl2e2osl1iY2Nz3Gvn6uqaY5sEBQXJbZPBgwcjMTERL1++zDVmAPjuu++wefNm1KtXD+PHj8fJkyfznOd9UVFR6Nq1KypVqgQjIyO4u7sDQI79sUndutL/zUxMUM3ODlfj4gAAl65dw+87d8qtg6enJ7KyshD3/+sQFTXe9ExEVEqkpaVBU1MTUVFRObroZJ+IffPNN/D09MSePXtw4MABBAcHY/78+Rg5cqTKy5k3bx4WL16MRYsWwdnZGQYGBvD398ebN28AAJqamjh48CBOnjyJAwcOYOnSpfjhhx9w+vRp2NvbIyQkBKNGjUJoaCj+/PNPTJ48GQcPHkTTpk3zXHZ+b0T+/vvvcfDgQfz000+oWrUq9PT00Lt3bynWwqalpSX3WiaTyXXfKSx6enp5Do+dlpaGrl27Ys6cOTmm2djYAAC6du2KypUrY/Xq1bC1tUVWVhZq164tbR9VtndB1lnRPEWx7dLS0hAYGIiePXvmmKarq5vn/B07dsSdO3ewd+9eHDx4EG3btsXw4cPx008/5Tnvixcv4OnpCU9PT2zcuBEWFhZISEiAp6dnvvbHFy9fYlCfPggIDMwx7f1REImKEq8wEBGVIB/ebHrq1ClUq1YNmpqaqF+/PjIzM/Hw4UNUrVpV7u/9YZErVqyIb7/9Ftu2bcPYsWOxevVqAO9+tQYgd6OpIuHh4ejWrRu++uor1K1bF1WqVMH169fl6shkMjRv3hyBgYE4f/48tLW1sX37dml6/fr1MWnSJJw8eRK1a9fGpk2bALwbeCE8PDzH8mrWrAng3S/49+7dy7G83GL18/NDjx494OzsDGtra8THx8vV0dbWznOd84qrsDg5OSEjI0PufX7y5AmuXbuW72U1aNAAMTExsLOzy7E/GBgYSO1OnjwZbdu2hZOTk3QlKFudOnUQHR2t9D6MoqLKdnFyclL4+XhfgwYNcO3atRzbo2rVqiqPMmVhYQFfX1/8/vvvWLRokcr3+F29ehVPnjzB7Nmz4ebmhho1aii9EnPmwgXp/09TUnDzzh3UsLcHANRzcsLVW7cUrkP2Z5ioqDFhICIqQRISEhAQEIBr167hjz/+wNKlSzF69GgAgKOjI7y9veHj44Nt27YhLi4OZ86cQXBwMPbsefeMCX9/f+zfvx9xcXE4d+4cjhw5AicnJwBA5cqVIZPJsHv3bjx69AhpaWkKY6hWrZp0BSE2NhZDhw7FgwcPpOmnT5/GrFmzcPbsWSQkJGDbtm149OgRnJycEBcXh0mTJiEiIgJ37tzBgQMHcOPGDSmGcePGYe3atVi+fDlu3LiBBQsWYNu2bfj+++8BAO7u7mjZsiV69eqFgwcPIi4uDv
v27UNoaKjSWLdt24bo6GhcuHABX375ZY5fre3s7HDs2DHcv39fbgSh9+UVV2GpVq0aunXrhsGDB+PEiRO4cOECvvrqK5QvXx7dunXLV1vDhw9HcnIyvLy8EBkZiVu3bmH//v0YOHAgMjMzUbZsWZQrVw6rVq3CzZs38e+//8o9UwcAvLy8YG1tje7duyM8PBy3b9/G33//jYiIiMJc7Typsl2yr1r99NNPuHHjBn7++ecc+8XUqVOxfv16BAYGIiYmBrGxsdi8eTMmT56sUhxTp07Fzp07cfPmTcTExGD37t3SvpuXSpUqQVtbG0uXLsXt27exa9cuzJgxQ2Hd4JUrceTUKcTcuIEhkyejnKkpurZtCwAI+PprnLpwQbq5+8aNG9i5cydveqZixYSBiKgE8fHxwatXr9C4cWMMHz4co0ePxpAhQ6TpISEh8PHxwdixY1G9enV0794dkZGRUleFzMxMDB8+XBpC2dHREb/88gsAoHz58ggMDMTEiRNhZWWl9ARk8uTJaNCgATw9PdGqVSvphDKbsbExjh07hk6dOsHR0RGTJ0/G/Pnz0bFjR+jr6+Pq1avo1asXHB0dMWTIEAwfPhxDhw4FAHTv3h2LFy/GTz/9hFq1amHlypUICQmRhhwFgL///huNGjWCl5cXatasifHjxyu9QrBgwQKULVsWzZo1Q9euXeHp6YkGDRrI1QkKCkJ8fDwcHBxgYWGhsB1V4iosISEhcHFxQZcuXeDq6gohBPbu3Zujy05ebG1tER4ejszMTLRv3x7Ozs7w9/eHqakpNDQ0oKGhgc2bNyMqKgq1a9fGmDFjMG/ePLk2tLW1ceDAAVhaWqJTp05wdnbOdVQqdcpruzRt2hSrV6/G4sWLUbduXRw4cCBHIuDp6Yndu3fjwIEDaNSoEZo2bYqFCxeicuXKKsWgra2NSZMmoU6dOmjZsiU0NTWxefNmlea1sLDA2rVrsWXLFtSsWROzZ89W2pUpyN8f4+bMQfN+/fDgyRNsXboU2v9/PZ2rV8f+kBBcv34dbm5u0kiIHPqcipNMiPcG+CZSQWpqKkxMTJCSkgJjY+PiDodIzuvXrxEXFwd7e3uV+iwTERW1Vx88d0ERvQI+7C+3YyC/v6mgeIWBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEVEIIITBkyBCYmZlBJpMhOjo6z3ni4+NVrltStWrVCv7+/rnWWbt2LUxNTYskHiIiklemuAMgIioy002KeHkp+aoeGhqKtWvXIiwsDFWqVIG5ubmaAitZtm3bBi0tLem1nZ0d/P395ZKIfv36oVOnTsUQHRERMWEgIiohbt26BRsbGzRr1qy4QylSZmZmedbR09ODnp5eEURDREQfYpckIqISwM/PDyNHjkRCQgJkMhns7OwAvLvq0KJFC5iamqJcuXLo0qULbt26pbSdp0+fwtvbGxYWFtDT00O1atUQEhIiTb979y769u0LU1NTmJmZoVu3boiPj1faXlhYGGQyGfbs2YM6depAV1cXTZs2xeXLl+Xq/f3336hVqxZ0dHRgZ2eH+fPny03/5ZdfUK1aNejq6sLKygq9e/eWpr3fJalVq1a4c+cOxowZA5lMBplMBkC+S9L169chk8lw9epVuWUsXLgQDg4O0uvLly+jY8eOMDQ0hJWVFQYMGIDHjx8rXVciIlKMCQMRUQmwePFiBAUFoUKFCkhMTERkZCQA4MWLFwgICMDZs2dx+PBhaGhooEePHsjKylLYzpQpU3DlyhXs27cPsbGxWL58udS16e3bt/D09ISRkRGOHz+O8PBwGBoaokOHDnjz5k2u8Y0bNw7z589HZGQkLCws0LVrV7x9+xYAEBUVhb59+6J///64dOkSpk+fjilTpmDt2rUAgLNnz2LUqFEICgrCtWvXEBoaipYtWy
pczrZt21ChQgUEBQUhMTERiYmJOeo4OjqiYcOG2Lhxo1z5xo0b8eWXXwIAnj17hjZt2qB+/fo4e/YsQkND8eDBA/Tt2zfX9SQiopzYJYmIqAQwMTGBkZERNDU1YW1tLZX36tVLrt5vv/0GCwsLXLlyBbVr187RTkJCAurXr4+GDRsCgHSlAgD+/PNPZGVlYc2aNdIv9yEhITA1NUVYWBjat2+vNL5p06ahXbt2AIB169ahQoUK2L59O/r27YsFCxagbdu2mDJlCoB3J/RXrlzBvHnz4Ofnh4SEBBgYGKBLly4wMjJC5cqVUb9+fYXLMTMzg6amJoyMjOS2w4e8vb3x888/Y8aMGQDeXXWIiorC77//DgD4+eefUb9+fcyaNUtu21WsWBHXr1+Ho6Oj0raJiEgerzAQEZVgN27cgJeXF6pUqQJjY2MpAUhISFBY/7vvvsPmzZtRr149jB8/HidPnpSmXbhwATdv3oSRkREMDQ1haGgIMzMzvH79OtduTgDg6uoq/d/MzAzVq1dHbGwsACA2NhbNmzeXq9+8eXPcuHEDmZmZaNeuHSpXrowqVapgwIAB2LhxI16+fFmQzSHp378/4uPjcerUKQDvri40aNAANWrUkNb1yJEj0noaGhpK0/JaVyIikscrDEREJVjXrl1RuXJlrF69Gra2tsjKykLt2rWVdiHq2LEj7ty5g7179+LgwYNo27Ythg8fjp9++glpaWlwcXHJ0ZUHACwsLNS2DkZGRjh37hzCwsJw4MABTJ06FdOnT0dkZGSBh0q1trZGmzZtsGnTJjRt2hSbNm3Cd999J01PS0tD165dMWfOnBzz2tjYFHRViArFbWtZnnVqFUEcRKriFQYiohLqyZMnuHbtGiZPnoy2bdvCyckJT58+zXM+CwsL+Pr64vfff8eiRYuwatUqAECDBg1w48YNWFpaomrVqnJ/Jia5Dzmb/Us+8O7G6uvXr8PJyQkA4OTkhPDwcLn64eHhcHR0hKamJgCgTJky8PDwwNy5c3Hx4kXEx8fj33//VbgsbW1tZGZm5rme3t7e+PPPPxEREYHbt2+jf//+0rQGDRogJiYGdnZ2OdbVwMAgz7aJiOh/mDAQEZVQZcuWRbly5bBq1SrcvHkT//77LwICAnKdZ+rUqdi5cydu3ryJmJgY7N69Wzqx9/b2hrm5Obp164bjx48jLi4OYWFhGDVqFO7du5dru0FBQTh8+DAuX74MPz8/mJubo3v37gCAsWPH4vDhw5gxYwauX7+OdevW4eeff8b3338PANi9ezeWLFmC6Oho3LlzB+vXr0dWVhaqV6+ucFl2dnY4duwY7t+/n+uoRj179sTz58/x3XffoXXr1rC1tZWmDR8+HMnJyfDy8kJkZCRu3bqF/fv3Y+DAgSolI0RE9D9MGIiISigNDQ1s3rwZUVFRqF27NsaMGYN58+blOo+2tjYmTZqEOnXqoGXLltDU1MTmzZsBAPr6+jh27BgqVaqEnj17wsnJCYMGDcLr169hbGyca7uzZ8/G6NGj4eLigqSkJPzzzz/Q1tYG8O7X/L/++gubN29G7dq1MXXqVAQFBcHPzw8AYGpqim3btqFNmzZwcnLCihUr8Mcff6BWLcWdLoKCghAfHw8HB4dcu0oZGRmha9euuHDhAry9veWm2draIjw8HJmZmWjfvj2cnZ3h7+8PU1NTaGjwq4+IKD9kQghR3EFQ6ZKamgoTExOkpKTkeZJBVNRev36NuLg42NvbQ1dXt7jDKfXCwsLQunVrPH36tMD3GxCRvJjHMXnWqWVesLsYcjsG8vubCoo/sxARERERkVJMGIiIiIiISCkOq0pEREq1atUK7LlKRPR54xUGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiKjIrF27VqUnRstkMuzYsUPt8RARUd74HAYi+mw4r3Mu0uVd8r2Ur/qtWrVCvXr1sG
jRIvUEVAL069cPnTp1kl5Pnz4dO3bsQHR0tFy9xMRElC1btoijIyIiRZgwEBGVIkIIZGZmokyZ0nn41tPTg56eXp71rK2tiyAaIiJSBbskERGVAH5+fjh69CgWL14MmUwGmUyG+Ph4hIWFQSaTYd++fXBxcYGOjg5OnDgBPz8/dO/eXa4Nf39/tGrVSnqdlZWF4OBg2NvbQ09PD3Xr1sXWrVtzjcPOzg4zZsyAl5cXDAwMUL58eSxbtkyuTkJCArp16wZDQ0MYGxujb9++ePDggTT9woULaN26NYyMjGBsbAwXFxecPXsWgHyXpLVr1yIwMBAXLlyQ1nnt2rUA5LskNWvWDBMmTJCL4dGjR9DS0sKxY8cAAOnp6fj+++9Rvnx5GBgYoEmTJggLC1NhyxMRUV6YMJQyx44dQ9euXWFra5ujj+/bt28xYcIEODs7w8DAALa2tvDx8cF///0n10ZycjK8vb1hbGwMU1NTDBo0CGlpaUW8JkT0vsWLF8PV1RWDBw9GYmIiEhMTUbFiRWn6xIkTMXv2bMTGxqJOnToqtRkcHIz169djxYoViImJwZgxY/DVV1/h6NGjuc43b9481K1bF+fPn8fEiRMxevRoHDx4EMC7JKRbt25ITk7G0aNHcfDgQdy+fRv9+vWT5vf29kaFChUQGRmJqKgoTJw4EVpaWjmW069fP4wdOxa1atWS1vn9dt5vb/PmzRBCSGV//vknbG1t4ebmBgAYMWIEIiIisHnzZly8eBF9+vRBhw4dcOPGDZW2FRERKVc6r2l/xl68eIG6devi66+/Rs+ePeWmvXz5EufOncOUKVNQt25dPH36FKNHj8YXX3wh/boHvPvyTUxMxMGDB/H27VsMHDgQQ4YMwaZNm4p6dYjo/zMxMYG2tjb09fUVdscJCgpCu3btVG4vPT0ds2bNwqFDh+Dq6goAqFKlCk6cOIGVK1fC3d1d6bzNmzfHxIkTAQCOjo4IDw/HwoUL0a5dOxw+fBiXLl1CXFyclNCsX78etWrVQmRkJBo1aoSEhASMGzcONWrUAABUq1ZN4XL09PRgaGiIMmXK5NoFqW/fvvD398eJEyekBGHTpk3w8vKCTCZDQkICQkJCkJCQAFtbWwDA999/j9DQUISEhGDWrFkqbzciIsqJCUMp07FjR3Ts2FHhNBMTE+lXwGw///wzGjdujISEBFSqVAmxsbEIDQ1FZGQkGjZsCABYunQpOnXqhJ9++kn6siWikiX786qqmzdv4uXLlzmSjDdv3qB+/fq5zpudYLz/OvtG7NjYWFSsWFHu6kfNmjVhamqK2NhYNGrUCAEBAfjmm2+wYcMGeHh4oE+fPnBwcMhX/O+zsLBA+/btsXHjRri5uSEuLg4RERFYuXIlAODSpUvIzMyEo6Oj3Hzp6ekoV65cgZdLRETvMGH4xKWkpEAmk0l9hiMiImBqaip38uHh4QENDQ2cPn0aPXr0KKZIiSg3BgYGcq81NDTkuugA77olZsvuZrhnzx6UL19erp6Ojo6aonxn+vTp+PLLL7Fnzx7s27cP06ZNw+bNmz/q+OLt7Y1Ro0Zh6dKl2LRpE5ydneHs/G7Uq7S0NGhqaiIqKgqamppy8xkaGn7UuhAREROGT9rr168xYcIEeHl5wdjYGACQlJQES0tLuXplypSBmZkZkpKSFLaTnp6O9PR06XVqaqr6gib6jGlrayMzM1OluhYWFrh8+bJcWXR0tHSvQM2aNaGjo4OEhIRcux8pcurUqRyvnZycAABOTk64e/cu7t69K11luHLlCp49e4aaNWtK8zg6OsLR0RFjxoyBl5cXQkJCFCYMqq5zt27dMGTIEISGhmLTpk3w8fGRptWvXx+ZmZl4+PCh1GWJiIgKD296/kS9ffsWffv2hRACy5cv/6i2goODYWJiIv293xWBiAqPnZ0dTp8+jfj4eDx+/BhZWVlK67Zp0wZnz57F+vXrcePGDUybNk0ugTAyMsL333+PMWPGYN
26dbh16xbOnTuHpUuXYt26dbnGER4ejrlz5+L69etYtmwZtmzZgtGjRwN4d0XS2dkZ3t7eOHfuHM6cOQMfHx+4u7ujYcOGePXqFUaMGIGwsDDcuXMH4eHhiIyMlBIORescFxeH6OhoPH78WO7HifcZGBige/fumDJlCmJjY+Hl5SVNc3R0hLe3N3x8fLBt2zbExcXhzJkzCA4Oxp49e3JdVyIiyhsThk9QdrJw584dHDx4ULq6ALwb2/zhw4dy9TMyMpCcnKz0psNJkyYhJSVF+rt7965a4yf6XH3//ffQ1NREzZo1YWFhgYSEBKV1PT09MWXKFIwfPx6NGjXC8+fP5X51B4AZM2ZgypQpCA4OhpOTEzp06IA9e/bA3t4+1zjGjh2Ls2fPon79+pg5cyYWLFgAT09PAO+GO925cyfKli2Lli1bwsPDA1WqVMGff/4JANDU1MSTJ0/g4+MDR0dH9O3bFx07dkRgYKDCZfXq1QsdOnRA69atYWFhgT/++ENpXN7e3rhw4QLc3NxQqVIluWkhISHw8fHB2LFjUb16dXTv3h2RkZE56hERUf7JxIedYKnUkMlk2L59u9xY7NnJwo0bN3DkyBFYWFjIzRMbG4uaNWvi7NmzcHFxAQAcOHAAHTp0wL1791S66Tk1NRUmJiZISUmRS0aISoLXr18jLi4O9vb20NXVLe5wSh07Ozv4+/vD39+/uEMh+mTFPI7Js04t81oFaju3YyC/v6mgeA9DKZOWloabN29Kr7Mv5ZuZmcHGxga9e/fGuXPnsHv3bmRmZkr3JZiZmUFbW1v6lXHw4MFYsWIF3r59ixEjRqB///4cIYmIiIiIcmDCUMqcPXsWrVu3ll4HBAQAAHx9fTF9+nTs2rULAFCvXj25+Y4cOSI9AXbjxo0YMWIE2rZtCw0NDfTq1QtLliwpkviJiIiIqHRhwlDKtGrVKsdQiu9TpYeZmZkZH9JGRArFx8cXdwhERFTC8KZnIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiEgl06dPz/GMl9zEx8dDJpMhOjpaaR07OzssWrRI5TbXrl0LU1NTlesrI5PJsGPHDqXThRAYMmQIzMzM8lyHkkSV7Znf9xF4N6R3SXn6d1hYGGQyGZ49e6byPIURf2Hte0SlEZ/DQESfjdgaTkW6PKersfmqf+zYMcybNw9RUVFITEzE9u3b0b17d/UER7kKDQ3F2rVrERYWhipVqsDc3Ly4QyoQmUyWYz/6/vvvMXLkyOILSon4+HjY29vj/Pnz+U5oiEi9eIWBiKiEePHiBerWrYtly5YVdyifvVu3bsHGxgbNmjWDtbU1ypTJ/+9rQghkZGSoIbqPY2hoiHLlyhV3GERUijBhICIqITp27IiZM2eiR48eKs+T3b3kt99+Q6VKlWBoaIhhw4YhMzMTc+fOhbW1NSwtLfHjjz/KzZeQkIBu3brB0NAQxsbG6Nu3Lx48eCBXZ/bs2bCysoKRkREGDRqE169f51j+mjVr4OTkBF1dXdSoUQO//PJLwVb+/1uwYAGcnZ1hYGCAihUrYtiwYUhLS8tRb8eOHahWrRp0dXXh6emJu3fvyk3fuXMnGjRoAF1dXVSpUgWBgYEqn7z7+flh5MiRSEhIgEwmg52dHQAgPT0do0aNgqWlJXR1ddGiRQtERkZK82V3ldm3bx9cXFygo6ODEydO5Gg/u6vWX3/9BTc3N+jp6aFRo0a4fv06IiMj0bBhQxgaGqJjx4549OiRNJ+ibjXdu3eHn5+fwvXIjrtHjx5y6/FhlyQ/Pz90794dgYGBsLCwgLGxMb799lu8efNG6TZKT0/H999/j/Lly8PAwABNmjRBWFiY0voAcPXqVbRo0QK6urqoWbMmDh06JNc1zN7eHgBQv359yGQytGrVKtf2sj158gReXl4oX7489PX14ezsjD/++CNHvYyMDIwYMQImJi
YwNzfHlClTIIQo8DpduHABrVu3hpGREYyNjeHi4oKzZ8+qFDNRacOEgYiolLt16xb27duH0NBQ/PHHH/j111/RuXNn3Lt3D0ePHsWcOXMwefJknD59GgCQlZWFbt26ITk5GUePHsXBgwdx+/Zt9OvXT2rzr7/+wvTp0zFr1iycPXsWNjY2OZKBjRs3YurUqfjxxx8RGxuLWbNmYcqUKVi3bl2B10VDQwNLlixBTEwM1q1bh3///Rfjx4+Xq/Py5Uv8+OOPWL9+PcLDw/Hs2TP0799fmn78+HH4+Phg9OjRuHLlClauXIm1a9fmSJqUWbx4MYKCglChQgUkJiZKScH48ePx999/Y926dTh37hyqVq0KT09PJCcny80/ceJEzJ49G7GxsahTp47S5UybNg2TJ0/GuXPnUKZMGXz55ZcYP348Fi9ejOPHj+PmzZuYOnWqqpsuh+y4Q0JC5NZDkcOHDyM2NhZhYWH4448/sG3bNgQGBiqtP2LECERERGDz5s24ePEi+vTpgw4dOuDGjRsK62dmZqJ79+7Q19fH6dOnsWrVKvzwww9ydc6cOQMAOHToEBITE7Ft2zaV1vP169dwcXHBnj17cPnyZQwZMgQDBgyQ2su2bt06lClTBmfOnMHixYuxYMECrFmzpsDr5O3tjQoVKiAyMhJRUVGYOHEitLS0VIqZqNQRRPmUkpIiAIiUlJTiDoUoh1evXokrV66IV69e5Zh2pXqNIv37GADE9u3b86w3bdo0oa+vL1JTU6UyT09PYWdnJzIzM6Wy6tWri+DgYCGEEAcOHBCampoiISFBmh4TEyMAiDNnzgghhHB1dRXDhg2TW1aTJk1E3bp1pdcODg5i06ZNcnVmzJghXF1dhRBCxMXFCQDi/PnzSuOvXLmyWLhwodLpW7ZsEeXKlZNeh4SECADi1KlTUllsbKwAIE6fPi2EEKJt27Zi1qxZcu1s2LBB2NjYSK/z2r4LFy4UlStXll6npaUJLS0tsXHjRqnszZs3wtbWVsydO1cIIcSRI0cEALFjxw6l7Qrxv+2yZs0aqeyPP/4QAMThw4elsuDgYFG9enXptbu7uxg9erRcW926dRO+vr7S6w+3p6L1nDZtmtz76OvrK8zMzMSLFy+ksuXLlwtDQ0NpH3p/2Xfu3BGampri/v37cu22bdtWTJo0SeE679u3T5QpU0YkJiZKZQcPHpSLT5X9RYj/beenT58qrdO5c2cxduxY6bW7u7twcnISWVlZUtmECROEk5OTyusUEhIiTExMpGlGRkZi7dq1ucaqzOVHl/P8K6jcjoH8/qaC4k3PRESlnJ2dHYyMjKTXVlZW0NTUhIaGhlzZw4cPAQCxsbGoWLEiKlasKE2vWbMmTE1NERsbi0aNGiE2Nhbffvut3HJcXV1x5MgRAO/ut7h16xYGDRqEwYMHS3UyMjJgYmJS4HU5dOgQgoODcfXqVaSmpiIjIwOvX7/Gy5cvoa+vDwAoU6YMGjVqJM1To0YNKfbGjRvjwoULCA8Pl7uikJmZmaOd/Lh16xbevn2L5s2bS2VaWlpo3LgxYmPlb25v2LChSm2+f/XBysoKAODs7CxXlv2eqVvdunXltourqyvS0tJw9+5dVK5cWa7upUuXkJmZCUdHR7ny9PR0pfdGXLt2DRUrVoS1tbVU1rhx40KJPTMzE7NmzcJff/2F+/fv482bN0hPT8/xPjdt2hQymUx67erqivnz5yMzM7NA6xQQEIBvvvkGGzZsgIeHB/r06QMHB4dCWSeikoYJAxFRKfdhNwiZTKawLCsrq9CWmX1fwerVq9GkSRO5aZqamgVqMz4+Hl26dMF3332HH3/8EWZmZjhx4gQGDRqEN2/eqHyin5aWhsDAQPTs2TPHNF1d3QLFlh8GBgYq1Xv/Pco+kf2w7P33TENDQ67PPQC8ffv2Y0ItkLS0NGhqaiIqKirHe21oaFjk8cybNw+LFy/GokWLpPtf/P39c70H40MFWafp06
fjyy+/xJ49e7Bv3z5MmzYNmzdvztc9SESlBRMGIqLPjJOTE+7evYu7d+9KVxmuXLmCZ8+eoWbNmlKd06dPw8fHR5rv1KlT0v+trKxga2uL27dvw9vbu1DiioqKQlZWFubPny9dHfnrr79y1MvIyMDZs2elX6ivXbuGZ8+ewcnp3bC5DRo0wLVr11C1atVCiQsAHBwcoK2tjfDwcOkX97dv3yIyMrLInk9gYWGBxMRE6XVmZiYuX76M1q1bK51HS0sLmZmZebZ94cIFvHr1Cnp6egDevdeGhoZyV6Gy1a9fH5mZmXj48CHc3NxUir169eq4e/cuHjx4IF1N+fCeCm1tbWm98iM8PBzdunXDV199BeDdPTrXr1+X9uVs2ffwZDt16hSqVasGTU3NAq0TADg6OsLR0RFjxoyBl5cXQkJCmDDQJ4kJAxFRCZGWloabN29Kr+Pi4hAdHQ0zMzNUqlSp0Jbj4eEBZ2dneHt7Y9GiRcjIyMCwYcPg7u4udacZPXo0/Pz80LBhQzRv3hwbN25ETEwMqlSpIrUTGBiIUaNGwcTEBB06dEB6ejrOnj2Lp0+fIiAgIN9xVa1aFW/fvsXSpUvRtWtXhIeHY8WKFTnqaWlpYeTIkViyZAnKlCmDESNGoGnTplICMXXqVHTp0gWVKlVC7969oaGhgQsXLuDy5cuYOXNmgbaZgYEBvvvuO4wbN056P+bOnYuXL19i0KBBBWozv9q0aYOAgADs2bMHDg4OWLBgQZ4PL7Ozs8Phw4fRvHlz6OjooGzZsgrrvXnzBoMGDcLkyZMRHx+PadOmYcSIEXLd2rI5OjrC29sbPj4+mD9/PurXr49Hjx7h8OHDqFOnDjp37pxjnnbt2sHBwQG+vr6YO3cunj9/jsmTJwP439UVS0tL6OnpITQ0FBUqVICurq5K3duqVauGrVu34uTJkyhbtiwWLFiABw8e5EgYEhISEBAQgKFDh+LcuXNYunQp5s+fX6B1evXqFcaNG4fevXvD3t4e9+7dQ2RkJHr16pVnvESlERMGIvps5PdBakXt7Nmzcr8WZ590+/r6Yu3atYW2HJlMhp07d2LkyJFo2bIlNDQ00KFDByxdulSq069fP9y6dQvjx4/H69ev0atXL3z33XfYv3+/VOebb76Bvr4+5s2bh3HjxsHAwADOzs4F/sW9bt26WLBgAebMmYNJkyahZcuWCA4OlrvKAQD6+vqYMGECvvzyS9y/fx9ubm749ddfpemenp7YvXs3goKCMGfOHGhpaaFGjRr45ptvChRXttmzZyMrKwsDBgzA8+fP0bBhQ+zfv1/pSXhh+/rrr3HhwgX4+PigTJkyGDNmTK5XFwBg/vz5CAgIwOrVq1G+fHnEx8crrNe2bVtUq1YNLVu2RHp6Ory8vDB9+nSl7YaEhGDmzJkYO3Ys7t+/D3NzczRt2hRdunRRWF9TUxM7duzAN998g0aNGqFKlSqYN28eunbtKnUTK1OmDJYsWYKgoCBMnToVbm5ueQ7VCgCTJ0/G7du34enpCX19fQwZMgTdu3dHSkqKXD0fHx+8evUKjRs3hqamJkaPHo0hQ4YUaJ00NTXx5MkT+Pj44MGDBzA3N0fPnj1zHVmKqDSTiQ87RBLlITU1FSYmJkhJSYGxsXFxh0Mk5/Xr14iLi4O9vX2R9FcnKu38/Pzw7Nkz6XkIRSU8PBwtWrTAzZs3P7ubhWMex+RZp5Z5rQK1ndsxkN/fVFC8wkBERERqt337dhgaGqJatWq4efMmRo8ejebNm392yQJRacSEgYiIiNTu+fPnmDBhAhISEmBubg4PDw/pHgIiKtmYMBAREX3GCvP+mNz4+PjkuB+FiEqHnMMfEBERERER/X9MGIjok8TxHIjoc8RjH6kDEwYi+qRkPyn35cuXxRwJEVHRyz72ffi0d6KPwXsYiOiToqmpCVNTUzx8+BDAuzH7sx8MRURUEmS9zcqzzuvXr/PVphACL1++xMOHD2FqagpNTc
2ChkeUAxMGIvrkWFtbA4CUNBARlSQP0/I+NpV5VrBTNFNTU+kYSFRYmDAQ0SdHJpPBxsYGlpaWePv2bXGHQ0QkZ/T20XnW2dVjV77b1dLS4pUFUgsmDET0ydLU1OSXJxGVOIlvEvOswyfVU0nCm56LWGZmJqKjo/H06dPiDoWIiIiIKE9MGNTM398fv/76K4B3yYK7uzsaNGiAihUrIiwsrHiDIyIiIiLKAxMGNdu6dSvq1q0LAPjnn38QFxeHq1evYsyYMfjhhx+KOToiIiIiotwxYVCzx48fS6MV7N27F3369IGjoyO+/vprXLp0qZijIyIiIiLKHRMGNbOyssKVK1eQmZmJ0NBQtGvXDsC7B6vwZkwiIiIiKuk4SpKaDRw4EH379oWNjQ1kMhk8PDwAAKdPn0aNGjWKOToiIiIiotwxYVCz6dOno3bt2rh79y769OkDHR0dAO+Ge5w4cWIxR0dERERElDsmDEWgd+/eOcp8fX2LIRIiIiIiovxhwqAGS5YsUbnuqFGj1BgJEREREdHHYcKgBgsXLlSpnkwmY8JARERERCUaEwY1iIuLK+4QiIiIiIgKBYdVLSJv3rzBtWvXkJGRUdyhEBERERGpjAmDmr18+RKDBg2Cvr4+atWqhYSEBADAyJEjMXv27Hy3d+zYMXTt2hW2traQyWTYsWOH3HQhBKZOnQobGxvo6enBw8MDN27ckKuTnJwMb29vGBsbw9TUFIMGDUJaWlqB15GIiIiIPl1MGNRs0qRJuHDhAsLCwqCrqyuVe3h44M8//8x3ey9evEDdunWxbNkyhdPnzp2LJUuWYMWKFTh9+jQMDAzg6emJ169fS3W8vb0RExODgwcPYvfu3Th27BiGDBmS/5UjIiIiok8e72FQsx07duDPP/9E06ZNIZPJpPJatWrh1q1b+W6vY8eO6Nixo8JpQggsWrQIkydPRrdu3QAA69evh5WVFXbs2IH+/fsjNjYWoaGhiIyMRMOGDQEAS5cuRadOnfDTTz/B1ta2AGtJRERERJ8qXmFQs0ePHsHS0jJH+YsXL+QSiMIQFxeHpKQk6WnSAGBiYoImTZogIiICABAREQFTU1MpWQDeXe3Q0NDA6dOnCzUeIiIiIir9mDCoWcOGDbFnzx7pdXaSsGbNGri6uhbqspKSkgAAVlZWcuVWVlbStKSkpBwJTJkyZWBmZibV+VB6ejpSU1Pl/oiIiIjo88AuSWo2a9YsdOzYEVeuXEFGRgYWL16MK1eu4OTJkzh69Ghxh6eS4OBgBAYGFncYRERERFQMeIVBzVq0aIHo6GhkZGTA2dkZBw4cgKWlJSIiIuDi4lKoy7K2tgYAPHjwQK78wYMH0jRra2s8fPhQbnpGRgaSk5OlOh+aNGkSUlJSpL+7d+8WatxEREREVHLxCkMRcHBwwOrVq9W+HHt7e1hbW+Pw4cOoV68eACA1NRWnT5/Gd999BwBwdXXFs2fPEBUVJSUs//77L7KystCkSROF7ero6EBHR0ft8RMRERFRycOEoQhkZmZi+/btiI2NBQDUrFkT3bp1Q5ky+d/8aWlpuHnzpvQ6Li4O0dHRMDMzQ6VKleDv74+ZM2eiWrVqsLe3x5QpU2Bra4vu3bsDAJycnNChQwcMHjwYK1aswNu3bzFixAj079+fIyQRERERUQ5MGNQsJiYGX3zxBZKSklC9enUAwJw5c2BhYYF//vkHtWvXzld7Z8+eRevWraXXAQEBAABfX1+sXbsW48ePx4sXLzBkyBA8e/YMLVq0QGhoqNwzIDZu3IgRI0agbdu20NDQQK9evbBkyZJCWFsiIiIi+tTIhBCiuIP4lLm6usLCwgLr1q1D2bJlAQBPnz6Fn58fHj16hJMnTxZzhPmXmpoKExMTpKSkwNjYuLjDISIiKlWc1znnWeeS76VCXy6/v6mgeIVBzaKjo3H27FkpWQCAsmXL4scff0SjRo2KMTIiIiIiorxxlCQ1c3R0zDFqEQA8fPgQVa
tWLYaIiIiIiIhUx4RBDd5/wFlwcDBGjRqFrVu34t69e7h37x62bt0Kf39/zJkzp7hDJSIiIiLKFbskqYGpqan0RGcAEEKgb9++Uln2bSNdu3ZFZmZmscRIRERERKQKJgxqcOTIkeIOgYiIiIioUDBhUAN3d/fiDoGIiIiIqFAwYSgiL1++REJCAt68eSNXXqdOnWKKiIiIiIgob0wY1OzRo0cYOHAg9u3bp3A672EgIiIiopKMoySpmb+/P549e4bTp09DT08PoaGhWLduHapVq4Zdu3YVd3hERERERLniFQY1+/fff7Fz5040bNgQGhoaqFy5Mtq1awdjY2MEBwejc+fOxR0iEREREZFSvMKgZi9evIClpSWAd094fvToEQDA2dkZ586dK87QiIiIiIjyxIRBzapXr45r164BAOrWrYuVK1fi/v37WLFiBWxsbIo5OiIiIiKi3LFLkpqNHj0aiYmJAIBp06ahQ4cO2LhxI7S1tbF27driDY6IiIiIKA9MGNTsq6++kv7v4uKCO3fu4OrVq6hUqRLMzc2LMTIiIiIiorwxYShi+vr6aNCgQXGHQURERESkEiYMahAQEKBy3QULFqgxEiIiIiKij8OEQQ3Onz+vUj2ZTKbmSIiIiIiIPg4TBjU4cuRIcYdARERERFQoOKwqEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBjUbN26ddizZ4/0evz48TA1NUWzZs1w586dYoyMiIiIiChvTBjUbNasWdDT0wMAREREYNmyZZg7dy7Mzc0xZsyYYo6OiIiIiCh3fA6Dmt29exdVq1YFAOzYsQO9evXCkCFD0Lx5c7Rq1ap4gyMiIiIiygOvMKiZoaEhnjx5AgA4cOAA2rVrBwDQ1dXFq1evijM0IiIiIqI88QqDmrVr1w7ffPMN6tevj+vXr6NTp04AgJiYGNjZ2RVvcEREREREeeAVBjVbtmwZXF1d8ejRI/z9998oV64cACAqKgpeXl7FHB0RERERUe5kQghR3EFQ6ZKamgoTExOkpKTA2Ni4uMMhIiIqVZzXOedZ55LvpUJfLr+/qaDYJUlNEhIS5F5XqlSpmCIhIiIiIio4JgxqYmdnB5lMBiEEZDIZMjMzizskIiIiIqJ8Y8KgJllZWcUdAhERERHRR+NNz0REREREpBSvMKjBrl27VK77xRdfqDESIiIiIqKPw4RBDbp3765SPd7bQEREREQlHRMGNeD9C0RERET0qeA9DEXo9evXxR0CEREREVG+MGFQs8zMTMyYMQPly5eHoaEhbt++DQCYMmUKfv3112KOjoiIiIgod0wY1OzHH3/E2rVrMXfuXGhra0vltWvXxpo1a4oxMiIiIiKivDFhULP169dj1apV8Pb2hqamplRet25dXL16tdCXl5mZiSlTpsDe3h56enpwcHDAjBkzIISQ6gghMHXqVNjY2EBPTw8eHh64ceNGocdCRERERKUfEwY1u3//PqpWrZqjPCsrC2/fvi305c2ZMwfLly/Hzz//jNjYWMyZMwdz587F0qVLpTpz587FkiVLsGLFCpw+fRoGBgbw9PTkPRZERERElAMTBjWrWbMmjh8/nqN869atqF+/fqEv7+TJk+jWrRs6d+4MOzs79O7dG+3bt8eZM2cAvLu6sGjRIkyePBndunVDnTp1sH79evz333/YsWNHocdDRERERKUbh1VVs6lTp8LX1xf3799HVlYWtm3bhmvXrmH9+vXYvXt3oS+vWbNmWLVqFa5fvw5HR0dcuHABJ06cwIIFCwAAcXFxSEpKgoeHhzSPiYkJmjRpgoiICPTv37/QYyIiIiKi0osJg5p169YN//zzD4KCgmBgYICpU6eiQYMG+Oeff9CuXbtCX97EiRORmpqKGjVqQFNTE5mZmfjxxx/h7e0NAEhKSgIAWFlZyc1nZWUlTftQeno60tPTpdepqamFHjcRERERlUxMGIqAm5sbDh
48WCTL+uuvv7Bx40Zs2rQJtWrVQnR0NPz9/WFrawtfX98CtRkcHIzAwMBCjpSIiIiISgPew/CJGTduHCZOnIj+/fvD2dkZAwYMwJgxYxAcHAwAsLa2BgA8ePBAbr4HDx5I0z40adIkpKSkSH93795V70oQERERUYnBKwxqULZsWchkMpXqJicnF+qyX758CQ0N+TxQU1MTWVlZAAB7e3tYW1vj8OHDqFevHoB3XYxOnz6N7777TmGbOjo60NHRKdQ4iYiIiKh0YMKgBosWLZL+/+TJE8ycOROenp5wdXUFAERERGD//v2YMmVKoS+7a9eu+PHHH1GpUiXUqlUL58+fx4IFC/D1118DAGQyGfz9/TFz5kxUq1YN9vb2mDJlCmxtbdG9e/dCj4eIiIiISjeZeP+JXlToevXqhdatW2PEiBFy5T///DMOHTpU6EOZPn/+HFOmTMH27dvx8OFD2NrawsvLC1OnTpWeNC2EwLRp07Bq1So8e/YMLVq0wC+//AJHR0eVlpGamgoTExOkpKTA2Ni4UOMnIiL61Dmvc86zziXfS4W+XH5/U0ExYVAzQ0NDREdH53h4282bN1GvXj2kpaUVU2QFxwMOERFRwTFhoNKGNz2rWbly5bBz584c5Tt37kS5cuWKISIiIiIiItXxHgY1CwwMxDfffIOwsDA0adIEAHD69GmEhoZi9erVxRwdEREREVHumDComZ+fH5ycnLBkyRJs27YNAODk5IQTJ05ICQQRERERUUnFhKEINGnSBBs3bizuMIiIiIiI8o0JQxHIzMzEjh07EBsbCwCoVasWvvjiC2hqahZzZEREREREuWPCoGY3b95E586dce/ePVSvXh0AEBwcjIoVK2LPnj1wcHAo5giJiIiIiJTjKElqNmrUKFSpUgV3797FuXPncO7cOSQkJMDe3h6jRo0q7vCIiIiIiHLFKwxqdvToUZw6dQpmZmZSWbly5TB79mw0b968GCMjIiIiIsobrzComY6ODp4/f56jPC0tTXryMhERERFRScWEQc26dOmCIUOG4PTp0xBCQAiBU6dO4dtvv8UXX3xR3OEREREREeWKCYOaLVmyBA4ODnB1dYWuri50dXXRvHlzVK1aFYsXLy7u8IiIiIiIcsV7GNTM1NQUO3fuxI0bN3D16lUA7x7cVrVq1WKOjIiIiIgob0wYiki1atVQrVq14g6DiIiIiChfmDComRACW7duxZEjR/Dw4UNkZWXJTd+2bVsxRUZERERElDcmDGrm7++PlStXonXr1rCysoJMJivukIiIiIiIVMaEQc02bNiAbdu2oVOnTsUdChERERFRvnGUJDUzMTFBlSpVijsMIiIiIqICYcKgZtOnT0dgYCBevXpV3KEQEREREeUbuySpWd++ffHHH3/A0tISdnZ20NLSkpt+7ty5YoqMiIiIiChvTBjUzNfXF1FRUfjqq6940zMRERERlTpMGNRsz5492L9/P1q0aFHcoRARERER5RvvYVCzihUrwtjYuLjDICIiIiIqECYMajZ//nyMHz8e8fHxxR0KEREREVG+sUuSmn311Vd4+fIlHBwcoK+vn+Om5+Tk5GKKjIiIiIgob0wY1GzRokXFHQIRERERUYExYVAzX1/f4g6BiIiIiKjAeA8DEREREREpxYSBiIiIiIiUYsJARERERERKMWFQg4sXLyIrK6u4wyAiIiIi+mhMGNSgfv36ePz4MQCgSpUqePLkSTFHRERERERUMEwY1MDU1BRxcXEAgPj4eF5tICIiIqJSi8OqqkGvXr3g7u4OGxsbyGQyNGzYEJqamgrr3r59u4ijIyIiIiJSHRMGNVi1ahV69uyJmzdvYtSoURg8eDCMjIyKOywiIiIionxjwqAmHTp0AABERUVh9OjRTBiIiIiIqFRiwqBmISEh0v/v3bsHAKhQoUJxhUNERERElC+86VnNsrKyEBQUBBMTE1SuXBmVK1eGqakpZsyYwZuhiYiIiKjE4xUGNf
vhhx/w66+/Yvbs2WjevDkA4MSJE5g+fTpev36NH3/8sZgjJCIiIiJSjgmDmq1btw5r1qzBF198IZXVqVMH5cuXx7Bhw5gwEBEREVGJxi5JapacnIwaNWrkKK9RowaSk5OLISIiIiIiItUxYVCzunXr4ueff85R/vPPP6Nu3bpqWeb9+/fx1VdfoVy5ctDT04OzszPOnj0rTRdCYOrUqbCxsYGenh48PDxw48YNtcRCRERERKUbuySp2dy5c9G5c2ccOnQIrq6uAICIiAjcvXsXe/fuLfTlPX36FM2bN0fr1q2xb98+WFhY4MaNGyhbtqxcTEuWLMG6detgb2+PKVOmwNPTE1euXIGurm6hx0REREREpZdMCCGKO4hP3X///Ydly5bh6tWrAAAnJycMGzYMtra2hb6siRMnIjw8HMePH1c4XQgBW1tbjB07Ft9//z0AICUlBVZWVli7di369++f5zJSU1NhYmKClJQUGBsbF2r8REREnzrndc551rnke6nQl8vvbyooXmEoAra2tkV2c/OuXbvg6emJPn364OjRo9LN1YMHDwYAxMXFISkpCR4eHtI8JiYmaNKkCSIiIlRKGIiIiIjo88F7GD4xt2/fxvLly1GtWjXs378f3333HUaNGoV169YBAJKSkgAAVlZWcvNZWVlJ0z6Unp6O1NRUuT8iIiIi+jzwCsMnJisrCw0bNsSsWbMAAPXr18fly5exYsUK+Pr6FqjN4OBgBAYGFmaYRERERFRK8ArDJ8bGxgY1a9aUK3NyckJCQgIAwNraGgDw4MEDuToPHjyQpn1o0qRJSElJkf7u3r2rhsiJiIiIqCRiwqBGQggkJCTg9evXRbbM5s2b49q1a3Jl169fR+XKlQEA9vb2sLa2xuHDh6XpqampOH36tDSK04d0dHRgbGws90dEREREnwcmDGokhEDVqlWL9Bf5MWPG4NSpU5g1axZu3ryJTZs2YdWqVRg+fDgAQCaTwd/fHzNnzsSuXbtw6dIl+Pj4wNbWFt27dy+yOImIiIiodOA9DGqkoaGBatWq4cmTJ6hWrVqRLLNRo0bYvn07Jk2ahKCgINjb22PRokXw9vaW6owfPx4vXrzAkCFD8OzZM7Ro0QKhoaF8BgMRERER5cDnMKjZP//8g7lz52L58uWoXbt2cYdTKDiOMxERUcHxOQxU2vAKg5r5+Pjg5cuXqFu3LrS1taGnpyc3PTk5uZgiIyIiIiLKGxMGNVu0aFFxh0BEREREVGBMGNSsoM8+ICIiIiIqCThKUhG4desWJk+eDC8vLzx8+BAAsG/fPsTExBRzZEREREREuWPCoGZHjx6Fs7MzTp8+jW3btiEtLQ0AcOHCBUybNq2YoyMiIiIiyh0TBjWbOHEiZs6ciYMHD0JbW1sqb9OmDU6dOlWMkRERERER5Y0Jg5pdunQJPXr0yFFuaWmJx48fF0NERERERESqY8KgZqampkhMTMxRfv78eZQvX74YIiIiIiIiUh0TBjXr378/JkyYgKSkJMhkMmRlZSE8PBzff/89fHx8ijs8IiIiIqJcMWFQs1mzZqFGjRqoWLEi0tLSULNmTbRs2RLNmjXD5MmTizs8IiIiIqJc8TkMaqatrY3Vq1djypQpuHz5MtLS0lC/fn1Uq1atuEMjIiIiIsoTE4YiUqlSJVSsWBEAIJPJijkaIiIiIiLVsEtSEfj1119Ru3Zt6OrqQldXF7Vr18aaNWuKOywiIiIiojzxCoOaTZ06FQsWLMDIkSPh6uoKAIiIiMCYMWOQkJCAoKCgYo6QiIiIiEg5Jgxqtnz5cqxevRpeXl5S2RdffIE6depg5MiRTBiIiIiIqERjlyQ1e/v2LRo2bJij3MXFBRkZGcUQERERERGR6pgwqNmAAQOwfPnyHOWrVq2Ct7d3MURERERERKQ6dklSg4CAAOn/MpkMa9aswYEDB9C0aVMAwOnTp5GQkMAHtxEREX1iYms45V1pEk+/qHThHqsG58+fl3
vt4uICALh16xYAwNzcHObm5oiJiSny2IiIiIiI8oMJgxocOXKkuEMgIiKifFDlyoDT1dgiiISo5OE9DEREREREpBSvMKjZ69evsXTpUhw5cgQPHz5EVlaW3PRz584VU2RERERERHljwqBmgwYNwoEDB9C7d280btwYMpmsuEMiIiIiIlIZEwY12717N/bu3YvmzZsXdyhERERERPnGhEHNypcvDyMjo+IOg4iIiHLRV4WhTi8VQRxEJRFvelaz+fPnY8KECbhz505xh0JERERElG+8wqBmDRs2xOvXr1GlShXo6+tDS0tLbnpycnIxRUZERERElDcmDGrm5eWF+/fvY9asWbCysuJNz0RERERUqjBhULOTJ08iIiICdevWLe5QiIiIiIjyjfcwqFmNGjXw6tWr4g6DiIiIiKhAmDCo2ezZszF27FiEhYXhyZMnSE1NlfsjIiIiIirJ2CVJzTp06AAAaNu2rVy5EAIymQyZmZnFERYRERERkUqYMKjZkSNHijuE0me6iQp1UtQfBxERUT6p8jwHotKGe7Waubu7F3cIREREREQFxoRBzY4dO5br9JYtWxZRJERERERE+ceEQc1atWqVo+z9ZzHwHgYiIiIiKsk4SpKaPX36VO7v4cOHCA0NRaNGjXDgwIHiDo+IiIiIKFe8wqBmJiY5b+Bt164dtLW1ERAQgKioqGKIioiIiPIrtoZT3pV40zN9gniFoZhYWVnh2rVrxR0GEREREVGumAar2cWLF+VeCyGQmJiI2bNno169esUTFBERERGRipgwqFm9evUgk8kghJArb9q0KX777Te1L3/27NmYNGkSRo8ejUWLFgEAXr9+jbFjx2Lz5s1IT0+Hp6cnfvnlF1hZWak9HiIiotKKz1igzxX3fDWLi4uTe62hoQELCwvo6uqqfdmRkZFYuXIl6tSpI1c+ZswY7NmzB1u2bIGJiQlGjBiBnj17Ijw8XO0xEREREVHpwoRBzSpXrlwsy01LS4O3tzdWr16NmTNnSuUpKSn49ddfsWnTJrRp0wYAEBISAicnJ5w6dQpNmzYtlniJiIiIqGRiwlAEDh8+jMOHD+Phw4fIysqSm6aubknDhw9H586d4eHhIZcwREVF4e3bt/Dw8JDKatSogUqVKiEiIoIJAxERERHJYcKgZoGBgQgKCkLDhg1hY2Mj99A2ddm8eTPOnTuHyMjIHNOSkpKgra0NU1NTuXIrKyskJSUpbC89PR3p6enS69TU1EKNl4iIiIhKLiYMarZixQqsXbsWAwYMKJLl3b17F6NHj8bBgwcL7T6J4OBgBAYGFkpbRERERFS68DkMavbmzRs0a9asyJYXFRWFhw8fokGDBihTpgzKlCmDo0ePYsmSJShTpgysrKzw5s0bPHv2TG6+Bw8ewNraWmGbkyZNQkpKivR39+7dIlgTIiIiIioJmDCo2TfffINNmzYV2fLatm2LS5cuITo6Wvpr2LAhvL29pf9raWnh8OHD0jzXrl1DQkICXF1dFbapo6MDY2NjuT8iIiIi+jywS5KavX79GqtWrcKhQ4dQp04daGlpyU1fsGBBoS7PyMgItWvXliszMDBAuXLlpPJBgwYhICAAZmZmMDY2xsiRI+Hq6sobnomIiIgoByYManbx4kXpic6XL1+Wm1YUN0ArsnDhQmhoaKBXr15yD24jIiIiIvqQTHz4CGKiPKSmpsLExAQpKSnq6Z403USFOimFv1wiIvpsOa9zLu4Q5FzyvVTobar9+5s+WbyHgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkV
JMGIiIiIiISCkmDEREREREpBQTBiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIqTLFHQARERGRusTWcFKt4iSeEhEpwysMRERERESkFBMGIiIiIiJSitffiIiI6JPVl12NiD4arzAQEREREZFSTBiIiIiIiEgpXqcjIiKiUsl5nXNxh0D0WeAVBiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFh+MQEBwejUaNGMDIygqWlJbp3745r167J1Xn9+jWGDx+OcuXKwdDQEL169cKDBw+KKWIiIiIiKsmYMHxijh49iuHDh+PUqVM4ePAg3r59i/bt2+PFixdSnTFjxuCff/7Bli1bcPToUfz333/o2bNnMUZNRERERCUVh1X9xISGhsq9Xrt2LSwtLREVFYWWLVsiJSUFv/76KzZt2oQ2bdoAAEJCQuDk5IRTp06hadOmxRE2EREREZVQvMLwiUtJSQEAmJmZAQCioqLw9u1beHh4SHVq1KiBSpUqISIiolhiJCIiIqKSi1cYPmFZWVnw9/dH8+bNUbt2bQBAUlIStLW1YWpqKlfXysoKSUlJCttJT09Henq69Do1NVVtMRMRERFRycIrDJ+w4cOH4/Lly9i8efNHtRMcHAwTExPpr2LFioUUIRERERGVdEwYPlEjRozA7t27ceTIEVSoUEEqt7a2xps3b/Ds2TO5+g8ePIC1tbXCtiZNmoSUlBTp7+7du+oMnYiIiIhKECYMnxghBEaMGIHt27fj33//hb29vdx0FxcXaGlp4fDhw1LZtWvXkJCQAFdXV4Vt6ujowNjYWO6PiIiIiD4PvIfhEzN8+HBs2rQJO3fuhJGRkXRfgomJCfT09GBiYoJBgwYhICAAZmZmMDY2xsiRI+Hq6soRkoiIiIgoByYMn5jly5cDAFq1aiVXHhISAj8/PwDAwoULoaGhgV69eiE9PR2enp745ZdfijhSIiIiIioNmDB8YoQQedbR1dXFsmXLsGzZsiKIiIiIKP9iazjlXWkST2OIigLvYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpBTvFiIiIqISpy9vaCYqMXiFgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUryjiIiIiIqU8zrn4g6BiPKBVxiIiIiIiEgpJgxERERERKQUuyQRERGRSmJrOOVZx+lqbBFEQkRFiVcYiIiIiIhIKV5hICIiokKjylUI8CnORKUKrzAQEREREZFSTBiIiIiIiEgpXhMkIiIilfRlVyKizxKvMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUnxkIxGRGtlN3JNnnfjZnYsgEqLcOa9zLu4QiKiE4hUGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERK8aZnIiL6LKhyU+8l30tFEAkRUenCKwxERERERKQUEwYiIiIiIlKKXZKIiIgKGbs/EdGnhFcYiIiIiIhIKV5hICIiOao8ndrIaWKedYryF/TYGk55V5pUOF95RbksVahyNeOv4Iy8GyrCmImodOEVBiIiIiIiUooJAxERERERKcXrj5+xZcuWYd68eUhKSkLdunWxdOlSNG7cuLjDoiKiSreT+NmdiyCSkonb5+Op0nWnb2F1gynC7jSFFbMqXYkKS6FtZyL6LPEKw2fqzz//REBAAKZNm4Zz586hbt268PT0xMOHD4s7NCIiIiIqQfiTw2dqwYIFGDx4MAYOHAgAWLFiBfbs2YPffvsNEyfmfTNjsZtukmcVu9eb8qxT0n4hLmm/ahdWPKWxHVWoFI/ulyq0k/e+WliKcv/hr9pERJ8GXmH4DL158wZRUVHw8PCQyjQ0NODh4Y
GIiIhijIyIiIiIShr+/PMZevz4MTIzM2FlZSVXbmVlhatXr+aon56ejvT0dOl1SkoKACA1NVU9AaaLQmkmK/1lnnXUtg4FVJQxq7IsVagST2GtV1G2U1hSZXnvz0UaTyFtn8xXmYURTomjyvb5VNedShZ1fD9ltylE4XzP0ueDCQPlKTg4GIGBgTnKK1asWAzR5EffPGuYLFJ/FIWtpMVcWPGUtHYKS96d5wBV9tXCUtK2T0lj8p1q7xiRuqlzX3z+/DlMTLivk+qYMHyGzM3NoampiQcPHsiVP3jwANbW1jnqT5o0CQEBAdLrrKwsJCcno1y5cpDJZIUaW2pqKipWrIi7d+/C2Ni4UNum/+F2LhrczkWD27locDsXHXVtayEEnj9/Dltb20Jrkz4PTBg+Q9ra2nBxccHhw4fRvXt3AO+SgMOHD2PEiBE56uvo6EBHR0euzNTUVK0xGhsb8wupCHA7Fw1u56LB7Vw0uJ2Ljjq2Na8sUEEwYfhMBQQEwNfXFw0bNkTjxo2xaNEivHjxQho1iYiIiIgIYMLw2erXrx8ePXqEqVOnIikpCfXq1UNoaGiOG6GJiIiI6PPGhOEzNmLECIVdkIqTjo4Opk2blqMLFBUubueiwe1cNLidiwa3c9HhtqaSRiY4thYRERERESnBB7cREREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMVuWXLlsHOzg66urpo0qQJzpw5k2v9LVu2oEaNGtDV1YWzszP27t1bRJGWbvnZzqtXr4abmxvKli2LsmXLwsPDI8/3hd7J7/6cbfPmzZDJZNLDEyl3+d3Oz549w/Dhw2FjYwMdHR04Ojry2KGC/G7nRYsWoXr16tDT00PFihUxZswYvH79uoiiLZ2OHTuGrl27wtbWFjKZDDt27MhznrCwMDRo0AA6OjqoWrUq1q5dq/Y4ieQIoiK0efNmoa2tLX777TcRExMjBg8eLExNTcWDBw8U1g8PDxeamppi7ty54sqVK2Ly5MlCS0tLXLp0qYgjL13yu52//PJLsWzZMnH+/HkRGxsr/Pz8hImJibh3714RR1665Hc7Z4uLixPly5cXbm5uolu3bkUTbCmW3+2cnp4uGjZsKDp16iROnDgh4uLiRFhYmIiOji7iyEuX/G7njRs3Ch0dHbFx40YRFxcn9u/fL2xsbMSYMWOKOPLSZe/eveKHH34Q27ZtEwDE9u3bc61/+/Ztoa+vLwICAsSVK1fE0qVLhaampggNDS2agImEEEwYqEg1btxYDB8+XHqdmZkpbG1tRXBwsML6ffv2FZ07d5Yra9KkiRg6dKha4yzt8rudP5SRkSGMjIzEunXr1BXiJ6Eg2zkjI0M0a9ZMrFmzRvj6+jJhUEF+t/Py5ctFlSpVxJs3b4oqxE9Cfrfz8OHDRZs2beTKAgICRPPmzdUa56dElYRh/PjxolatWnJl/fr1E56enmqMjEgeuyRRkXnz5g2ioqLg4eEhlWloaMDDwwMREREK54mIiJCrDwCenp5K61PBtvOHXr58ibdv38LMzExdYZZ6Bd3OQUFBsLS0xKBBg4oizFKvINt5165dcHV1xfDhw2FlZYXatWtj1qxZyMzMLKqwS52CbOdmzZohKipK6rZ0+/Zt7N27F506dSqSmD8X/B6kkoBPeqYi8/jxY2RmZsLKykqu3MrKClevXlU4T1JSksL6SUlJaouztCvIdv7QhAkTYGtrm+NLiv6nINv5xIkT+PXXXxEdHV0EEX4aCrKdb9++jX///Rfe3t7Yu3cvbt68iWHDhuHt27eYNm1aUYRd6hRkO3/55Zd4/PgxWrRoASEEMjIy8O233+L//u//iiLkz4ay78HU1FS8evUKenp6xRQZfU54hYGI5MyePRubN2/G9u3boaurW9zhfDKeP3+OAQMGYPXq1TA3Ny/ucD5pWVlZsLS0xKpVq/D/2rm/2CarP47jn26jK0zIGAw3yJh2YlikY7KJwT
8ZlgsSAsq8gNTYQMwyjSFBceIMzJpMTIVpZkSzSBRicENBMMgSBKvedKIgrRmhGTCsCwamTi4mBDbt8eIXGid7GOVPu/18v5Je9Dnn9Pn2m27NZ2fPU1ZWpqVLl2rNmjVqampKdWn/V77++mu9+uqreuedd3T48GHt3LlTra2tqq+vT3VpAG4wdhiQNBMnTlR6erq6u7sHHO/u7lZeXt6ga/Ly8hKaj2vr8yUNDQ3y+/364osvVFJScjPLHPES7XNnZ6ei0agWLVoUPxaLxSRJGRkZ6ujoUFFR0c0tegS6ls9zfn6+Ro0apfT09Pix4uJinTlzRn19fbLb7Te15pHoWvpcV1cnr9erqqoqSZLL5dK5c+dUXV2tNWvWKC2Nv0neCFbfg+PGjWN3AUnDTzOSxm63q6ysTIFAIH4sFospEAhozpw5g66ZM2fOgPmStH//fsv5uLY+S9L69etVX1+vvXv3qry8PBmljmiJ9nn69Olqb29XOByOPx5++GE99NBDCofDKigoSGb5I8a1fJ7vv/9+nThxIh7IJOnYsWPKz88nLFi4lj6fP3/+slBwKaQZY25esf8xfA9iWEj1Vdf4b9m2bZvJzMw0W7ZsMUePHjXV1dUmOzvbnDlzxhhjjNfrNbW1tfH5wWDQZGRkmIaGBhOJRIzP5+O2qlch0T77/X5jt9vNjh07zOnTp+OP3t7eVL2FESHRPv8bd0m6Oon2uaury4wdO9asWLHCdHR0mD179phJkyaZV155JVVvYURItM8+n8+MHTvWtLS0mJMnT5p9+/aZoqIis2TJklS9hRGht7fXhEIhEwqFjCTzxhtvmFAoZH766SdjjDG1tbXG6/XG51+6rerzzz9vIpGIefvtt7mtKpKOwICke+utt8zUqVON3W43s2fPNgcOHIiPVVRUmGXLlg2Y//HHH5s777zT2O12c9ddd5nW1tYkVzwyJdLnwsJCI+myh8/nS37hI0yin+d/IjBcvUT73NbWZu69916TmZlpnE6nWbdunfnzzz+TXPXIk0if+/v7zcsvv2yKioqMw+EwBQUF5umnnzZnz55NfuEjyFdffTXo79tLvV22bJmpqKi4bE1paamx2+3G6XSazZs3J71u/LfZjGHfEAAAAMDguIYBAAAAgCUCAwAAAABLBAYAAAAAlggMAAAAACwRGAAAAABYIjAAAAAAsERgAAAAAGCJwAAAAADAEoEBAAAAgCUCAwDgqvX396e6BABAkhEYAGAY27t3rx544AFlZ2drwoQJWrhwoTo7O+Pjp06dksfjUU5OjrKyslReXq5vv/02Pv7ZZ5/pnnvukcPh0MSJE1VZWRkfs9ls+vTTTwecLzs7W1u2bJEkRaNR2Ww2ffTRR6qoqJDD4dCHH36onp4eeTweTZkyRWPGjJHL5VJLS8uA14nFYlq/fr3uuOMOZWZmaurUqVq3bp0kye12a8WKFQPm//rrr7Lb7QoEAjeibQCAG4jAAADD2Llz57Rq1SodOnRIgUBAaWlpqqysVCwW0x9//KGKigr9/PPP2r17t3744QetXr1asVhMktTa2qrKykotWLBAoVBIgUBAs2fPTriG2tparVy5UpFIRPPnz9eFCxdUVlam1tZWHTlyRNXV1fJ6vfruu+/ia1588UX5/X7V1dXp6NGjam5u1q233ipJqqqqUnNzsy5evBifv3XrVk2ZMkVut/s6OwYAuNFsxhiT6iIAAFfnt99+U25urtrb29XW1qaamhpFo1Hl5ORcNve+++6T0+nU1q1bB30tm82mXbt2afHixfFj2dnZamxs1PLlyxWNRnX77bersbFRK1euvGJdCxcu1PTp09XQ0KDe3l7l5uZq48aNqqqqumzuhQsXNHnyZDU1NWnJkiWSpJkzZ+rRRx+Vz+dLoBsAgGRghwEAhrHjx4/L4/HI6XRq3Lhxuu222yRJXV1dCofDuvvuuwcNC5IUDoc1b968666hvLx8wPO//vpL9fX1crlcys
nJ0S233KLPP/9cXV1dkqRIJKKLFy9antvhcMjr9er999+XJB0+fFhHjhzR8uXLr7tWAMCNl5HqAgAA1hYtWqTCwkJt2rRJkydPViwW04wZM9TX16fRo0dfce1Q4zabTf/eZB7souasrKwBzzds2KA333xTjY2NcrlcysrK0jPPPKO+vr6rOq/0v39LKi0t1alTp7R582a53W4VFhYOuQ4AkHzsMADAMNXT06OOjg6tXbtW8+bNU3Fxsc6ePRsfLykpUTgc1u+//z7o+pKSkiteRJybm6vTp0/Hnx8/flznz58fsq5gMKhHHnlEjz/+uGbOnCmn06ljx47Fx6dNm6bRo0df8dwul0vl5eXatGmTmpub9cQTTwx5XgBAahAYAGCYGj9+vCZMmKB3331XJ06c0JdffqlVq1bFxz0ej/Ly8rR48WIFg0GdPHlSn3zyib755htJks/nU0tLi3w+nyKRiNrb2/Xaa6/F17vdbm3cuFGhUEiHDh3SU089pVGjRg1Z17Rp07R//361tbUpEonoySefVHd3d3zc4XDohRde0OrVq/XBBx+os7NTBw4c0HvvvTfgdaqqquT3+2WMGXD3JgDA8EJgAIBhKi0tTdu2bdP333+vGTNm6Nlnn9WGDRvi43a7Xfv27dOkSZO0YMECuVwu+f1+paenS5Lmzp2r7du3a/fu3SotLZXb7R5wJ6PXX39dBQUFevDBB/XYY4+ppqZGY8aMGbKutWvXatasWZo/f77mzp0bDy3/VFdXp+eee04vvfSSiouLtXTpUv3yyy8D5ng8HmVkZMjj8cjhcFxHpwAANxN3SQIApEQ0GlVRUZEOHjyoWbNmpbocAIAFAgMAIKn6+/vV09Ojmpoa/fjjjwoGg6kuCQBwBfxLEgAgqYLBoPLz83Xw4EE1NTWluhwAwBDYYQAAAABgiR0GAAAAAJYIDAAAAAAsERgAAAAAWCIwAAAAALBEYAAAAABgicAAAAAAwBKBAQAAAIAlAgMAAAAASwQGAAAAAJb+BqL0CVciIionAAAAAElFTkSuQmCC", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/instance_threshold_pred_Swin_Generalized_latest(1).tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACM0UlEQVR4nOzdd1gU1/s28HtB6U2kK4JiQwULdkXs2LA3JBGMiom9ReM3FsBELNFojLEmWKIx0WCJBVvEgtjFgoiKIGqwC4goCpz3D1/m58ouLMiyoPfnurh0z5yZeWZ2dnaePWfOyIQQAkRERERERApoaToAIiIiIiIquZgwEBERERGRUkwYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKMWEgIiIiIiKliiRhCAgIgEwmkytzdHSEn59fUSw+TwkJCZDJZFi7dq1U5ufnByMjI7WvO4dMJkNAQECxra8wzpw5g+bNm8PQ0BAymQxRUVEFXoajoyO6detW9MGRxoSFhaFevXrQ09ODTCZDcnJygZchk8kwevToog/uI6KOc0Tr1q3RunXrIl2mKtauXQuZTIazZ88W+7o/Rq1bt0adOnU0HYba5Rw3CQkJBZ5X0TVGUQkPD4dMJkN4eHiB51XHtYamPtcfws/PD46OjoWe/2M4pxT2HK+ObVfX56VEtTDs2bOnxF54l+TY8vPmzRv069cPT58+xY8//ogNGzbAwcFBYd2rV68iICCgUCd1Kl2ePHmC/v37Q19fH8uWLcOGDRtgaGiosO6JEycQEBBQqISCCu9T+TyW5vOrKv777z8EBAQU6ocaUszR0fGjPmY+Rr/88ovcj7slWWmKtbiUUdeCY2NjoaVVsHxkz549WLZsWYFOAg4ODnj58iXKli1bwAgLJq/YXr58iTJl1LYrP1hcXBxu376N1atXY9iwYXnWvXr1KgIDA9G6desP+sWASr4zZ87g+fPnmD17Ntq3b59n3RMnTiAwMBB+fn4wMzMrngA/IoU9R+T1edy/f38RRad5hTn3lyb//fcfAgMD4ejoiHr16mk6HCKN+OWXX2BhYVEsvU8+VGmKtbio7SpXV1dXXYsGAGRmZiI7Oxs6OjrQ09NT67ryo+n15+fhw4cAwAs9NcjOzsbr169L/DGgCI+L4qOO40NHR6fIl0lERKRIgbskHT9+HI0aNYKenh6cnJywcuVKhfXev4fhzZs3CAwMRLVq1aCnp4fy5cujZcuWOHDgAIC3feCWLVsG4G1fsJw/4P/uU/jhhx+wePFiODk5QVdXF1evXlV4D0OOW7duwdPTE4aGhrCzs0NQUBCEENJ0ZX0X319mXrHllL3/y9iFCxfQuXNnmJiYwMjICO3atcPJkyfl6uT0XYuIiMDEiRNhaWkJQ0ND9OrVC48ePVL8Brz
n33//hbu7OwwNDWFmZoYePXogJiZGmu7n5wcPDw8AQL9+/SCTyZT2j1y7di369esHAGjTpo20ne/vn+PHj6Nx48bQ09NDlSpVsH79+lzLSk5Oxvjx42Fvbw9dXV1UrVoV8+bNQ3Z2dr7btGPHDnTt2hV2dnbQ1dWFk5MTZs+ejaysrFx1T506hS5duqBcuXIwNDSEq6srlixZIlfn2rVr6N+/PywtLaGvr48aNWrg22+/ldtHilpTFPUDzOmvv3HjRtSuXRu6uroICwsDAPzwww9o3rw5ypcvD319fbi5uWHr1q0Kt/H3339H48aNYWBggHLlyqFVq1bSL8a+vr6wsLDAmzdvcs3XsWNH1KhRI+8dCGDLli1wc3ODvr4+LCws8Nlnn+HevXvS9NatW8PX1xcA0KhRI8hkMqW/pAQEBODrr78GAFSuXFk6Lt7vJrN9+3bUqVMHurq6qF27trRf3nXv3j188cUXsLa2lur99ttv+W4PAISEhKBt27awsrKCrq4uatWqheXLl+eqd/bsWXh6esLCwgL6+vqoXLkyvvjiC7k6mzdvhpubG4yNjWFiYgIXF5dcx82tW7fQr18/mJubw8DAAE2bNsXu3btzre/Vq1cICAhA9erVoaenB1tbW/Tu3RtxcXFSnffPEbdv38bIkSNRo0YN6Ovro3z58ujXr5/cPs3v86ior/PDhw8xdOhQWFtbQ09PD3Xr1sW6devk6rx7Pl21apV0Pm3UqBHOnDmjdP+/Lz09HSNGjED58uVhYmKCwYMH49mzZ7nq7d27VzpHGRsbo2vXroiOjpam53V+bdCgAXr37i23PBcXF8hkMly6dEkq+/PPPyGTyeTOfaoeaxkZGZg1axaqVq0KXV1d2NvbY8qUKcjIyJCrl/PZV+U4f1d4eDgaNWoEABgyZIi0fe9/Z129ehVt2rSBgYEBKlSogPnz5xc6VkVy7pW4dOkSPDw8YGBggKpVq0rnqCNHjqBJkybSOfLgwYO5lqHK9xoAREdHo23bttDX10fFihXx3XffKT3353d8qCq/a4yCOHbsGPr164dKlSpJ+3nChAl4+fKlwvr5XWsAb39cWrx4MWrXrg09PT1YW1tjxIgRCj8z71u6dClq164tfV80bNgQmzZtynOe169fY+bMmXBzc4OpqSkMDQ3h7u6Ow4cPy9Ur6Pkg5/jX09NDnTp1sG3btnzjB95eE0ZHR+PIkSPSZ+D981dGRoZK10IfcszkHP/vHpshISFy32mqxJofVc7x7yqq86kyBw4cQMuWLWFmZgYjIyPUqFED//vf/wq0TQVqYbh8+TI6duwIS0tLBAQEIDMzE7NmzYK1tXW+8wYEBCA4OBjDhg1D48aNkZqairNnz+L8+fPo0KEDRowYgf/++w8HDhzAhg0bFC4jJCQEr169gr+/P3R1dWFubq70JJSVlYVOnTqhadOmmD9/PsLCwjBr1ixkZmYiKCioIJutUmzvio6Ohru7O0xMTDBlyhSULVsWK1euROvWraWT8rvGjBmDcuXKYdasWUhISMDixYsxevRo/Pnnn3mu5+DBg+jcuTOqVKmCgIAAvHz5EkuXLkWLFi1w/vx5ODo6YsSIEahQoQLmzJmDsWPHolGjRkrfr1atWmHs2LH46aef8L///Q/Ozs4AIP0LADdv3kTfvn0xdOhQ+Pr64rfffoOfnx/c3NxQu3ZtAG8PfA8PD9y7dw8jRoxApUqVcOLECUybNg1JSUlYvHhxntu1du1aGBkZYeLEiTAyMsK///6LmTNnIjU1FQsWLJDqHThwAN26dYOtrS3GjRsHGxsbxMTEYNeuXRg3bhyAtycHd3d3lC1bFv7+/nB0dERcXBz++ecffP/993nGocy///6Lv/76C6NHj4aFhYWUbCxZsgTdu3eHj48PXr9+jc2bN6Nfv37YtWsXunbtKs0fGBiIgIAANG/eHEFBQdDR0cGpU6fw77//omPHjvj888+xfv167Nu3T+4m8/v
37+Pff//FrFmz8t1/Q4YMQaNGjRAcHIwHDx5gyZIliIiIwIULF2BmZoZvv/0WNWrUwKpVqxAUFITKlSvDyclJ4fJ69+6N69ev448//sCPP/4ICwsLAIClpaVU5/jx4wgNDcXIkSNhbGyMn376CX369EFiYiLKly8PAHjw4AGaNm0qXXhZWlpi7969GDp0KFJTUzF+/Pg8t2v58uWoXbs2unfvjjJlyuCff/7ByJEjkZ2djVGjRgF4e8Gcc4765ptvYGZmhoSEBISGhkrLOXDgALy9vdGuXTvMmzcPABATE4OIiAjpuHnw4AGaN2+O9PR0jB07FuXLl8e6devQvXt3bN26Fb169QLw9jzTrVs3HDp0CAMHDsS4cePw/PlzHDhwAFeuXFG6T8+cOYMTJ05g4MCBqFixIhISErB8+XK0bt0aV69ehYGBgUqfx3e9fPkSrVu3xs2bNzF69GhUrlwZW7ZsgZ+fH5KTk6Vty7Fp0yY8f/4cI0aMgEwmw/z589G7d2/cunVLpS6eo0ePhpmZGQICAhAbG4vly5fj9u3b0g8xALBhwwb4+vrC09MT8+bNQ3p6OpYvX46WLVviwoUL0jlK2fnV3d0df/zxh/T66dOniI6OhpaWFo4dOwZXV1cAby/wLC0tpX2j6rGWnZ2N7t274/jx4/D394ezszMuX76MH3/8EdevX8f27dvl4lHlOH+fs7MzgoKCMHPmTPj7+8Pd3R0A0Lx5c6nOs2fP0KlTJ/Tu3Rv9+/fH1q1bMXXqVLi4uKBz586FilWRZ8+eoVu3bhg4cCD69euH5cuXY+DAgdi4cSPGjx+PL7/8EoMGDcKCBQvQt29f3LlzB8bGxgBU/167f/8+2rRpg8zMTHzzzTcwNDTEqlWroK+vnyseVY4PVeV3jVEQW7ZsQXp6Or766iuUL18ep0+fxtKlS3H37l1s2bJFrq6q1xojRoyQzs1jx45FfHw8fv75Z1y4cAERERFKP3OrV6/G2LFj0bdvX4wbNw6vXr3CpUuXcOrUKQwaNEjpNqSmpmLNmjXw9vbG8OHD8fz5c/z666/w9PTE6dOnc3WNU+V8sH//fvTp0we1atVCcHAwnjx5giFDhqBixYr57tPFixdjzJgxMDIykn6we/9aRJVroQ85Zu7duyf9+DJt2jQYGhpizZo1uXrEqBJrflQ5x7+rqM6nikRHR6Nbt25wdXVFUFAQdHV1cfPmTURERBRomyAKoGfPnkJPT0/cvn1bKrt69arQ1tYW7y/KwcFB+Pr6Sq/r1q0runbtmufyR40alWs5QggRHx8vAAgTExPx8OFDhdNCQkKkMl9fXwFAjBkzRirLzs4WXbt2FTo6OuLRo0dCCCEOHz4sAIjDhw/nu0xlsQkhBAAxa9Ys6XXPnj2Fjo6OiIuLk8r+++8/YWxsLFq1aiWVhYSECACiffv2Ijs7WyqfMGGC0NbWFsnJyQrXl6NevXrCyspKPHnyRCq7ePGi0NLSEoMHD5bKcrZzy5YteS5PCCG2bNmicJ8I8fY9BSCOHj0qlT18+FDo6uqKSZMmSWWzZ88WhoaG4vr163Lzf/PNN0JbW1skJibmGUN6enqushEjRggDAwPx6tUrIYQQmZmZonLlysLBwUE8e/ZMru67+7JVq1bC2NhY7ph9v46vr69wcHDItc5Zs2bles8BCC0tLREdHZ1v3K9fvxZ16tQRbdu2lcpu3LghtLS0RK9evURWVpbCmLKyskTFihXFgAED5KYvWrRIyGQycevWrVzrfnedVlZWok6dOuLly5dS+a5duwQAMXPmTKks5/g7c+aM0uXlWLBggQAg4uPjc00DIHR0dMTNmzelsosXLwoAYunSpVLZ0KFDha2trXj8+LHc/AMHDhSmpqYK3/d3KZru6ekpqlSpIr3etm1bvts0btw4YWJiIjIzM5XWGT9+vAAgjh07JpU9f/5cVK5cWTg6Okrv3W+//SYAiEWLFuVaxrvH2PvnCEXbEhkZKQC
I9evXS2V5fR49PDyEh4eH9Hrx4sUCgPj999+lstevX4tmzZoJIyMjkZqaKoT4v/Nb+fLlxdOnT6W6O3bsEADEP//8o3S/CPF/x42bm5t4/fq1VD5//nwBQOzYsUMI8XZ/mZmZieHDh8vNf//+fWFqaipXruz8mrP9V69eFUIIsXPnTqGrqyu6d+8u9/lwdXUVvXr1kl6reqxt2LBBaGlpyb3PQgixYsUKAUBERERIZaoe54qcOXMm13dKDg8Pj1zve0ZGhrCxsRF9+vSRygoSqyI569m0aZNUdu3aNemcdvLkSal83759ueJV9Xst57Nz6tQpqezhw4fC1NRU7hxSkOND0bn4fapcYyii6DpA0eczODhYyGQyue8SVa81jh07JgCIjRs3yi0zLCwsV/n7n+sePXqI2rVrF3i7MjMzRUZGhlzZs2fPhLW1tfjiiy+ksoKcD+rVqydsbW3lrk32798vACj8Dn1f7dq15bYth6rXQgU5ZhQZM2aMkMlk4sKFC1LZkydPhLm5ea7vN2WxKlPYc7w6zqfvf15+/PFHAUA6HgtL5S5JWVlZ2LdvH3r27IlKlSpJ5c7OzvD09Mx3fjMzM0RHR+PGjRuqrjKXPn36yP2qmZ93h3rM+aXp9evXCptai0pWVhb279+Pnj17okqVKlK5ra0tBg0ahOPHjyM1NVVuHn9/f7muL+7u7sjKysLt27eVricpKQlRUVHw8/ODubm5VO7q6ooOHTpgz549RbhV/6dWrVrSL2TA21+Za9SogVu3bkllW7Zsgbu7O8qVK4fHjx9Lf+3bt0dWVhaOHj2a5zre/SXq+fPnePz4Mdzd3ZGeno5r164BeNs0Hh8fj/Hjx+fqg5+zLx89eoSjR4/iiy++kDtm361TGB4eHqhVq1aecT979gwpKSlwd3fH+fPnpfLt27cjOzsbM2fOzDUoQE5MWlpa8PHxwc6dO/H8+XNp+saNG9G8eXNUrlxZaWxnz57Fw4cPMXLkSLl+8127dkXNmjUVdqkpCu3bt5f7Nd3V1RUmJibScSGEwN9//w0vLy8IIeSOC09PT6SkpMjtJ0Xe3b8pKSl4/PgxPDw8cOvWLaSkpAD4v/sxdu3apbBLV06dFy9e5NlVYc+ePWjcuDFatmwplRkZGcHf3x8JCQm4evUqAODvv/+GhYUFxowZk2sZeR1j727Lmzdv8OTJE1StWhVmZmb57oe8YraxsYG3t7dUVrZsWYwdOxZpaWk4cuSIXP0BAwagXLly0uucz/W7n+W8+Pv7y/0q+tVXX6FMmTLSuefAgQNITk6Gt7e33Putra2NJk2a5OoaoUhOTDnnjGPHjqFRo0bo0KEDjh07BuBt98crV65IdQtyrG3ZsgXOzs6oWbOmXL22bdsCQK4Y8zvOC8vIyAifffaZ9FpHRweNGzfOdV4tSKzK1jNw4EDpdY0aNWBmZgZnZ2e5lu+c/+esvyDfa3v27EHTpk3RuHFjqZ6lpSV8fHzkYimK4+NdRXGNkePdz+eLFy/w+PFjNG/eHEIIXLhwIVf9/K41tmzZAlNTU3To0EFuW93c3GBkZJTntpqZmeHu3bsF6i4IANra2tJ9TtnZ2Xj69CkyMzPRsGFDheeY/M4HOdccvr6+MDU1lep16NBB4fdhYeR3LfShx0xYWBiaNWsm17pibm6e69gsCgU9x6vzfJrzvbhjxw6VuoUro3LC8OjRI7x8+RLVqlXLNU2VPtVBQUFITk5G9erV4eLigq+//lquD6oq8rpQep+WlpbciQ0AqlevDgBqHaLw0aNHSE9PV7hPnJ2dkZ2djTt37siVv38xm/OhzatvY84HSNl6Hj9+jBcvXhQ4/vy8HyvwNt53Y71x4wbCwsJgaWkp95czEk/OzbbKREdHo1evXjA1NYWJiQksLS2lL9OcC8Oc/uF5jV+ec6Ir6jHOlR2Hu3btQtOmTaGnpwdzc3NYWlpi+fLlUszA27i1tLTyPcEOHjwYL1++lPqHxsb
G4ty5c/j888/znC+v46JmzZp5JqEfIr/j4tGjR0hOTsaqVatyHRdDhgwBkP9xERERgfbt20v361haWkp9MHP2sYeHB/r06YPAwEBYWFigR48eCAkJkevjPXLkSFSvXh2dO3dGxYoV8cUXX+Tqh3779m2ln62c6cDb97NGjRoFHgHp5cuXmDlzpnSPj4WFBSwtLZGcnCx3vBTE7du3Ua1atVyJ6Psx5yjMeedd738XGBkZwdbWVjq/5ly4tW3bNtd7vn///nzfb+BtN4Bq1apJycGxY8fg7u6OVq1a4b///sOtW7cQERGB7Oxs6QKnIMfajRs3EB0dnateznfF+zGqcv4rjIoVK+ZKMBWdVwsSq6rrMTU1hb29fa4yAHKfX1W/13KOw/e9P29RHB/vKoprjByJiYnSj3FGRkawtLSU7gV8//OpyrXGjRs3kJKSAisrq1zbmpaWlue2Tp06FUZGRmjcuDGqVauGUaNGqdyVZN26dXB1dZXu6bC0tMTu3bsVnmPyOx/knD8Kew2oivxi+NBj5vbt26hatWquckVlH6qg53h1nk8HDBiAFi1aYNiwYbC2tsbAgQPx119/FTh5KLaxQFu1aoW4uDjs2LED+/fvx5o1a/Djjz9ixYoV+Q71mUNRH8gPoewXQEU316qTtra2wnLx3k1TJYEqsWZnZ6NDhw6YMmWKwro5J1NFkpOT4eHhARMTEwQFBcHJyQl6eno4f/48pk6d+kHZsTIFPQ4UHYfHjh1D9+7d0apVK/zyyy+wtbVF2bJlERISku/NaYrUqlULbm5u+P333zF48GD8/vvv0NHRQf/+/Qu8rOKQ33GR87599tln0s3W78vpj65IXFwc2rVrh5o1a2LRokWwt7eHjo4O9uzZgx9//FFavkwmw9atW3Hy5En8888/2LdvH7744gssXLgQJ0+ehJGREaysrBAVFYV9+/Zh79692Lt3L0JCQjB48OBcNwiry5gxYxASEoLx48ejWbNmMDU1hUwmw8CBA9VyjCui7vNOznZs2LABNjY2uaarmmS1bNkShw4dwsuXL3Hu3DnMnDkTderUgZmZGY4dO4aYmBgYGRmhfv36cutV5VjLzs6Gi4sLFi1apLDe+xfS6tpnqp5XCxJrQdajie+gojo+chTFNQbw9rzfoUMHPH36FFOnTkXNmjVhaGiIe/fuwc/Pr1Cfz+zsbFhZWWHjxo0Kp+fVc8LZ2RmxsbHYtWsXwsLC8Pfff+OXX37BzJkzERgYqHS+33//HX5+fujZsye+/vprWFlZQVtbG8HBwXIDMuQoCdchqn6PFNUxo05FfY7/kG3X19fH0aNHcfjwYezevRthYWH4888/0bZtW+zfv1/pfs+1DlWDzRlhRlFzX2xsrErLMDc3x5AhQzBkyBCkpaWhVatWCAgIkD7MRflkuuzsbNy6dUvu4vT69esAIN0YkpO9vv8wKkW/wqoam6WlJQwMDBTuk2vXrkFLS0ulE3t+ch68pmw9FhYWSh/ClZeieA+cnJyQlpaW79j+ioSHh+PJkycIDQ1Fq1atpPL4+Phc6wCAK1euKF1Pzq8+V65cyXOd5cqVU/hAsoL8Gv/3339DT08P+/btk7uBKiQkJFfc2dnZuHr1ar7jsQ8ePBgTJ05EUlISNm3ahK5du8o1GSvy7nGR01UhR2xsrNIH9uXnQ48LS0tLGBsbIysrq1DHxT///IOMjAzs3LlT7lcoZc2wTZs2RdOmTfH9999j06ZN8PHxwebNm6VzjY6ODry8vODl5YXs7GyMHDkSK1euxIwZM1C1alU4ODgo/WwB/7efnZyccOrUKbx586ZAz4LZunUrfH19sXDhQqns1atXuY7Dgux3BwcHXLp0CdnZ2XKtDO/HXFRu3LiBNm3aSK/T0tKQlJSELl26APi/z6iVlVW+73le2+nu7o6QkBBs3rwZWVlZaN68ObS0tNCyZUspYWjevLn0pVeQY83JyQkXL15Eu3bt1PYkYaDozqv
FEasiBflec3BwUOk6oSDHh6ryu8ZQxeXLl3H9+nWsW7cOgwcPlsqVdWFU5VrDyckJBw8eRIsWLQr1w6ehoSEGDBiAAQMG4PXr1+jduze+//57TJs2TemQzVu3bkWVKlUQGhoqd7zkN2iGMjnnjw+5BvzQ4/ZDjxkHBwfcvHkzV7misg+NVdVzfI6iPJ8qoqWlhXbt2qFdu3ZYtGgR5syZg2+//RaHDx9WeXkqd0nS1taGp6cntm/fjsTERKk8JiYG+/bty3f+J0+eyL02MjJC1apV5boK5FzgFtXTZH/++Wfp/0II/PzzzyhbtizatWsH4O3Bo62tnatP/S+//JJrWarGpq2tjY4dO2LHjh1yXZ8ePHiATZs2oWXLljAxMSnkFv0fW1tb1KtXD+vWrZOL6cqVK9i/f790kBVUUbwH/fv3R2RkpMLjIjk5GZmZmUrnzfnSf/dXjdevX+d6Txo0aIDKlStj8eLFuWLNmdfS0hKtWrXCb7/9JnfMvr98JycnpKSkyDVfJyUlqTxcXE7cMplMrlUiISEh18glPXv2hJaWFoKCgnL9yvD+Lzne3t6QyWQYN24cbt26JdfHWZmGDRvCysoKK1askPts7d27FzExMXKjNRXEhx4X2tra6NOnD/7++2+FCVx+wwgrOi5SUlJyJWTPnj3LtR9zErOc/fH+uUhLS0v6xTmnTpcuXXD69GlERkZK9V68eIFVq1bB0dFR6lLWp08fPH78WO5ckyOvX+a0tbVzTV+6dGmuVq2C7PcuXbrg/v37ciOKZGZmYunSpTAyMpK6VBSVVatWyd0nsnz5cmRmZkqj+nh6esLExARz5sxReD/Ju+95XtuZ09Vo3rx5cHV1lbrLuLu749ChQzh79qzcfVUFOdb69++Pe/fuYfXq1bnqvXz5ssi6dRbVebU4YlWkIN9rXbp0wcmTJ3H69Gmp3qNHj3L9ul6Q40MVqlxjqELRuUYIkWvY5Xfld63Rv39/ZGVlYfbs2bnmzczMzPO4eH+7dHR0UKtWLQghlN6npWw7Tp06JXdOK4h3rzne7VJz4MAB6Z6u/BgaGn7QZ+BDjxlPT09ERkbKPXH96dOnClt+PjRWVc/xOYryfPq+p0+f5ip7/3tRFQVqvwkMDERYWBjc3d0xcuRI6cuodu3a+fYVrFWrFlq3bg03NzeYm5vj7Nmz2Lp1q9zNQm5ubgCAsWPHwtPTE9ra2nI3aBWEnp4ewsLC4OvriyZNmmDv3r3YvXs3/ve//0nNf6ampujXrx+WLl0KmUwGJycn7Nq1S2FfsILE9t1330lj3o4cORJlypTBypUrkZGRoXBs7cJasGABOnfujGbNmmHo0KHSsKqmpqaFfmJqvXr1oK2tjXnz5iElJQW6urrS2Peq+vrrr7Fz505069ZNGnL1xYsXuHz5MrZu3YqEhARpaM73NW/eHOXKlYOvry/Gjh0LmUyGDRs25PrgaWlpYfny5fDy8kK9evUwZMgQ2Nra4tq1a4iOjpaSlZ9++gktW7ZEgwYN4O/vj8qVKyMhIQG7d++WThoDBw7E1KlT0atXL4wdO1Yaqqx69eoq34DatWtXLFq0CJ06dcKgQYPw8OFDLFu2DFWrVpX7bFStWhXffvstZs+eDXd3d/Tu3Ru6uro4c+YM7OzsEBwcLNW1tLREp06dsGXLFpiZmal0sV+2bFnMmzcPQ4YMgYeHB7y9vaVhVR0dHTFhwgSVtud9Ocf/t99+i4EDB6Js2bLw8vIqUCvW3LlzcfjwYTRp0gTDhw9HrVq18PTpU5w/fx4HDx5UeFLL0bFjR6lVYMSIEUhLS8Pq1athZWWFpKQkqd66devwyy+/oFevXnBycsLz58+xevVqmJiYSEn0sGHD8PTpU7Rt2xYVK1bE7du3sXTpUtSrV0/q7//NN9/gjz/+QOfOnTF27FiYm5tj3bp1iI+Px99//y39gj948GCsX78eEydOxOnTp+Hu7o4XL17g4MGDGDlyJHr06KF
we7p164YNGzbA1NQUtWrVQmRkJA4ePJhraM6CfB79/f2xcuVK+Pn54dy5c3B0dMTWrVsRERGBxYsXS8NjFpXXr1+jXbt26N+/P2JjY/HLL7+gZcuW6N69OwDAxMQEy5cvx+eff44GDRpg4MCBsLS0RGJiInbv3o0WLVpIF1p5nV+rVq0KGxsbxMbGyt1c3qpVK0ydOhUA5BIGQPVj7fPPP8dff/2FL7/8EocPH0aLFi2QlZWFa9eu4a+//sK+ffvQsGHDD95XTk5OMDMzw4oVK2BsbAxDQ0M0adKkQPflFVesyqj6vTZlyhRs2LABnTp1wrhx46RhVXNawHIU5PhQhSrXGKqoWbMmnJycMHnyZNy7dw8mJib4+++/ld6nosq1hoeHB0aMGIHg4GBERUWhY8eOKFu2LG7cuIEtW7ZgyZIl6Nu3r8Lld+zYETY2NmjRogWsra0RExODn3/+GV27ds3zM92tWzeEhoaiV69e6Nq1K+Lj47FixQrUqlULaWlpBdonOYKDg9G1a1e0bNkSX3zxBZ4+fSpdA6qyTDc3NyxfvhzfffcdqlatCisrq1wt4Xn50GNmypQp+P3339GhQweMGTNGGla1UqVKePr0qVyrwofGquo5PkdRnk/fFxQUhKNHj6Jr165wcHDAw4cP8csvv6BixYpyA3vkq6DDKh05ckS4ubkJHR0dUaVKFbFixQqFQ569P6zqd999Jxo3bizMzMyEvr6+qFmzpvj+++/lhpHKzMwUY8aMEZaWlkImk0nLzBn2a8GCBbniUTasqqGhoYiLixMdO3YUBgYGwtraWsyaNSvXUJaPHj0Sffr0EQYGBqJcuXJixIgR4sqVK7mWqSw2IXIPpyWEEOfPnxeenp7CyMhIGBgYiDZt2ogTJ07I1VE2rKWy4V4VOXjwoGjRooXQ19cXJiYmwsvLSxqC8P3lqTKsqhBCrF69WlSpUkUaLjcnDgcHB4XD1r0/DJwQb4cAmzZtmqhatarQ0dERFhYWonnz5uKHH36Qe88ViYiIEE2bNhX6+vrCzs5OTJkyRRrm7/19cvz4cdGhQwdhbGwsDA0Nhaura64hDq9cuSJ69eolzMzMhJ6enqhRo4aYMWOGXJ39+/eLOnXqCB0dHVGjRg3x+++/Kx1WddSoUQrj/vXXX0W1atWErq6uqFmzpggJCVE6HOBvv/0m6tevL3R1dUW5cuWEh4eHOHDgQK56f/31lwAg/P3989xn7/vzzz+l5ZubmwsfHx9x9+5duToFGVZViLfD5VaoUEFoaWnJDUGnbJ+8fw4QQogHDx6IUaNGCXt7e1G2bFlhY2Mj2rVrJ1atWpXv+nfu3ClcXV2Fnp6ecHR0FPPmzZOGNc2J5fz588Lb21tUqlRJ6OrqCisrK9GtWzdx9uxZaTlbt24VHTt2FFZWVkJHR0dUqlRJjBgxQiQlJcmtLy4uTvTt21c6bho3bix27dqVK6709HTx7bffisqVK0vb1LdvX7nhJ98/Rzx79kwMGTJEWFhYCCMjI+Hp6SmuXbumcJ8p+zwq+tw9ePBAWq6Ojo5wcXHJNZRnXudTReey9+UcN0eOHBH+/v6iXLlywsjISPj4+MgN8Zzj8OHDwtPTU5iamgo9PT3h5OQk/Pz85N6TvM6vQgjRr18/AUD8+eefUtnr16+FgYGB0NHRkRtC+N19ocqx9vr1azFv3jxRu3Zt6fPo5uYmAgMDRUpKity+UfU4V2THjh2iVq1aokyZMnLfLx4eHgqHzVQ03LOqsSqibD3KzuuKtleV7zUhhLh06ZLw8PAQenp6okKFCmL27Nni119/zTV0pRCqHR+qDKuqyjWGIoq+b69evSrat28vjIyMhIWFhRg+fLg0hG5hrzWEEGLVqlXCzc1N6OvrC2NjY+Hi4iKmTJki/vvvP6nO+5/rlStXilatWony5csLXV1d4eTkJL7++ut83+/s7GwxZ84c4eDgIHR1dUX9+vXFrl27ch1XBT0f/P3338L
Z2Vno6uqKWrVqidDQUKVDk7/v/v37omvXrsLY2FgAkLazoNdCqhwzyly4cEG4u7sLXV1dUbFiRREcHCx++uknAUDcv38/31iVKew5Xh3n0/c/L4cOHRI9evQQdnZ2QkdHR9jZ2Qlvb+9cQ9/nR/b/N5SISqAdO3agZ8+eOHr0aK5fUYmIiOjDjB8/HitXrkRaWprKNwB/ipgwEJVg3bp1Q0xMDG7evFnsNzoSERF9TF6+fCl34/mTJ09QvXp1NGjQIM9n81AxDqtKRKrbvHkzLl26hN27d2PJkiVMFoiIiD5Qs2bN0Lp1azg7O+PBgwf49ddfkZqaihkzZmg6tBKPLQxEJZBMJoORkREGDBiAFStWlKjxpYmIiEqj//3vf9i6dSvu3r0LmUyGBg0aYNasWUU2tO/HjAkDEREREREppfJzGIiIiIiI6NPDhIGIiIiIiJRix2gqsOzsbPz3338wNjbmzbhERESlhBACz58/h52dnfQASiJVMGGgAvvvv/9gb2+v6TCIiIioEO7cuYOKFStqOgwqRZgwUIHlPI7+zp07MDEx0XA0REREpIrU1FTY29tL3+NEqmLCQAWW0w3JxMSECQMREVEpw+7EVFDswEZEREREREoxYSAiIiIiIqWYMBARERERkVK8h4GIPlpZWVl48+aNpsMgIioWZcuWhba2tqbDoI8QEwYi+ugIIXD//n0kJydrOhQiomJlZmYGGxsb3thMRYoJAxF9dHKSBSsrKxgYGPCLk4g+ekIIpKen4+HDhwAAW1tbDUdEHxMmDET0UcnKypKShfLly2s6HCKiYqOvrw8AePjwIaysrNg9iYoMb3omoo9Kzj0LBgYGGo6EiKj45Zz7eP8WFSUmDET0UWI3JCL6FPHcR+rAhIGIiIiIiJRiwkBEVEK0bt0a48eP13QYJdratWthZmZWYpaTn/T0dPTp0wcmJiaQyWSlbuSugIAA1KtXT9NhKFWY91Emk2H79u1Fsv6CfmbDw8OL5DhwdHTE4sWLP2gZRAXBm56J6JPh+M3uYl1fwtyuxbq+/ISHh6NNmzZ49uxZsVwsq8OAAQPQpUuXAs3j6OiI8ePHy13YFWY5hbFu3TocO3YMJ06cgIWFBUxNTdW+TioYmUyG+Ph4ODo6ajoUohKLCQMREZUa+vr60kgwJWE5+YmLi4OzszPq1KlT6GVkZWVBJpNBS4udAohIM3j2ISIqQTIzMzF69GiYmprCwsICM2bMgBBCmp6RkYHJkyejQoUKMDQ0RJMmTRAeHi5Nv337Nry8vFCuXDkYGhqidu3a2LNnDxISEtCmTRsAQLly5SCTyeDn56cwhidPnsDb2xsVKlSAgYEBXFxc8Mcff8jV2bp1K1xcXKCvr4/y5cujffv2ePHiBYC3LRmNGzeGoaEhzMzM0KJFC9y+fVuad/ny5XBycoKOjg5q1KiBDRs2yC07OTkZI0aMgLW1NfT09FCnTh3s2rULQO4uKHFxcejRowesra1hZGSERo0a4eDBg9L01q1b4/bt25gwYQJkMpl0Q6iiriz5xSWTybBmzRr06tULBgYGqFatGnbu3KlwH+ase+HChTh69ChkMhlat24NAHj27BkGDx6McuXKwcDAAJ07d8aNGzek+XJi27lzJ2rVqgVdXV0kJiYqXMeVK1fQuXNnGBkZwdraGp9//jkeP34sTQ8LC0PLli1hZmaG8uXLo1u3boiLi5Nbxt27d+Ht7Q1zc3MYGhqiYcOGOHXqlFydDRs2wNHREaamphg4cCCeP3+udLtz4t+1axdq1KgBAwMD9O3bF+np6Vi3bh0cHR1Rrlw5jB07FllZWdJ8+e2XnGVXqlQJBgYG6NWrF548eZJr/Tt27ECDBg2gp6eHKlWqIDAwEJmZmUrjfdezZ8/g4+MDS0tL6Ovro1q1aggJCVFpXuDtfmrYsCGMjY1hY2ODQYMGSc9FeFdERARcXV2hp6eHpk2b4sqVK3LTjx8/Dnd3d+jr68Pe3h5
jx46VPl9EmsCEgYioBFm3bh3KlCmD06dPY8mSJVi0aBHWrFkjTR89ejQiIyOxefNmXLp0Cf369UOnTp2kC6tRo0YhIyMDR48exeXLlzFv3jwYGRnB3t4ef//9NwAgNjYWSUlJWLJkicIYXr16BTc3N+zevRtXrlyBv78/Pv/8c5w+fRoAkJSUBG9vb3zxxReIiYlBeHg4evfuDSEEMjMz0bNnT3h4eODSpUuIjIyEv7+/dKG+bds2jBs3DpMmTcKVK1cwYsQIDBkyBIcPHwYAZGdno3PnzoiIiMDvv/+Oq1evYu7cuUrHk09LS0OXLl1w6NAhXLhwAZ06dYKXl5d0gR0aGoqKFSsiKCgISUlJSEpKUric/OLKERgYiP79++PSpUvo0qULfHx88PTpU4XLDA0NxfDhw9GsWTMkJSUhNDQUAODn54ezZ89i586diIyMhBACXbp0kRsGMz09HfPmzcOaNWsQHR0NKyurXMtPTk5G27ZtUb9+fZw9exZhYWF48OAB+vfvL9V58eIFJk6ciLNnz+LQoUPQ0tJCr169kJ2dLe0/Dw8P3Lt3Dzt37sTFixcxZcoUaTrwNinbvn07du3ahV27duHIkSOYO3euwm1+N/6ffvoJmzdvRlhYGMLDw9GrVy/s2bMHe/bswYYNG7By5Ups3bpVmie//XLq1CkMHToUo0ePRlRUFNq0aYPvvvtObr3Hjh3D4MGDMW7cOFy9ehUrV67E2rVr8f333+cZb44ZM2bg6tWr2Lt3L2JiYrB8+XJYWFioNC/wdijT2bNn4+LFi9i+fTsSEhIUJuZff/01Fi5ciDNnzsDS0hJeXl7SdsbFxaFTp07o06cPLl26hD///BPHjx/H6NGjVY6DqMgJogJKSUkRAERKSoqmQyHK5eXLl+Lq1avi5cuXuaY5TN1VrH8F5eHhIZydnUV2drZUNnXqVOHs7CyEEOL27dtCW1tb3Lt3T26+du3aiWnTpgkhhHBxcREBAQEKl3/48GEBQDx79qzAsXXt2lVMmjRJCCHEuXPnBACRkJCQq96TJ08EABEeHq5wOc2bNxfDhw+XK+vXr5/o0qWLEEKIffv2CS0tLREbG6tw/pCQEGFqappnrLVr1xZLly6VXjs4OIgff/wxz+XkF5cQQgAQ06dPl16npaUJAGLv3r1KYxk3bpzw8PCQXl+/fl0AEBEREVLZ48ePhb6+vvjrr7+k2ACIqKioPLdz9uzZomPHjnJld+7cEQCU7r9Hjx4JAOLy5ctCCCFWrlwpjI2NxZMnTxTWnzVrljAwMBCpqalS2ddffy2aNGmiNK6c+G/evCmVjRgxQhgYGIjnz59LZZ6enmLEiBFCCNX2i7e3t9z7IYQQAwYMkHsf27VrJ+bMmSNXZ8OGDcLW1lZ6DUBs27ZNYexeXl5iyJAhSrftfR4eHmLcuHFKp585c0YAkLY75zO4fsECkX75ski/fFncPX5c6OvpiQ3/v8y3d2/xRd++css5duyY0NLSks5rio7pHHmdA/n9TYXFFgYiohKkadOmcuOoN2vWDDdu3EBWVhYuX76MrKwsVK9eHUZGRtLfkSNHpG4mY8eOxXfffYcWLVpg1qxZuHTpUoFjyMrKwuzZs+Hi4gJzc3MYGRlh37590q/2devWRbt27eDi4oJ+/fph9erVePbsGQDA3Nwcfn5+8PT0hJeXF5YsWSL3q35MTAxatGght74WLVogJiYGABAVFYWKFSuievXqKsWalpaGyZMnw9nZGWZmZjAyMkJMTIzSLjzK5BdXDldXV+n/hoaGMDExUdjlJK/1lClTBk2aNJHKypcvjxo1asitS0dHR25dily8eBGHDx+WOxZq1qwJANLxcOPGDXh7e6NKlSowMTGRbuzN2T9RUVGoX78+zM3Nla7H0dERxsbG0mtbW9t8t9nAwABOTk7Sa2trazg6OsLIyEiuLGc5quyXmJgYuenA28/H+/skKChIbp8MHz4cSUlJSE9PzzNmAPj
qq6+wefNm1KtXD1OmTMGJEyfynedd586dg5eXFypVqgRjY2N4eHgAQK7jsUndutL/zU1NUc3REdfi4wEAl2Nj8fuOHXLb4OnpiezsbMT//zpExY03PRMRlRJpaWnQ1tbGuXPncnXRybkQGzZsGDw9PbF7927s378fwcHBWLhwIcaMGaPyehYsWIAlS5Zg8eLFcHFxgaGhIcaPH4/Xr18DALS1tXHgwAGcOHEC+/fvx9KlS/Htt9/i1KlTqFy5MkJCQjB27FiEhYXhzz//xPTp03HgwAE0bdo033UX9EbkyZMn48CBA/jhhx9QtWpV6Ovro2/fvlKsRa1s2bJyr2UymVz3naKir6+f7wO40tLS4OXlhXnz5uWaZmtrCwDw8vKCg4MDVq9eDTs7O2RnZ6NOnTrS/lFlfxdmmxXNUxz7Li0tDYGBgejdu3euaXp6evnO37lzZ9y+fRt79uzBgQMH0K5dO4waNQo//PBDvvO+ePECnp6e8PT0xMaNG2FpaYnExER4enoW6Hh8kZ6Oof36YWJgYK5plSpVUnk5REWJLQxERCXI+zebnjx5EtWqVYO2tjbq16+PrKwsPHz4EFWrVpX7s7Gxkeaxt7fHl19+idDQUEyaNAmrV68G8PZXawByN5oqEhERgR49euCzzz5D3bp1UaVKFVy/fl2ujkwmQ4sWLRAYGIgLFy5AR0cH27Ztk6bXr18f06ZNw4kTJ1CnTh1s2rQJAODs7IyIiIhc66tVqxaAt7/g3717N9f68orVz88PvXr1gouLC2xsbJCQkCBXR0dHJ99tzi+uouLs7IzMzEy59/nJkyeIjY0t8LoaNGiA6OhoODo65joeDA0NpeVOnz4d7dq1g7Ozs9QSlMPV1RVRUVFK78MoLqrsF2dnZ4Wfj3c1aNAAsbGxufZH1apVVR5lytLSEr6+vvj999+xePFirFq1SqX5rl27hidPnmDu3Llwd3dHzZo1lbbEnL54Ufr/s5QU3Lx9GzUrVwYA1HN2xrW4OIXbkPMZJipuTBiIiEqQxMRETJw4EbGxsfjjjz+wdOlSjBs3DgBQvXp1+Pj4YPDgwQgNDUV8fDxOnz6N4OBg7N799hkT48ePx759+xAfH4/z58/j8OHDcHZ2BgA4ODhAJpNh165dePToEdLS0hTGUK1aNakFISYmBiNGjMCDBw+k6adOncKcOXNw9uxZJCYmIjQ0FI8ePYKzszPi4+Mxbdo0REZG4vbt29i/fz9u3LghxfD1119j7dq1WL58OW7cuIFFixYhNDQUkydPBgB4eHigVatW6NOnDw4cOID4+Hjs3bsXYWFhSmMNDQ1FVFQULl68iEGDBuX61drR0RFHjx7FvXv35EYQeld+cRWVatWqoUePHhg+fDiOHz+Oixcv4rPPPkOFChXQo0ePAi1r1KhRePr0Kby9vXHmzBnExcVh3759GDJkCLKyslCuXDmUL18eq1atws2bN/Hvv/9i4sSJcsvw9vaGjY0NevbsiYiICNy6dQt///03IiMji3Kz86XKfslptfrhhx9w48YN/Pzzz7mOi5kzZ2L9+vUIDAxEdHQ0YmJisHnzZkyfPl2lOGbOnIkdO3bg5s2biI6Oxq5du6RjNz+VKlWCjo4Oli5dilu3bmHnzp2YPXu2wrrBK1fi8MmTiL5xA/7Tp6O8mRm82rUDAEz84gucvHhRurn7xo0b2LFjB296Jo1iwkBEVIIMHjwYL1++ROPGjTFq1CiMGzcO/v7+0vSQkBAMHjwYkyZNQo0aNdCzZ0+cOXNG6qqQlZWFUaNGwdnZGZ06dUL16tXxyy+/AAAqVKiAwMBAfPPNN7C2tlZ6ATJ9+nQ0aNAAnp6eaN26tXRBmcPExARHjx5Fly5dUL16dUyfPh0LFy5E586dYWBggGvXrqFPnz6oXr06/P39MWrUKIwYMQIA0LNnTyxZsgQ//PADateujZUrVyIkJEQachQA/v77bzRq1Aje3t6oVasWpkyZorSFYNG
iRShXrhyaN28OLy8veHp6okGDBnJ1goKCkJCQACcnJ1haWipcjipxFZWQkBC4ubmhW7duaNasGYQQ2LNnT64uO/mxs7NDREQEsrKy0LFjR7i4uGD8+PEwMzODlpYWtLS0sHnzZpw7dw516tTBhAkTsGDBArll6OjoYP/+/bCyskKXLl3g4uKS56hU6pTffmnatClWr16NJUuWoG7duti/f3+uRMDT0xO7du3C/v370ahRIzRt2hQ//vgjHBwcVIpBR0cH06ZNg6urK1q1agVtbW1s3rxZpXktLS2xdu1abNmyBbVq1cLcuXOVdmUKGj8eX8+bhxYDBuDBkyfYunQpdP7/drrUqIF9ISG4fv063N3dUb9+fcycORN2dnYqxUGkDjIh3hngm0gFqampMDU1RUpKCkxMTDQdDpGcV69eIT4+HpUrV1apzzIRUXF7+d5zFxTRL+TD/vI6B/L7mwqLLQxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiKiEkIIAX9/f5ibm0MmkyEqKirfeRISElSuW1K1bt0a48ePz7PO2rVrYWZmVizxEBGRvDKaDoCIqNgEmBbz+lIKVD0sLAxr165FeHg4qlSpAgsLCzUFVrKEhoaibNmy0mtHR0eMHz9eLokYMGAAunTpooHoiIiICQMRUQkRFxcHW1tbNG/eXNOhFCtzc/N86+jr60NfX78YoiEiovexSxIRUQng5+eHMWPGIDExETKZDI6OjgDetjq0bNkSZmZmKF++PLp164a4uDily3n27Bl8fHxgaWkJfX19VKtWDSEhIdL0O3fuoH///jAzM4O5uTl69OiBhIQEpcsLDw+HTCbD7t274erqCj09PTRt2hRXrlyRq/f333+jdu3a0NXVhaOjIxYuXCg3/ZdffkG1atWgp6cHa2tr9O3bV5r2bpek1q1b4/bt25gwYQJkMhlkMhkA+S5J169fh0wmw7Vr1+TW8eOPP8LJyUl6feXKFXTu3BlGRkawtrbG559/jsePHyvdViIiUowJAxFRCbBkyRIEBQWhYsWKSEpKwpkzZwAAL168wMSJE3H27FkcOnQIWlpa6NWrF7KzsxUuZ8aMGbh69Sr27t2LmJgYLF++XOra9ObNG3h6esLY2BjHjh1DREQEjIyM0KlTJ7x+/TrP+L7++mssXLgQZ86cgaWlJby8vPDmzRsAwLlz59C/f38MHDgQly9fRkBAAGbMmIG1a9cCAM6ePYuxY8ciKCgIsbGxCAsLQ6tWrRSuJzQ0FBUrVkRQUBCSkpKQlJSUq0716tXRsGFDbNy4Ua5848aNGDRoEAAgOTkZbdu2Rf369XH27FmEhYXhwYMH6N+/f57bSVQcbtnI8v0jKknYJYmIqAQwNTWFsbExtLW1YWNjI5X36dNHrt5vv/0GS0tLXL16FXXq1Mm1nMTERNSvXx8NGzYEAKmlAgD+/PNPZGdnY82aNdIv9yEhITAzM0N4eDg6duyoNL5Zs2ahQ4cOAIB169ahYsWK2LZtG/r3749FixahXbt2mDFjBoC3F/RXr17FggUL4Ofnh8TERBgaGqJbt24wNjaGg4MD6tevr3A95ubm0NbWhrGxsdx+eJ+Pjw9+/vlnzJ49G8DbVodz587h999/BwD8/PPPqF+/PubMmSO37+zt7XH9+nVUr15d6bKJiEgeWxiIiEqwGzduwNvbG1WqVIGJiYmUACQmJiqs/9VXX2Hz5s2oV68epkyZghMnTkjTLl68iJs3b8LY2BhGRkYwMjKCubk5Xr16lWc3JwBo1qyZ9H9zc3PUqFEDMTExAICYmBi0aNFCrn6LFi1w48YNZGVloUOHDnBwcECVKlXw+eefY+PGjUhPTy/M7pAMHDgQCQkJOHnyJIC3rQsNGjRAzZo1pW09fPiwtJ1GRkbStPy2lYiI5DFhKGW
OHj0KLy8v2NnZQSaTYfv27XLTc/r8vv+3YMECqY6jo2Ou6XPnzi3mLSEiVXh5eeHp06dYvXo1Tp06hVOnTgGA0i5EnTt3lu4B+O+//9CuXTtMnjwZAJCWlgY3NzdERUXJ/V2/fl3qyqMOxsbGOH/+PP744w/Y2tpi5syZqFu3LpKTkwu9TBsbG7Rt2xabNm0CAGzatAk+Pj7S9LS0NHh5eeXa1hs3bijtDkVERIoxYShlXrx4gbp162LZsmUKp+f0+c35++233yCTyXJ1a3i3f3BSUhLGjBlTHOETUQE8efIEsbGxmD59Otq1awdnZ2c8e/Ys3/ksLS3h6+uL33//HYsXL8aqVasAAA0aNMCNGzdgZWWFqlWryv2ZmuY95GzOL/nA2xurr1+/DmdnZwCAs7MzIiIi5OpHRESgevXq0NbWBgCUKVMG7du3x/z583Hp0iUkJCTg33//VbguHR0dZGVl5budPj4++PPPPxEZGYlbt25h4MCB0rQGDRogOjoajo6OubbV0NAw32UTEdH/YcJQynTu3BnfffcdevXqpXC6jY2N3N+OHTvQpk0bVKlSRa5eTv/gnD9+gRKVPOXKlUP58uWxatUq3Lx5E//++y8mTpyY5zwzZ87Ejh07cPPmTURHR2PXrl3Shb2Pjw8sLCzQo0cPHDt2DPHx8QgPD8fYsWNx9+7dPJcbFBSEQ4cO4cqVK/Dz84OFhQV69uwJAJg0aRIOHTqE2bNn4/r161i3bh1+/vlnqWVj165d+OmnnxAVFYXbt29j/fr1yM7ORo0aNRSuy9HREUePHsW9e/fyHNWod+/eeP78Ob766iu0adMGdnZ20rRRo0bh6dOn8Pb2xpkzZxAXF4d9+/ZhyJAhKiUjRET0f5gwfMQePHiA3bt3Y+jQobmmzZ07F+XLl0f9+vWxYMECZGZmaiBCIsqLlpYWNm/ejHPnzqFOnTqYMGGCXPdCRXR0dDBt2jS4urqiVatW0NbWxubNmwEABgYGOHr0KCpVqoTevXvD2dkZQ4cOxatXr2BiYpLncufOnYtx48bBzc0N9+/fxz///AMdHR0Ab3/N/+uvv7B582bUqVMHM2fORFBQEPz8/AAAZmZmCA0NRdu2beHs7IwVK1bgjz/+QO3atRWuKygoCAkJCXBycoKlpaXSmIyNjeHl5YWLFy/KdUcCADs7O0RERCArKwsdO3aEi4sLxo8fDzMzM2hp8auPiKggZEIIoekgqHBkMhm2bdsm/cr3vvnz52Pu3Ln477//oKenJ5UvWrQIDRo0gLm5OU6cOIFp06ZhyJAhWLRokcLlZGRkICMjQ3qdmpoKe3t7pKSk5HuRQVTcXr16hfj4eFSuXFnuuKfCCQ8PR5s2bfDs2TPpOQhE9GGiH0fnW6e2heKEOj95nQNTU1NhamrK728qMA6r+hH77bff4OPjk+uE8W6XBldXV+jo6GDEiBEIDg6Grq5uruUEBwcjMDBQ7fESERERUcnDdtmP1LFjxxAbG4thw4blW7dJkybIzMxU+rTXadOmISUlRfq7c+dOEUdLRERERCUVWxg+Ur/++ivc3NxQt27dfOtGRUVBS0sLVlZWCqfr6uoqbHkgoo9f69atwZ6rRESfNiYMpUxaWhpu3rwpvY6Pj0dUVBTMzc1RqVIlAG/7KG7ZsgULFy7MNX9kZCROnTqFNm3awNjYGJGRkZgwYQI+++wzlCtXrti2g4iIiIhKByYMpczZs2fRpk0b6XXO/Qi+vr5Yu3YtAGDz5s0QQsDb2zvX/Lq6uti8eTMCAgKQkZGBypUrY8KECfkO1UhEREREnyaOkkQFxlEWqCTjKElEVNJxlCQqbXjTMxERERERKcWEgYiIiIiIlGLCQERERERESjFhICKiYrN27VqVnhgtk8mwfft2tcdDRET54yhJRPTJcFnnUqzru+x7uUD1W7dujXr16mHx4sXqCagEGDBgALp06SK9DggIwPbt2xEVFSVXLykpiUM9ExGVEEw
YiIhKESEEsrKyUKZM6Tx96+vrQ19fP996NjY2xRANERGpgl2SiIhKAD8/Pxw5cgRLliyBTCaDTCZDQkICwsPDIZPJsHfvXri5uUFXVxfHjx+Hn58fevbsKbeM8ePHo3Xr1tLr7OxsBAcHo3LlytDX10fdunWxdevWPONwdHTE7Nmz4e3tDUNDQ1SoUAHLli2Tq5OYmIgePXrAyMgIJiYm6N+/Px48eCBNv3jxovRwSBMTE7i5ueHs2bMA5LskrV27FoGBgbh48aK0zTnPk3m3S1Lz5s0xdepUuRgePXqEsmXL4ujRowCAjIwMTJ48GRUqVIChoSGaNGmC8PBwFfY8ERHlhwkDEVEJsGTJEjRr1gzDhw9HUlISkpKSYG9vL03/5ptvMHfuXMTExMDV1VWlZQYHB2P9+vVYsWIFoqOjpae6HzlyJM/5FixYgLp16+LChQv45ptvMG7cOBw4cADA2ySkR48eePr0KY4cOYIDBw7g1q1bGDBggDS/j48PKlasiDNnzuDcuXP45ptvULZs2VzrGTBgACZNmoTatWtL2/zuct5dXs4DKXP8+eefsLOzg7u7OwBg9OjRiIyMxObNm3Hp0iX069cPnTp1wo0bN1TaV0REpFzpbNMmIvrImJqaQkdHBwYGBgq74wQFBaFDhw4qLy8jIwNz5szBwYMH0axZMwBAlSpVcPz4caxcuRIeHh5K523RogW++eYbAED16tURERGBH3/8ER06dMChQ4dw+fJlxMfHSwnN+vXrUbt2bZw5cwaNGjVCYmIivv76a9SsWRMAUK1aNYXr0dfXh5GREcqUKZNnF6T+/ftj/PjxOH78uJQgbNq0Cd7e3pDJZEhMTERISAgSExNhZ2cHAJg8eTLCwsIQEhKCOXPmqLzfiIgoN7YwEBGVAg0bNixQ/Zs3byI9PR0dOnSAkZGR9Ld+/XrExcXlOW9OgvHu65iYGABATEwM7O3t5Vo/atWqBTMzM6nOxIkTMWzYMLRv3x5z587Nd335sbS0RMeOHbFx40YAQHx8PCIjI+Hj4wMAuHz5MrKyslC9enW5bT1y5MgHr5uIiNjCQERUKhgaGsq91tLSkuuiAwBv3ryR/p+WlgYA2L17NypUqCBXT1dXV01RvhUQEIBBgwZh9+7d2Lt3L2bNmoXNmzejV69ehV6mj48Pxo4di6VLl2LTpk1wcXGBi8vbUa/S0tKgra2Nc+fOQVtbW24+IyOjD9oWIiJiwkBEVGLo6OggKytLpbqWlpa4cuWKXFlUVJR0r0CtWrWgq6uLxMTEPLsfKXLy5Mlcr52dnQEAzs7OuHPnDu7cuSO1Mly9ehXJycmoVauWNE/16tVRvXp1TJgwAd7e3ggJCVGYMKi6zT169IC/vz/CwsKwadMmDB48WJpWv359ZGVl4eHDh1KXJSIiKjrskkREVEI4Ojri1KlTSEhIwOPHj5Gdna20btu2bXH27FmsX78eN27cwKxZs+QSCGNjY0yePBkTJkzAunXrEBcXh/Pnz2Pp0qVYt25dnnFERERg/vz5uH79OpYtW4YtW7Zg3LhxAID27dvDxcUFPj4+OH/+PE6fPo3BgwfDw8MDDRs2xMuXLzF69GiEh4fj9u3biIiIwJkzZ6SEQ9E2x8fHIyoqCo8fP0ZGRobCeoaGhujZsydmzJiBmJgYeHt7S9OqV68OHx8fDB48GKGhoYiPj8fp06cRHByM3bt357mtRESUPyYMREQlxOTJk6GtrY1atWrB0tISiYmJSut6enpixowZmDJlCho1aoTnz5/L/eoOALNnz8aMGTMQHBwMZ2dndOrUCbt370blypXzjGPSpEk4e/Ys6tevj++++w6LFi2Cp6cngLfDne7YsQPlypVDq1at0L59e1SpUgV//vknAEBbWxtPnjzB4MGDUb16dfTv3x+dO3dGYGCgwnX16dMHnTp1Qps2bWBpaYk//vhDaVw+Pj64ePEi3N3dUalSJblpISEhGDx4MCZNmoQaNWq
gZ8+eOHPmTK56RERUcDLxfidYonykpqbC1NQUKSkpMDEx0XQ4RHJevXqF+Ph4VK5cGXp6epoOp9RxdHTE+PHjMX78eE2HQvTRin4cnW+d2ha1C7XsvM6B/P6mwmILAxERERERKcWEgYiIiIiIlOIoSUREJElISNB0CEREVMKwhYGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiIiUklAQADq1auncv2EhATIZDJERUUprePo6IjFixervMy1a9fCzMxM5frKyGQybN++Xel0IQT8/f1hbm6e7zaUJKrsz4K+jwDQunXrEvP07/DwcMhkMiQnJ6s8T1HEX1THHlFpxOcwENEnI6amc7Guz/laTIHqHz16FAsWLMC5c+eQlJSEbdu2oWfPnuoJjvIUFhaGtWvXIjw8HFWqVIGFhYWmQyoUmUyW6ziaPHkyxowZo7mglEhISEDlypVx4cKFAic0RKRebGEgIiohXrx4gbp162LZsmWaDuWTFxcXB1tbWzRv3hw2NjYoU6bgv68JIZCZmamG6D6MkZERypcvr+kwiKgUYcJARFRCdO7cGd999x169eql8jw53Ut+++03VKpUCUZGRhg5ciSysrIwf/582NjYwMrKCt9//73cfImJiejRoweMjIxgYmKC/v3748GDB3J15s6dC2traxgbG2Po0KF49epVrvWvWbMGzs7O0NPTQ82aNfHLL78UbuP/v0WLFsHFxQWGhoawt7fHyJEjkZaWlqve9u3bUa1aNejp6cHT0xN37tyRm75jxw40aNAAenp6qFKlCgIDA1W+ePfz88OYMWOQmJgImUwGR0dHAEBGRgbGjh0LKysr6OnpoWXLljhz5ow0X05Xmb1798LNzQ26uro4fvx4ruXndNX666+/4O7uDn19fTRq1AjXr1/HmTNn0LBhQxgZGaFz58549OiRNJ+ibjU9e/aEn5+fwu3IibtXr15y2/F+lyQ/Pz/07NkTgYGBsLS0hImJCb788ku8fv1a6T7KyMjA5MmTUaFCBRgaGqJJkyYIDw9XWh8Arl27hpYtW0JPTw+1atXCwYMH5bqGVa5cGQBQv359yGQytG7dOs/l5Xjy5Am8vb1RoUIFGBgYwMXFBX/88UeuepmZmRg9ejRMTU1hYWGBGTNmQAhR6G26ePEi2rRpA2NjY5iYmMDNzQ1nz55VKWai0oYJAxFRKRcXF4e9e/ciLCwMf/zxB3799Vd07doVd+/exZEjRzBv3jxMnz4dp06dAgBkZ2ejR48eePr0KY4cOYIDBw7g1q1bGDBggLTMv/76CwEBAZgzZw7Onj0LW1vbXMnAxo0bMXPmTHz//feIiYnBnDlzMGPGDKxbt67Q26KlpYWffvoJ0dHRWLduHf79919MmTJFrk56ejq+//57rF+/HhEREUhOTsbAgQOl6ceOHcPgwYMxbtw4XL16FStXrsTatWtzJU3KLFmyBEFBQahYsSKSkpKkpGDKlCn4+++/sW7dOpw/fx5Vq1aFp6cnnj59Kjf/N998g7lz5yImJgaurq5K1zNr1ixMnz4d58+fR5kyZTBo0CBMmTIFS5YswbFjx3Dz5k3MnDlT1V2XS07cISEhctuhyKFDhxATE4Pw8HD88ccfCA0NRWBgoNL6o0ePRmRkJDZv3oxLly6hX79+6NSpE27cuKGwflZWFnr27AkDAwOcOnUKq1atwrfffitX5/Tp0wCAgwcPIikpCaGhoSpt56tXr+Dm5obdu3fjypUr8Pf3x+effy4tL8e6detQpkwZnD59GkuWLMGiRYuwZs2aQm+Tj48PKlasiDNnzuDcuXP45ptvULZsWZViJip1BFEBpaSkCAAiJSVF06EQ5fLy5Utx9epV8fLly1zTrtaoWax/HwKA2LZtW771Zs2aJQwMDERqaqpU5unpKRwdHUVWVpZUVqNGDREcHCyEEGL//v1CW1tbJCYmStOjo6M
FAHH69GkhhBDNmjUTI0eOlFtXkyZNRN26daXXTk5OYtOmTXJ1Zs+eLZo1ayaEECI+Pl4AEBcuXFAav4ODg/jxxx+VTt+yZYsoX7689DokJEQAECdPnpTKYmJiBABx6tQpIYQQ7dq1E3PmzJFbzoYNG4Stra30Or/9++OPPwoHBwfpdVpamihbtqzYuHGjVPb69WthZ2cn5s+fL4QQ4vDhwwKA2L59u9LlCvF/+2XNmjVS2R9//CEAiEOHDkllwcHBokaNGtJrDw8PMW7cOLll9ejRQ/j6+kqv39+firZz1qxZcu+jr6+vMDc3Fy9evJDKli9fLoyMjKRj6N113759W2hra4t79+7JLbddu3Zi2rRpCrd57969okyZMiIpKUkqO3DggFx8qhwvQvzffn727JnSOl27dhWTJk2SXnt4eAhnZ2eRnZ0tlU2dOlU4OzurvE0hISHC1NRUmmZsbCzWrl2bZ6zKXHl0Jd+/wsrrHMjvbyos3vRMRFTKOTo6wtjYWHptbW0NbW1taGlpyZU9fPgQABATEwN7e3vY29tL02vVqgUzMzPExMSgUaNGiImJwZdffim3nmbNmuHw4cMA3t5vERcXh6FDh2L48OFSnczMTJiamhZ6Ww4ePIjg4GBcu3YNqampyMzMxKtXr5Ceng4DAwMAQJkyZdCoUSNpnpo1a0qxN27cGBcvXkRERIRci0JWVlau5RREXFwc3rx5gxYtWkhlZcuWRePGjRETI39ze8OGDVVa5rutD9bW1gAAFxcXubKc90zd6tatK7dfmjVrhrS0NNy5cwcODg5ydS9fvoysrCxUr15drjwjI0PpvRGxsbGwt7eHjY2NVNa4ceMiiT0rKwtz5szBX3/9hXv37uH169fIyMjI9T43bdoUMplMet2sWTMsXLgQWVlZhdqmiRMnYtiwYdiwYQPat2+Pfv36wcnJqUi2iaikYcJARFTKvd8NQiaTKSzLzs4usnXm3FewevVqNGnSRG6atrZ2oZaZkJCAbt264auvvsL3338Pc3NzHD9+HEOHDsXr169VvtBPS0tDYGAgevfunWuanp5eoWIrCENDQ5Xqvfse5VzIvl/27numpaUl1+ceAN68efMhoRZKWloatLW1ce7cuVzvtZGRUbHHs2DBAixZsgSLFy+W7n8ZP358nvdgvK8w2xQQEIBBgwZh9+7d2Lt3L2bNmoXNmzcX6B4kotKCCQMR0SfG2dkZd+7cwZ07d6RWhqtXryI5ORm1atWS6pw6dQqDBw+W5jt58qT0f2tra9jZ2eHWrVvw8fEpkrjOnTuH7OxsLFy4UGod+euvv3LVy8zMxNmzZ6VfqGNjY5GcnAxn57fD5jZo0ACxsbGoWrVqkcQFAE5OTtDR0UFERIT0i/ubN29w5syZYns+gaWlJZKSkqTXWVlZuHLlCtq0aaN0nrJlyyIrKyvfZV+8eBEvX76Evr4+gLfvtZGRkVwrVI769esjKysLDx8+hLu7u0qx16hRA3fu3MGDBw+k1pT376nQ0dGRtqsgIiIi0KNHD3z22WcA3t6jc/36delYzpFzD0+OkydPolq1atDW1i7UNgFA9erVUb16dUyYMAHe3t4ICQlhwkAfJSYMREQlRFpaGm7evCm9jo+PR1RUFMzNzVGpUqUiW0/79u3h4uICHx8fLF68GJmZmRg5ciQ8PDyk7jTjxo2Dn58fGjZsiBYtWmDjxo2Ijo5GlSpVpOUEBgZi7NixMDU1RadOnZCRkYGzZ8/i2bNnmDhxYoHjqlq1Kt68eYOlS5fCy8sLERERWLFiRa56ZcuWxZgxY/DTTz+hTJkyGD16NJo2bSolEDNnzkS3bt1QqVIl9O3bF1paWrh48SKuXLmC7777rlD7zNDQEF999RW+/vpr6f2YP38+0tPTMXTo0EIts6Datm2LiRMnYvfu3XBycsKiRYvyfXiZo6MjDh06hBYtWkBXVxflypVTWO/169cYOnQopk+fjoSEBMyaNQujR4+
W69aWo3r16vDx8cHgwYOxcOFC1K9fH48ePcKhQ4fg6uqKrl275pqnQ4cOcHJygq+vL+bPn4/nz59j+vTpAP6vdcXKygr6+voICwtDxYoVoaenp1L3tmrVqmHr1q04ceIEypUrh0WLFuHBgwe5EobExERMnDgRI0aMwPnz57F06VIsXLiwUNv08uVLfP311+jbty8qV66Mu3fv4syZM+jTp0++8RKVRkwYiOiTUdAHqRW3s2fPyv1anHPR7evri7Vr1xbZemQyGXbs2IExY8agVatW0NLSQqdOnbB06VKpzoABAxAXF4cpU6bg1atX6NOnD7766ivs27dPqjNs2DAYGBhgwYIF+Prrr2FoaAgXF5dC/+Jet25dLFq0CPPmzcO0adPQqlUrBAcHy7VyAICBgQGmTp2KQYMG4d69e3B3d8evv/4qTff09MSuXbsQFBSEefPmoWzZsqhZsyaGDRtWqLhyzJ07F9nZ2fj888/x/PlzNGzYEPv27VN6EV7UvvjiC1y8eBGDBw9GmTJlMGHChDxbFwBg4cKFmDhxIlavXo0KFSogISFBYb127dqhWrVqaNWqFTIyMuDt7Y2AgAClyw0JCcF3332HSZMm4d69e7CwsEDTpk3RrVs3hfW1tbWxfft2DBs2DI0aNUKVKlWwYMECeHl5Sd3EypQpg59++glBQUGYOXMm3N3d8x2qFQCmT5+OW7duwdPTEwYGBvD390fPnj2RkpIiV2/w4MF4+fIlGjduDG1tbYwbNw7+/v6F2iZtbW08efIEgwcPxoMHD2BhYYHevXvnObIUUWkmE+93iCTKR2pqKkxNTZGSkgITExNNh0Mk59WrV4iPj0flypWLpb86UWnn5+eH5ORk6XkIxSUiIgItW7bEzZs3P7mbhaMfR+dbp7ZF7UItO69zIL+/qbD4HIZS5ujRo/Dy8oKdnZ3cA29y+Pn5QSaTyf116tRJrs7Tp0/h4+MDExMTmJmZYejQoQofjERERFRUtm3bhgMHDiAhIQEHDx6Ev78/WrRo8cklC0SlEROGUubFixeoW7culi1bprROp06dkJSUJP29/8RLHx8fREdH48CBA9i1axeOHj0q1yxLRERU1J4/f45Ro0ahZs2a8PPzQ6NGjbBjxw5Nh0VEKuA9DKVM586d0blz5zzr6Orqyo11/a6YmBiEhYXhzJkz0s2NS5cuRZcuXfDDDz/Azs6uyGMmIqKSqyjvj8nL4MGDc92PQkSlA1sYPkLh4eGwsrJCjRo18NVXX+HJkyfStMjISJiZmck9WKh9+/bQ0tLKNeRcjoyMDKSmpsr9EREREdGngQnDR6ZTp05Yv349Dh06hHnz5uHIkSPo3LmzNK71/fv3YWVlJTdPmTJlYG5ujvv37ytcZnBwMExNTaU/ReNyE5U0HM+BiD5FPPeROrBL0kdm4MCB0v9dXFzg6uoKJycnhIeHo127doVa5rRp0+TGVE9NTWXSQCVWzpNy09PTpYdQERF9KtLT0wHkfgI80YdgwvCRq1KlCiwsLHDz5k20a9cONjY2ePjwoVydzMxMPH36VOl9D7q6utDV1S2OcIk+mLa2NszMzKTj3MDAQHowFBFRSZD9JjvfOq9evSrQMoUQSE9Px8OHD2FmZgZtbe3ChkeUCxOGj9zdu3fx5MkT2NraAgCaNWuG5ORknDt3Dm5ubgCAf//9F9nZ2WjSpIkmQyUqMjnJ7/vJMRFRSfAwLf9zU5nkwl2imZmZKf0BkKiwmDCUMmlpabh586b0Oj4+HlFRUTA3N4e5uTkCAwPRp08f2NjYSE9prVq1Kjw9PQEAzs7O6NSpE4YPH44VK1bgzZs3GD16NAYOHMgRkuijIZPJYGtrCysrK7x580bT4RARyRm3bVy+dXb22lng5ZYtW5YtC6QWTBhKmbNnz6JNmzbS65x7C3x9fbF8+XJcunQJ69atQ3JyMuzs7NCxY0fMnj1brkvRxo0bMXr0aLRr1w5aWlro06cPfvrpp2L
fFiJ109bW5pcnEZU4Sa+T8q3DJ9VTSSITvJ2+WGVlZeHy5ctwcHBAuXLlNB1OofDR8kRERIXnss4l3zqXfS8X+Xr5/U2FxWFV1Wz8+PH49ddfAbxNFjw8PNCgQQPY29sjPDxcs8EREREREeWDCYOabd26FXXr1gUA/PPPP4iPj8e1a9cwYcIEfPvttxqOjoiIiIgob0wY1Ozx48fSaAV79uxBv379UL16dXzxxRe4fLnomxuJiIiIiIoSEwY1s7a2xtWrV5GVlYWwsDB06NABwNsHq/BmTCIiIiIq6ThKkpoNGTIE/fv3h62tLWQyGdq3bw8AOHXqFGrWrKnh6IiIiIiI8saEQc0CAgJQp04d3LlzB/369ZOGN9XW1sY333yj4eiIiIiIiPLGhKEY9O3bN1eZr6+vBiIhIiIiIioYJgxqUJCHoI0dO1aNkRARERERfRgmDGrw448/qlRPJpMxYSAiIiKiEo0JgxrEx8drOgQiIiIioiLBYVWLyevXrxEbG4vMzExNh0JEREREpDImDGqWnp6OoUOHwsDAALVr10ZiYiIAYMyYMZg7d66GoyMiIiIiyhsTBjWbNm0aLl68iPDwcOjp6Unl7du3x59//qnByIiIiIiI8sd7GNRs+/bt+PPPP9G0aVPIZDKpvHbt2oiLi9NgZERERERE+WMLg5o9evQIVlZWucpfvHghl0AQEREREZVETBjUrGHDhti9e7f0OidJWLNmDZo1a6apsIiIiIiIVMIuSWo2Z84cdO7cGVevXkVmZiaWLFmCq1ev4sSJEzhy5IimwyMiIiIiyhNbGNSsZcuWiIqKQmZmJlxcXLB//35YWVkhMjISbm5umg6PiIiIiChPbGEoBk5OTli9erWmwyAiIiIiKjAmDMUgKysL27ZtQ0xMDACgVq1a6NGjB8qU4e4nIiIiopKNV6xqFh0dje7du+P+/fuoUaMGAGDevHmwtLTEP//8gzp16mg4QiIiIiIi5XgPg5oNGzYMtWvXxt27d3H+/HmcP38ed+7cgaurK/z9/TUdHhERERFRntjCoGZRUVE4e/YsypUrJ5WVK1cO33//PRo1aqTByIiIiIiI8scWBjWrXr06Hjx4kKv84cOHqFq1qgYiIiIiIiJSHRMGNUhNTZX+goODMXbsWGzduhV3797F3bt3sXXrVowfPx7z5s3TdKhERERERHlilyQ1MDMzk57oDABCCPTv318qE0IAALy8vJCVlaWRGImIiIiIVMGEQQ0OHz6s6RCIiIiIiIoEEwY18PDw0HQIRERERERFgglDMUlPT0diYiJev34tV+7q6qqhiIiIiIiI8seEQc0ePXqEIUOGYO/evQqn8x4GIiIiIirJOEqSmo0fPx7Jyck4deoU9PX1ERYWhnXr1qFatWrYuXOnpsMjIiIiIsoTWxjU7N9//8WOHTvQsGFDaGlpwcHBAR06dICJiQmCg4PRtWtXTYdIRERERKQUWxjU7MWLF7CysgLw9gnPjx49AgC4uLjg/PnzmgyNiIiIiChfTBjUrEaNGoiNjQUA1K1bFytXrsS9e/ewYsUK2Nraajg6IiIiIqK8sUuSmo0bNw5JSUkAgFmzZqFTp07YuHEjdHR0sHbtWs0GR0RERESUD7YwqNlnn30GPz8/AICbmxtu376NM2fO4M6dOxgwYECBl3f06FF4eXnBzs4OMpkM27dvl6a9efMGU6dOhYuLCwwNDWFnZ4fBgwfjv//+k1uGo6MjZDKZ3N/cuXM/ZDOJiIiI6CPFhKGYGRgYoEGDBrCwsCjU/C9evEDdunWxbNmyXNPS09Nx/vx5zJgxA+fPn0doaChiY2PRvXv3XHWDgoKQlJQk/Y0ZM6ZQ8RARERHRx41dktRg4sSJKtddtGhRgZbduXNndO7cWeE0U1NTHDhwQK7s559/RuPGjZGYmIhKlSpJ5cbGxrCxsSnQuomIiIjo08OEQQ0uXLigUj2ZTKbmSICUlBTIZDKYmZnJlc+dOxe
zZ89GpUqVMGjQIEyYMAFlyig+HDIyMpCRkSG9Tk1NVWfIRERERFSCMGFQg8OHD2s6BADAq1evMHXqVHh7e8PExEQqHzt2LBo0aABzc3OcOHEC06ZNQ1JSktLWjuDgYAQGBhZX2ERERERUgsiEEELTQVDhyGQybNu2DT179sw17c2bN+jTpw/u3r2L8PBwuYThfb/99htGjBiBtLQ06Orq5pquqIXB3t4eKSkpeS6XiIiIcnNZ55Jvncu+l4t8vampqTA1NeX3NxUYWxg+Qm/evEH//v1x+/Zt/Pvvv/meFJo0aYLMzEwkJCSgRo0auabr6uoqTCSIiIiI6OPHhOEjk5Ms3LhxA4cPH0b58uXznScqKgpaWlrSE6mJiIiIiHIwYShl0tLScPPmTel1fHw8oqKiYG5uDltbW/Tt2xfnz5/Hrl27kJWVhfv37wMAzM3NoaOjg8jISJw6dQpt2rSBsbExIiMjMWHCBHz22WcoV66cpjaLiIiIiEooJgylzNmzZ9GmTRvpdc4Qrr6+vggICMDOnTsBAPXq1ZOb7/Dhw2jdujV0dXWxefNmBAQEICMjA5UrV8aECRMKNBQsEREREX06mDCo2bp162BhYYGuXbsCAKZMmYJVq1ahVq1a+OOPP+Dg4FCg5bVu3Rp53aee3z3sDRo0wMmTJwu0TiIiIiL6dPFJz2o2Z84c6OvrAwAiIyOxbNkyzJ8/HxYWFpgwYYKGoyMiIiIiyhtbGNTszp07qFq1KgBg+/bt6NOnD/z9/dGiRQu0bt1as8EREREREeWDLQxqZmRkhCdPngAA9u/fjw4dOgAA9PT08PLlS02GRkRERESUL7YwqFmHDh0wbNgw1K9fH9evX0eXLl0AANHR0XB0dNRscERERERE+WALg5otW7YMzZo1w6NHj/D3339Lz0U4d+4cvL29NRwdEREREVHeZCK/YXWI3sNHyxMRERWeyzqXfOtc9r1c5Ovl9zcVFrskqUliYqLc60qVKmkoEiIiIiKiwmPCoCaOjo6QyWQQQkAmkyErK0vTIRERERERFRgTBjXJzs7WdAhERERERB+MNz0TEREREZFSbGFQg507d6pct3v37mqMhIiIiIjowzBhUIOePXuqVI/3NhARERFRSceEQQ14/wIRERERfSx4D0MxevXqlaZDICIiIiIqECYMapaVlYXZs2ejQoUKMDIywq1btwAAM2bMwK+//qrh6IiIiIiI8saEQc2+//57rF27FvPnz4eOjo5UXqdOHaxZs0aDkRERERER5Y8Jg5qtX78eq1atgo+PD7S1taXyunXr4tq1axqMjIiIiIgof0wY1OzevXuoWrVqrvLs7Gy8efNGAxEREREREamOCYOa1apVC8eOHctVvnXrVtSvX18DERERERERqY7DqqrZzJkz4evri3v37iE7OxuhoaGIjY3F+vXrsWvXLk2HR0RERESUJ7YwqFmPHj3wzz//4ODBgzA0NMTMmTMRExODf/75Bx06dNB0eEREREREeWILQzFwd3fHgQMHNB0GEREREVGBsYWBiIiIiIiUYguDGpQrVw4ymUyluk+fPlVzNEREREREhceEQQ0WL14s/f/Jkyf47rvv4OnpiWbNmgEAIiMjsW/fPsyYMUNDERIRERERqUYmhBCaDuJj1qdPH7Rp0wajR4+WK//5559x8OBBbN++XTOBfYDU1FSYmpoiJSUFJiYmmg6HiIioVHFZ55Jvncu+l4t8vfz+psLiPQxqtm/fPnTq1ClXeadOnXDw4EENREREREREpDomDGpWvnx57NixI1f5jh07UL58eQ1ERERERESkOt7DoGaBgYEYNmwYwsPD0aRJEwDAqVOnEBYWhtWrV2s4OiIiIiKivDFhUDM/Pz84Ozvjp59+QmhoKADA2dkZx48flxIIIiIiIqKSiglDMWjSpAk2btyo6TCIiIiIiAqMCUMxyMrKwvbt2xETEwMAqF27Nrp37w5tbW0NR0ZERERElDcmDGp
28+ZNdO3aFXfv3kWNGjUAAMHBwbC3t8fu3bvh5OSk4QiJiIiIiJTjKElqNnbsWFSpUgV37tzB+fPncf78eSQmJqJy5coYO3aspsMjIiIiIsoTWxjU7MiRIzh58iTMzc2lsvLly2Pu3Llo0aKFBiMjIiIiIsofWxjUTFdXF8+fP89VnpaWBh0dnQIv7+jRo/Dy8oKdnR1kMlmuJ0ULITBz5kzY2tpCX18f7du3x40bN+TqPH36FD4+PjAxMYGZmRmGDh2KtLS0AsdCRERERB8/Jgxq1q1bN/j7++PUqVMQQkAIgZMnT+LLL79E9+7dC7y8Fy9eoG7duli2bJnC6fPnz8dPP/2EFStW4NSpUzA0NISnpydevXol1fHx8UF0dDQOHDiAXbt24ejRo/D39y/0NhIRERHRx0smhBCaDuJjlpycDF9fX/zzzz8oW7YsACAzMxPdu3fH2rVrYWpqWuhly2QybNu2DT179gTwtnXBzs4OkyZNwuTJkwEAKSkpsLa2xtq1azFw4EDExMSgVq1aOHPmDBo2bAgACAsLQ5cuXXD37l3Y2dnlu97U1FSYmpoiJSUFJiYmhY6fiIjoU+SyziXfOpd9Lxf5evn9TYXFexjUzMzMDDt27MCNGzdw7do1AG8f3Fa1atUiX1d8fDzu37+P9u3bS2WmpqZo0qQJIiMjMXDgQERGRsLMzExKFgCgffv20NLSwqlTp9CrV69cy83IyEBGRob0OjU1tchjJyIiIqKSiQlDMalWrRqqVaum1nXcv38fAGBtbS1Xbm1tLU27f/8+rKys5KaXKVMG5ubmUp33BQcHIzAwUA0RExEREVFJx4RBzYQQ2Lp1Kw4fPoyHDx8iOztbbnpoaKiGIlPdtGnTMHHiROl1amoq7O3tNRgRERERERUXJgxqNn78eKxcuRJt2rSBtbU1ZDKZ2tZlY2MDAHjw4AFsbW2l8gcPHqBevXpSnYcPH8rNl5mZiadPn0rzv09XVxe6urrqCZqIiIiISjQmDGq2YcMGhIaGokuXLmpfV+XKlWFjY4NDhw5JCUJqaipOnTqFr776CgDQrFkzJCcn49y5c3BzcwMA/Pvvv8jOzkaTJk3UHiMRERERlS5MGNTM1NQUVapUKbLlpaWl4ebNm9Lr+Ph4REVFwdzcHJUqVcL48ePx3XffoVq1aqhcuTJmzJgBOzs7aSQlZ2dndOrUCcOHD8eKFSvw5s0bjB49GgMHDlRphCQiIiIi+rTwOQxqFhAQgMDAQLx8+bJIlnf27FnUr18f9evXBwBMnDgR9evXx8yZMwEAU6ZMwZgxY+Dv749GjRohLS0NYWFh0NPTk5axceNG1KxZE+3atUOXLl3QsmVLrFq1qkjiIyIiIqKPC5/DoGYvX75Er169EBERAUdHR+lZDDnOnz+vocgKj+M4ExERFR6fw0ClDbskqZmvry/OnTuHzz77TO03PRMRERERFTUmDGq2e/du7Nu3Dy1bttR0KEREREREBcZ7GNTM3t6ezX5EREREVGoxYVCzhQsXYsqUKUhISNB0KEREREREBcYuSWr22WefIT09HU5OTjAwMMh10/PTp081FBkRERERUf6YMKjZ4sWLNR0CEREREVGhMWFQM19fX02HQERERERUaLyHgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwqMGlS5eQnZ2t6TCIiIiIiD4YEwY1qF+/Ph4/fgwAqFKlCp48eaLhiIiIiIiICocJgxqYmZkhPj4eAJCQkMDWBiIiIiIqtTisqhr06dMHHh4esLW1hUwmQ8OGDaGtra2w7q1bt4o5OiIiIiIi1TFhUINVq1ahd+/euHnzJsaOHYvhw4fD2NhY02ERERERERUYEwY16dSpEwDg3LlzGDduHBMGIiIiIiqVmDCoWUhIiPT/u3fvAgAqVqyoqXCIiIiIiAqENz2rWXZ2NoKCgmBqagoHBwc4ODjAzMwMs2fP5s3QRERERFTisYVBzb799lv8+uu
vmDt3Llq0aAEAOH78OAICAvDq1St8//33Go6QiIiIiEg5Jgxqtm7dOqxZswbdu3eXylxdXVGhQgWMHDmSCQMRERERlWjskqRmT58+Rc2aNXOV16xZE0+fPtVAREREREREqmPCoGZ169bFzz//nKv8559/Rt26dTUQERERERGR6tglSc3mz5+Prl274uDBg2jWrBkAIDIyEnfu3MGePXs0HB0RERERUd7YwqBmHh4euH79Onr16oXk5GQkJyejd+/eiI2Nhbu7u6bDIyIiIiLKE1sYioGdnR1vbiYiIiKiUoktDEREREREpBQTBiIiIiIiUooJAxERERERKcWEQY2EEEhMTMSrV680HQoRERERUaEwYVAjIQSqVq2KO3fuaDoUIiIiIqJCYcKgRlpaWqhWrRqePHmi6VCIiIiIiAqFCYOazZ07F19//TWuXLmi6VCIiIiIiAqMz2FQs8GDByM9PR1169aFjo4O9PX15aY/ffpUQ5EREREREeWPCYOaLV68WNMhEBEREREVGhMGNfP19dV0CEREREREhcZ7GIpBXFwcpk+fDm9vbzx8+BAAsHfvXkRHRxf5uhwdHSGTyXL9jRo1CgDQunXrXNO+/PLLIo+DiIiIiD4OTBjU7MiRI3BxccGpU6cQGhqKtLQ0AMDFixcxa9asIl/fmTNnkJSUJP0dOHAAANCvXz+pzvDhw+XqzJ8/v8jjICIiIqKPAxMGNfvmm2/w3Xff4cCBA9DR0ZHK27Zti5MnTxb5+iwtLWFjYyP97dq1C05OTvDw8JDqGBgYyNUxMTEp8jiIiIiI6OPAhEHNLl++jF69euUqt7KywuPHj9W67tevX+P333/HF198AZlMJpVv3LgRFhYWqFOnDqZNm4b09HS1xkFEREREpRdvelYzMzMzJCUloXLlynLlFy5cQIUKFdS67u3btyM5ORl+fn5S2aBBg+Dg4AA7OztcunQJU6dORWxsLEJDQ5UuJyMjAxkZGdLr1NRUdYZNRERERCUIEwY1GzhwIKZOnYotW7ZAJpMhOzsbERERmDx5MgYPHqzWdf/666/o3Lkz7OzspDJ/f3/p/y4uLrC1tUW7du0QFxcHJycnhcsJDg5GYGCgWmMlIiIiopKJXZLUbM6cOahZsybs7e2RlpaGWrVqoVWrVmjevDmmT5+utvXevn0bBw8exLBhw/Ks16RJEwDAzZs3ldaZNm0aUlJSpL87d+4UaaxEREREVHKxhUHNdHR0sHr1asyYMQNXrlxBWloa6tevj2rVqql1vSEhIbCyskLXrl3zrBcVFQUAsLW1VVpHV1cXurq6RRkeEREREZUSTBiKSaVKlWBvbw8Acjcgq0N2djZCQkLg6+uLMmX+7y2Oi4vDpk2b0KVLF5QvXx6XLl3ChAkT0KpVK7i6uqo1JiIiIiIqndglqRj8+uuvqFOnDvT09KCnp4c6depgzZo1alvfwYMHkZiYiC+++EKuXEdHBwcPHkTHjh1Rs2ZNTJo0CX369ME///yjtliIiIiIqHRjC4OazZw5E4sWLcKYMWPQrFkzAEBkZCQmTJiAxMREBAUFFfk6O3bsCCFErnJ7e3scOXKkyNdHRERERB8vJgxqtnz5cqxevRre3t5SWffu3eHq6ooxY8aoJWEgIiIiIioq7JKkZm/evEHDhg1zlbu5uSEzM1MDERERERERqY4Jg5p9/vnnWL58ea7yVatWwcfHRwMRERERERGpjl2S1GDixInS/2UyGdasWYP9+/ejadOmAIBTp04hMTFR7Q9uIyIiIiL6UEwY1ODChQtyr93c3AC8HdYUACwsLGBhYYHo6Ohij42IiIiIqCCYMKjB4cOHNR0CEREREVGR4D0MRERERESkFFsY1OzVq1dYunQpDh8+jIcPHyI7O1tu+vnz5zUUGRERERFR/pgwqNnQoUOxf/9+9O3bF40bN4ZMJtN0SEREREREKmPCoGa7du3Cnj170KJFC02HQkRERERUYLyHQc0qVKg
AY2NjTYdBRERERFQoTBjUbOHChZg6dSpu376t6VCIiIiIiAqMXZLUrGHDhnj16hWqVKkCAwMDlC1bVm7606dPNRQZEREREVH+mDCombe3N+7du4c5c+bA2tqaNz0TERERUanChEHNTpw4gcjISNStW1fToRARERERFRjvYVCzmjVr4uXLl5oOg4iIiIioUJgwqNncuXMxadIkhIeH48mTJ0hNTZX7IyIiIiIqydglSc06deoEAGjXrp1cuRACMpkMWVlZmgiLiIiIiEglTBjU7PDhw5oOgYiIiIio0JgwqJmHh4emQyAiIiIiKjQmDGp29OjRPKe3atWqmCIhIiIiIio4Jgxq1rp161xl7z6LgfcwEBEREVFJxlGS1OzZs2dyfw8fPkRYWBgaNWqE/fv3azo8IiIiIqI8sYVBzUxNTXOVdejQATo6Opg4cSLOnTungaiIiIiIiFTDFgYNsba2RmxsrKbDICIiIiLKE1sY1OzSpUtyr4UQSEpKwty5c1GvXj3NBEVEREREpCImDGpWr149yGQyCCHkyps2bYrffvtNQ1EREREREamGCYOaxcfHy73W0tKCpaUl9PT0NBQREREREZHqmDComYODg6ZDICIiIiIqNCYMxeDQoUM4dOgQHj58iOzsbLlp7JZERERERCUZEwY1CwwMRFBQEBo2bAhbW1u5h7YREREREZV0TBjUbMWKFVi7di0+//xzTYdCRERERFRgfA6Dmr1+/RrNmzfXdBhERERERIXChEHNhg0bhk2bNmk6DCIiIiKiQmGXJDV79eoVVq1ahYMHD8LV1RVly5aVm75o0SINRUZERERElD8mDGp26dIl6YnOV65ckZvGG6CJiIiIqKRjwqBmhw8fLtb1BQQEIDAwUK6sRo0auHbtGoC3LR6TJk3C5s2bkZGRAU9PT/zyyy+wtrYu1jiJiIiIqHTgPQwfodq1ayMpKUn6O378uDRtwoQJ+Oeff7BlyxYcOXIE//33H3r37q3BaImIiIioJGMLw0eoTJkysLGxyVWekpKCX3/9FZs2bULbtm0BACEhIXB2dsbJkyfRtGnT4g5VsQBTFeqkqD8OIiIiImILw8foxo0bsLOzQ5UqVeDj44PExEQAwLlz5/DmzRu0b99eqluzZk1UqlQJkZGRSpeXkZGB1NRUuT8iIiIi+jQwYfjINGnSBGvXrkVYWBiWL1+O+Ph4uLu74/nz57h//z50dHRgZmYmN4+1tTXu37+vdJnBwcEwNTWV/uzt7dW8FURERERUUrBL0kemc+fO0v9dXV3RpEkTODg44K+//oK+vn6hljlt2jRMnDhRep2amsqkgYiIiOgTwRaGj5yZmRmqV6+OmzdvwsbGBq9fv0ZycrJcnQcPHii85yGHrq4uTExM5P6IiIiI6NPAhOEjl5aWhri4ONja2sLNzQ1ly5bFoUOHpOmxsbFITExEs2bNNBglEREREZVU7JL0kZk8eTK8vLzg4OCA//77D7NmzYK2tja8vb1hamqKoUOHYuLEiTA3N4eJiQnGjBmDZs2alZwRkoiIiIioRGHC8JG5e/cuvL298eTJE1haWqJly5Y4efIkLC0tAQA//vgjtLS00KdPH7kHtxERERERKcKE4SOzefPmPKfr6elh2bJlWLZsWTFFRERERESlGe9hICIiIiIipZgwEBERERGRUuySRERERFREYmo6519pGi+/qHRhCwMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVK8TZ+IiIg+eaqMbuR8LaYYIiEqedjCQERERERESrGFgYiIiD55/VV4NsLlYoiDqCRiCwMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqV40zMRERFREVHl5mmi0oYtDEREREREpBQTBiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSY
MRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUKqPpAIiIiIhKg5iazvlXmsZLK/r4sIWBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUE4aPTHBwMBo1agRjY2NYWVmhZ8+eiI2NlavTunVryGQyub8vv/xSQxETERERUUnGwYI/MkeOHMGoUaPQqFEjZGZm4n//+x86duyIq1evwtDQUKo3fPhwBAUFSa8NDAw0ES4REVGp0Z/PWKBPFI/8j0xYWJjc67Vr18LKygrnzp1Dq1atpHIDAwPY2NgUd3hEREREVMowYfjIpaSkAADMzc3lyjdu3Ijff/8dNjY28PLywowZM5S2MmRkZCAjI0N6nZqaqr6AiYiIVKTKk5edr8UUQyREHzcmDB+x7OxsjB8/Hi1atECdOnWk8kGDBsHBwQF2dna4dOkSpk6ditjYWISGhipcTnBwMAIDA4srbCIioiKjSlIBAGB3IyKl+On4iI0aNQpXrlzB8ePH5cr9/f2l/7u4uMDW1hbt2rVDXFwcnJycci1n2rRpmDhxovQ6NTUV9vb26guciIiIiEoMJgwfqdGjR2PXrl04evQoKlasmGfdJk2aAABu3rypMGHQ1dWFrq6uWuIkIiIiopKNCcNHRgiBMWPGYNu2bQgPD0flypXznScqKgoAYGtrq+boiIiIiKi0YcLwkRk1ahQ2bdqEHTt2wNjYGPfv3wcAmJqaQl9fH3Fxcdi0aRO6dOmC8uXL49KlS5gwYQJatWoFV1dXDUdPRESkOlWGOf0rOLMYIiH6uDFh+MgsX74cwNuHs70rJCQEfn5+0NHRwcGDB7F48WK8ePEC9vb26NOnD6ZPn66BaImIiIiopGPC8JERQuQ53d7eHkeOHCmmaIiIiDSLD1sj+nBamg6AiIiIiIhKLiYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUYsJARERERERKcawxIiIiKnFiajrnX4lDphIVC7YwEBERERGRUkzNiYiIqMThA9eISg62MBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMRERERESkFBMGIiIiIiJSigkDEREREREpxYSBiIiIiIiUKqPpAIiIiOjTElPTOf9K03iJQlRSsIWBiIiIiIiUYvpOREREKlGlZcD5WkwxREJExYktDEREREREpBRbGIiIiD5yRdUy0F+F+wouqxCPKsshopKDLQxERERERKQUU3wiIjVy/GZ3vnUS5nYthkhUVxpj/pSpNOIQEdEHYMJARPSR4IV+8SjOG39VWVdRde9RpSsREX2a2CWJiIiIiIiUYgsDEREVWFH9yu6yziXfOpd9S9Zv30V1429RraukUeU9JaLShS0MRERERESkVOn76YKIqBgU5/0ApfHeg+L8lb2ofrEuzpaK0viAM7YMEJEybGH4hC1btgyOjo7Q09NDkyZNcPr0aU2HREREREQlDFsYPlF//vknJk6ciBUrVqBJkyZYvHgxPD09ERsbCysrK02HR6RQUf0Sr8pySpqiilmV5Rg7f1Mk6/qUf7EuzhYYIiJ1YwvDJ2rRokUYPnw4hgwZglq1amHFihUwMDDAb7/9punQiIiIiKgEYQvDJ+j169c4d+4cpk2bJpVpaWmhffv2iIyM1GBkBRBgqkKdFPXHUQAlrZ96Uf1iXZz9+Eua0hjzp6yktXiUtHiIiJRhwvAJevz4MbKysmBtbS1Xbm1tjWvXruWqn5GRgYyMDOl1SsrbC/HU1FT1BJghimY56oqvkLIz0vOto7Z9qoAq8aiiqGIuzniKal0fq6yXWZoOgeiTp47vg5xlClFE37P0yWDCQPkKDg5GYGBgrnJ7e3sNRFMAc1VohShhTBd
rOoKCK2kxl7R4iIgKw/Qr9X2HPX/+HKampe87kjSHCcMnyMLCAtra2njw4IFc+YMHD2BjY5Or/rRp0zBx4kTpdXZ2Np4+fYry5ctDJpMVaWypqamwt7fHnTt3YGJiUqTLpv/D/Vw8uJ+LB/dz8eB+Lj7q2tdCCDx//hx2dnZFtkz6NDBh+ATp6OjAzc0Nhw4dQs+ePQG8TQIOHTqE0aNH56qvq6sLXV1duTIzMzO1xmhiYsIvpGLA/Vw8uJ+LB/dz8eB+Lj7q2NdsWaDCYMLwiZo4cSJ8fX3RsGFDNG7cGIsXL8aLFy8wZMgQTYdGRERERCUIE4ZP1IABA/Do0SPMnDkT9+/fR7169RAWFpbrRmgiIiIi+rQxYfiEjR49WmEXJE3S1dXFrFmzcnWBoqLF/Vw8uJ+LB/dz8eB+Lj7c11TSyATH1iIiIiIiIiX4pGciIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYaBit2zZMjg6OkJPTw9NmjTB6dOn86y/ZcsW1KxZE3p6enBxccGePXuKKdLSrSD7efXq1XB3d0e5cuVQrlw5tG/fPt/3hd4q6PGcY/PmzZDJZNLDEylvBd3PycnJGDVqFGxtbaGrq4vq1avz3KGCgu7nxYsXo0aNGtDX14e9vT0mTJiAV69eFVO0pdPRo0fh5eUFOzs7yGQybN++Pd95wsPD0aBBA+jq6qJq1apYu3at2uMkkiOIitHmzZuFjo6O+O2330R0dLQYPny4MDMzEw8ePFBYPyIiQmhra4v58+eLq1eviunTp4uyZcuKy5cvF3PkpUtB9/OgQYPEsmXLxIULF0RMTIzw8/MTpqam4u7du8UceelS0P2cIz4+XlSoUEG4u7uLHj16FE+wpVhB93NGRoZo2LCh6NKlizh+/LiIj48X4eHhIioqqpgjL10Kup83btwodHV1xcaNG0V8fLzYt2+fsLW1FRMmTCjmyEuXPXv2iG+//VaEhoYKAGLbtm151r9165YwMDAQEydOFFevXhVLly4V2traIiwsrHgCJhJCMGGgYtW4cWMxatQo6XVWVpaws7MTwcHBCuv3799fdO3aVa6sSZMmYsSIEWqNs7Qr6H5+X2ZmpjA2Nhbr1q1TV4gfhcLs58zMTNG8eXOxZs0a4evry4RBBQXdz8uXLxdVqlQRr1+/Lq4QPwoF3c+jRo0Sbdu2lSubOHGiaNGihVrj/JiokjBMmTJF1K5dW65swIABwtPTU42REcljlyQqNq9fv8a5c+fQvn17qUxLSwvt27dHZGSkwnkiIyPl6gOAp6en0vpUuP38vvT0dLx58wbm5ubqCrPUK+x+DgoKgpWVFYYOHVocYZZ6hdnPO3fuRLNmzTBq1ChYW1ujTp06mDNnDrKysoor7FKnMPu5efPmOHfunNRt6datW9izZw+6dOlSLDF/Kvg9SCUBn/RMxebx48fIysqCtbW1XLm1tTWuXbumcJ779+8rrH///n21xVnaFWY/v2/q1Kmws7PL9SVF/6cw+/n48eP49ddfERUVVQwRfhwKs59v3bqFf//9Fz4+PtizZw9u3ryJkSNH4s2bN5g1a1ZxhF3qFGY/Dxo0CI8fP0bLli0hhEBmZia+/PJL/O9//yuOkD8Zyr4HU1NT8fLlS+jr62soMvqUsIWBiOTMnTsXmzdvxrZt26Cnp6fpcD4az58/x+eff47Vq1fDwsJC0+F81LKzs2FlZYVVq1bBzc0NAwYMwLfffosVK1ZoOrSPSnh4OObMmYNffvkF58+fR2hoKHbv3o3Zs2drOjQiKmJsYaBiY2FhAW1tbTx48ECu/MGDB7CxsVE4j42NTYHqU+H2c44ffvgBc+fOxcGDB+Hq6qrOMEu9gu7nuLg4JCQkwMvLSyrLzs4GAJQpUwaxsbFwcnJSb9ClUGGOZ1tbW5QtWxba2tpSmbOzM+7fv4/Xr19DR0dHrTGXRoXZzzNmzMDnn3+OYcOGAQBcXFzw4sU
L+Pv749tvv4WWFn+TLArKvgdNTEzYukDFhp9mKjY6Ojpwc3PDoUOHpLLs7GwcOnQIzZo1UzhPs2bN5OoDwIEDB5TWp8LtZwCYP38+Zs+ejbCwMDRs2LA4Qi3VCrqfa9asicuXLyMqKkr66969O9q0aYOoqCjY29sXZ/ilRmGO5xYtWuDmzZtSQgb8v3buLqTJ9o8D+Hdmc2o+LE1LRa2ZkeTUzAyimMwDQYy0A2PRUEIsQrDMzEBbUMJKCyMDSUoJ014pLKEM62hmac1QHGraEsPezANLfKldz8GfxuOTS/3Xsyl9P7CD2+u67vt3/xDly737Arq6uuDr68uwYMP/0+eRkZEfQsH3kCaE+O+K/cPw/yDNCY5+65r+LFeuXBEuLi6isrJSdHR0iIyMDCGXy8Xbt2+FEEJotVqRl5dnnW8wGISzs7MoLi4WJpNJ6HQ6bqs6A7Pts16vF1KpVNy4cUMMDAxYP8PDw466hXlhtn3+N+6SNDOz7XNfX5/w8PAQmZmZorOzU9y9e1f4+PiI48ePO+oW5oXZ9lmn0wkPDw9RU1Mjent7RX19vQgODhYpKSmOuoV5YXh4WBiNRmE0GgUAcfr0aWE0GsXr16+FEELk5eUJrVZrnf99W9WDBw8Kk8kkzp07x21Vye4YGMjuzp49KwIDA4VUKhUxMTGiqanJOqZSqURqauqk+deuXROrVq0SUqlUrFmzRtTV1dm54vlpNn0OCgoSAH746HQ6+xc+z8z29/mfGBhmbrZ9bmxsFBs2bBAuLi5CoVCIwsJC8fXrVztXPf/Mps8TExPi6NGjIjg4WMhkMhEQECD27t0rhoaG7F/4PPLo0aMp/95+721qaqpQqVQ/rImMjBRSqVQoFApRUVFh97rpzyYRgs8NiYiIiIhoanyHgYiIiIiIbGJgICIiIiIimxgYiIiIiIjIJgYGIiIiIiKyiYGBiIiIiIhsYmAgIiIiIiKbGBiIiIiIiMgmBgYiIiIiIrKJgYGIiIiIiGxiYCAiohmbmJhwdAlERGRnDAxERHPYvXv3sGnTJsjlcnh5eSExMRE9PT3W8f7+fmg0Gnh6esLd3R3R0dF48uSJdfzOnTtYv349ZDIZlixZguTkZOuYRCLB7du3J11PLpejsrISAGA2myGRSHD16lWoVCrIZDJcvnwZg4OD0Gg08Pf3h5ubG5RKJWpqaiadx2Kx4OTJk1i5ciVcXFwQGBiIwsJCAIBarUZmZuak+R8+fIBUKkVDQ8PvaBsREf1GDAxERHPYly9fkJ2djZaWFjQ0NMDJyQnJycmwWCz4/PkzVCoV3rx5g9raWrx48QK5ubmwWCwAgLq6OiQnJyMhIQFGoxENDQ2IiYmZdQ15eXnIysqCyWRCfHw8RkdHsW7dOtTV1aG9vR0ZGRnQarV4+vSpdc3hw4eh1+tRUFCAjo4OVFdXY+nSpQCA9PR0VFdXY2xszDq/qqoK/v7+UKvVv9gxIiL63SRCCOHoIoiIaGY+fvwIb29vtLW1obGxETk5OTCbzfD09Pxh7saNG6FQKFBVVTXluSQSCW7duoWkpCTrz+RyOUpKSpCWlgaz2YwVK1agpKQEWVlZP60rMTERq1evRnFxMYaHh+Ht7Y3S0lKkp6f/MHd0dBR+fn4oKytDSkoKACAiIgLbtm2DTqebRTeIiMge+ISBiGgO6+7uhkajgUKhwF9//YXly5cDAPr6+tDa2oq1a9dOGRYAoLW1FXFxcb9cQ3R09KTjb9++4dixY1AqlfD09MSiRYtw//599PX1AQBMJhPGxsZsXlsmk0Gr1eLixYsAgOfPn6O9vR1paWm/XCsREf1+zo4ugIiIbNuyZQuCgoJQXl4OPz8/WCwWhIWFYXx8HK6urj9dO924RCLBvx8yT/VSs7u7+6TjoqIinDlzBiUlJVAqlXB3d8e+ffswPj4+o+sC//taUmRkJPr7+1FRUQG1Wo2goKBp1xERkf3xCQMR0Rw1ODiIzs5O5OfnIy4
uDqGhoRgaGrKOh4eHo7W1FZ8+fZpyfXh4+E9fIvb29sbAwID1uLu7GyMjI9PWZTAYsHXrVuzcuRMRERFQKBTo6uqyjoeEhMDV1fWn11YqlYiOjkZ5eTmqq6uxa9euaa9LRESOwcBARDRHLV68GF5eXjh//jxevnyJhw8fIjs72zqu0WiwbNkyJCUlwWAwoLe3Fzdv3sTjx48BADqdDjU1NdDpdDCZTGhra8OJEyes69VqNUpLS2E0GtHS0oI9e/Zg4cKF09YVEhKCBw8eoLGxESaTCbt378a7d++s4zKZDIcOHUJubi4uXbqEnp4eNDU14cKFC5POk56eDr1eDyHEpN2biIhobmFgICKao5ycnHDlyhU8e/YMYWFh2L9/P4qKiqzjUqkU9fX18PHxQUJCApRKJfR6PRYsWAAAiI2NxfXr11FbW4vIyEio1epJOxmdOnUKAQEB2Lx5M3bs2IGcnBy4ublNW1d+fj6ioqIQHx+P2NhYa2j5p4KCAhw4cABHjhxBaGgotm/fjvfv30+ao9Fo4OzsDI1GA5lM9gudIiKi/xJ3SSIiIocwm80IDg5Gc3MzoqKiHF0OERHZwMBARER2NTExgcHBQeTk5ODVq1cwGAyOLomIiH6CX0kiIiK7MhgM8PX1RXNzM8rKyhxdDhERTYNPGIiIiIiIyCY+YSAiIiIiIpsYGIiIiIiIyCYGBiIiIiIisomBgYiIiIiIbGJgICIiIiIimxgYiIiIiIjIJgYGIiIiIiKyiYGBiIiIiIhsYmAgIiIiIiKb/gY59atwiG7GygAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/instance_threshold_pred_TRAILMAP_DiceCE_best_metric(1).tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACN90lEQVR4nOzdd1gUV9sG8HtBehWpNkAsoGLDhopYUKyxxUJIBGOib+xiNPrGBhYssceuwRKNid1YsEUsiA27IjYQNYgVEBUQON8ffszryi4siOwK9++6uHTPnDnzzOzs7Dw7Z87IhBACRERERERECmipOwAiIiIiItJcTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVKFkjBMnjwZMplMrszBwQH+/v6F0XyuYmNjIZPJsGbNGqnM398fxsbGn3zZ2WQyGSZPnlxkyyuIs2fPokmTJjAyMoJMJsPFixfz3YaDgwM6depU+MGR2oSGhqJOnTrQ19eHTCZDYmJivtuQyWQYMmRI4QdXjHyKY0SLFi3QokWLQm1TFWvWrIFMJsO5c+eKfNnFUYsWLVCzZk11h/HJZe83sbGx+Z5X0TlGYQkLC4NMJkNYWFi+5/0U5xrq+lx/DH9/fzg4OBR4/uJwTCnoMf5TrPun+rxo1BWGvXv3auyJtybHlpe3b9+iZ8+eeP78OebNm4f169fD3t5eYd3r169j8uTJBTqo0+fl2bNn6NWrFwwMDLB48WKsX78eRkZGCuuePHkSkydPLlBCQQVXUj6Pn/PxVRX//vsvJk+eXKAfakgxBweHYr3PFEdLliyR+3FXk31OsRaVUp+q4ejoaGhp5S8f2bt3LxYvXpyvg4C9vT3evHkDHR2dfEaYP7nF9ubNG5Qq9ck25Ue7c+cO7t27h5UrV+K7777Lte7169cRGBiIFi1afNQvBqT5zp49i5cvX2LKlCnw8vLKte7JkycRGBgIf39/mJubF02AxUhBjxG5fR4PHDhQSNGpX0GO/Z+Tf//9F4GBgXBwcECdOnXUHQ6RWixZsgSWlpZF0vvkY31OsRaVT3aWq6en96maBgBkZGQgKysLurq60NfX/6TLyou6l5+Xx48fAwBP9D6BrKwspKena/w+oAj3i6LzKfYPXV3dQm+TiIhIkXx3STpx4gQaNGgAfX19ODk5Yfny5QrrfXgPw9u3bxEYGIgqVapAX18fZcqUQbNmzXDw4EEA7/rALV68GMC7vmDZf8D/7lP45ZdfMH/+fDg5OUFPTw/Xr19XeA9Dtrt378Lb2xtGRkYoW7YsgoKCIISQpivru/hhm7nFll324S9jFy5cQPv27WFqagpjY2O0bt0ap06dkquT3XctPDwcAQEBsLKygpGREbp164YnT54
ofgM+8M8//8DDwwNGRkYwNzdHly5dEBUVJU339/eHp6cnAKBnz56QyWRK+0euWbMGPXv2BAC0bNlSWs8Pt8+JEyfQsGFD6Ovro1KlSli3bl2OthITEzFixAhUqFABenp6qFy5MmbOnImsrKw812nnzp3o2LEjypYtCz09PTg5OWHKlCnIzMzMUff06dPo0KEDSpcuDSMjI9SqVQsLFiyQq3Pjxg306tULVlZWMDAwQLVq1fDzzz/LbSNFV1MU9QPM7q+/YcMG1KhRA3p6eggNDQUA/PLLL2jSpAnKlCkDAwMDuLm5YcuWLQrX8ffff0fDhg1haGiI0qVLo3nz5tIvxn5+frC0tMTbt29zzNe2bVtUq1Yt9w0IYPPmzXBzc4OBgQEsLS3x9ddf4+HDh9L0Fi1awM/PDwDQoEEDyGQypb+kTJ48GaNHjwYAODo6SvvFh91kduzYgZo1a0JPTw81atSQtsv7Hj58iG+//RY2NjZSvd9++y3P9QGAkJAQtGrVCtbW1tDT00P16tWxdOnSHPXOnTsHb29vWFpawsDAAI6Ojvj222/l6mzatAlubm4wMTGBqakpXF1dc+w3d+/eRc+ePWFhYQFDQ0M0btwYe/bsybG81NRUTJ48GVWrVoW+vj7s7OzQvXt33LlzR6rz4THi3r17GDRoEKpVqwYDAwOUKVMGPXv2lNumeX0eFfV1fvz4Mfr37w8bGxvo6+ujdu3aWLt2rVyd94+nK1askI6nDRo0wNmzZ5Vu/w+9fv0aAwcORJkyZWBqaoq+ffvixYsXOert27dPOkaZmJigY8eOuHbtmjQ9t+NrvXr10L17d7n2XF1dIZPJcPnyZanszz//hEwmkzv2qbqvpaWlYdKkSahcuTL09PRQoUIFjBkzBmlpaXL1sj/7quzn7wsLC0ODBg0AAP369ZPW78PvrOvXr6Nly5YwNDREuXLlMGvWrALHqkj2vRKXL1+Gp6cnDA0NUblyZekYdfToUTRq1Eg6Rh46dChHG6p8rwHAtWvX0KpVKxgYGKB8+fKYOnWq0mN/XvuHqvI6x8iP48ePo2fPnqhYsaK0nUeOHIk3b94orJ/XuQbw7sel+fPno0aNGtDX14eNjQ0GDhyo8DPzoUWLFqFGjRrS90X9+vWxcePGXOdJT0/HxIkT4ebmBjMzMxgZGcHDwwNHjhyRq5ff40H2/q+vr4+aNWti+/btecYPvDsnvHbtGo4ePSp9Bj48fqWlpal0LvQx+0z2/v/+vhkSEiL3naZKrHlR5Rj/vsI6nipz8OBBNGvWDObm5jA2Nka1atXw3//+N1/rlK8rDFeuXEHbtm1hZWWFyZMnIyMjA5MmTYKNjU2e806ePBnBwcH47rvv0LBhQyQnJ+PcuXM4f/482rRpg4EDB+Lff//FwYMHsX79eoVthISEIDU1FQMGDICenh4sLCyUHoQyMzPRrl07NG7cGLNmzUJoaCgmTZqEjIwMBAUF5We1VYrtfdeuXYOHhwdMTU0xZswY6OjoYPny5WjRooV0UH7f0KFDUbp0aUyaNAmxsbGYP38+hgwZgj///DPX5Rw6dAjt27dHpUqVMHnyZLx58waLFi1C06ZNcf78eTg4OGDgwIEoV64cpk+fjmHDhqFBgwZK36/mzZtj2LBhWLhwIf773//CxcUFAKR/AeD27dv48ssv0b9/f/j5+eG3336Dv78/3NzcUKNGDQDvdnxPT088fPgQAwcORMWKFXHy5EmMGzcO8fHxmD9/fq7rtWbNGhgbGyMgIADGxsb4559/MHHiRCQnJ2P27NlSvYMHD6JTp06ws7PD8OHDYWtri6ioKOzevRvDhw8H8O7g4OHhAR0dHQwYMAAODg64c+cO/v77b0ybNi3XOJT5559/8Ndff2HIkCGwtLSUko0FCxbgiy++gK+vL9LT07Fp0yb07NkTu3fvRseOHaX5AwMDMXnyZDRp0gRBQUHQ1dXF6dOn8c8//6Bt27b45ptvsG7dOuz
fv1/uJvNHjx7hn3/+waRJk/Lcfv369UODBg0QHByMhIQELFiwAOHh4bhw4QLMzc3x888/o1q1alixYgWCgoLg6OgIJycnhe11794dN2/exB9//IF58+bB0tISAGBlZSXVOXHiBLZt24ZBgwbBxMQECxcuRI8ePRAXF4cyZcoAABISEtC4cWPpxMvKygr79u1D//79kZycjBEjRuS6XkuXLkWNGjXwxRdfoFSpUvj7778xaNAgZGVlYfDgwQDenTBnH6PGjh0Lc3NzxMbGYtu2bVI7Bw8ehI+PD1q3bo2ZM2cCAKKiohAeHi7tNwkJCWjSpAlev36NYcOGoUyZMli7di2++OILbNmyBd26dQPw7jjTqVMnHD58GH369MHw4cPx8uVLHDx4EFevXlW6Tc+ePYuTJ0+iT58+KF++PGJjY7F06VK0aNEC169fh6GhoUqfx/e9efMGLVq0wO3btzFkyBA4Ojpi8+bN8Pf3R2JiorRu2TZu3IiXL19i4MCBkMlkmDVrFrp37467d++q1MVzyJAhMDc3x+TJkxEdHY2lS5fi3r170g8xALB+/Xr4+fnB29sbM2fOxOvXr7F06VI0a9YMFy5ckI5Ryo6vHh4e+OOPP6TXz58/x7Vr16ClpYXjx4+jVq1aAN6d4FlZWUnbRtV9LSsrC1988QVOnDiBAQMGwMXFBVeuXMG8efNw8+ZN7NixQy4eVfbzD7m4uCAoKAgTJ07EgAED4OHhAQBo0qSJVOfFixdo164dunfvjl69emHLli346aef4Orqivbt2xcoVkVevHiBTp06oU+fPujZsyeWLl2KPn36YMOGDRgxYgT+85//4KuvvsLs2bPx5Zdf4v79+zAxMQGg+vfao0eP0LJlS2RkZGDs2LEwMjLCihUrYGBgkCMeVfYPVeV1jpEfmzdvxuvXr/HDDz+gTJkyOHPmDBYtWoQHDx5g8+bNcnVVPdcYOHCgdGweNmwYYmJi8Ouvv+LChQsIDw9X+plbuXIlhg0bhi+//BLDhw9HamoqLl++jNOnT+Orr75Sug7JyclYtWoVfHx88P333+Ply5dYvXo1vL29cebMmRxd41Q5Hhw4cAA9evRA9erVERwcjGfPnqFfv34oX758ntt0/vz5GDp0KIyNjaUf7D48F1HlXOhj9pmHDx9KP76MGzcORkZGWLVqVY4eMarEmhdVjvHvK6zjqSLXrl1Dp06dUKtWLQQFBUFPTw+3b99GeHh4vtYJIh+6du0q9PX1xb1796Sy69evC21tbfFhU/b29sLPz096Xbt2bdGxY8dc2x88eHCOdoQQIiYmRgAQpqam4vHjxwqnhYSESGV+fn4CgBg6dKhUlpWVJTp27Ch0dXXFkydPhBBCHDlyRAAQR44cybNNZbEJIQQAMWnSJOl1165dha6urrhz545U9u+//woTExPRvHlzqSwkJEQAEF5eXiIrK0sqHzlypNDW1haJiYkKl5etTp06wtraWjx79kwqu3TpktDS0hJ9+/aVyrLXc/Pmzbm2J4QQmzdvVrhNhHj3ngIQx44dk8oeP34s9PT0xKhRo6SyKVOmCCMjI3Hz5k25+ceOHSu0tbVFXFxcrjG8fv06R9nAgQOFoaGhSE1NFUIIkZGRIRwdHYW9vb148eKFXN33t2Xz5s2FiYmJ3D77YR0/Pz9hb2+fY5mTJk3K8Z4DEFpaWuLatWt5xp2eni5q1qwpWrVqJZXdunVLaGlpiW7duonMzEyFMWVmZory5cuL3r17y02fO3eukMlk4u7duzmW/f4yra2tRc2aNcWbN2+k8t27dwsAYuLEiVJZ9v539uxZpe1lmz17tgAgYmJickwDIHR1dcXt27elskuXLgkAYtGiRVJZ//79hZ2dnXj69Knc/H369BFmZmYK3/f3KZru7e0tKlWqJL3evn17nus0fPhwYWpqKjIyMpTWGTFihAAgjh8/LpW9fPlSODo6CgcHB+m9++233wQAMXfu3BxtvL+PfXi
MULQuERERAoBYt26dVJbb59HT01N4enpKr+fPny8AiN9//10qS09PF+7u7sLY2FgkJycLIf53fCtTpox4/vy5VHfnzp0CgPj777+Vbhch/rffuLm5ifT0dKl81qxZAoDYuXOnEOLd9jI3Nxfff/+93PyPHj0SZmZmcuXKjq/Z63/9+nUhhBC7du0Senp64osvvpD7fNSqVUt069ZNeq3qvrZ+/XqhpaUl9z4LIcSyZcsEABEeHi6VqbqfK3L27Nkc3ynZPD09c7zvaWlpwtbWVvTo0UMqy0+simQvZ+PGjVLZjRs3pGPaqVOnpPL9+/fniFfV77Xsz87p06elssePHwszMzO5Y0h+9g9Fx+IPqXKOoYii8wBFn8/g4GAhk8nkvktUPdc4fvy4ACA2bNgg12ZoaGiO8g8/1126dBE1atTI93plZGSItLQ0ubIXL14IGxsb8e2330pl+Tke1KlTR9jZ2cmdmxw4cEAAUPgd+qEaNWrIrVs2Vc+F8rPPKDJ06FAhk8nEhQsXpLJnz54JCwuLHN9vymJVpqDH+E9xPP3w8zJv3jwBQNofC0rlLkmZmZnYv38/unbtiooVK0rlLi4u8Pb2znN+c3NzXLt2Dbdu3VJ1kTn06NFD7lfNvLw/1GP2L03p6ekKL7UWlszMTBw4cABdu3ZFpUqVpHI7Ozt89dVXOHHiBJKTk+XmGTBggFzXFw8PD2RmZuLevXtKlxMfH4+LFy/C398fFhYWUnmtWrXQpk0b7N27txDX6n+qV68u/UIGvPuVuVq1arh7965UtnnzZnh4eKB06dJ4+vSp9Ofl5YXMzEwcO3Ys12W8/0vUy5cv8fTpU3h4eOD169e4ceMGgHeXxmNiYjBixIgcffCzt+WTJ09w7NgxfPvtt3L77Pt1CsLT0xPVq1fPNe4XL14gKSkJHh4eOH/+vFS+Y8cOZGVlYeLEiTkGBciOSUtLC76+vti1axdevnwpTd+wYQOaNGkCR0dHpbGdO3cOjx8/xqBBg+T6zXfs2BHOzs4Ku9QUBi8vL7lf02vVqgVTU1NpvxBCYOvWrejcuTOEEHL7hbe3N5KSkuS2kyLvb9+kpCQ8ffoUnp6euHv3LpKSkgD8736M3bt3K+zSlV3n1atXuXZV2Lt3Lxo2bIhmzZpJZcbGxhgwYABiY2Nx/fp1AMDWrVthaWmJoUOH5mgjt33s/XV5+/Ytnj17hsqVK8Pc3DzP7ZBbzLa2tvDx8ZHKdHR0MGzYMKSkpODo0aNy9Xv37o3SpUtLr7M/1+9/lnMzYMAAuV9Ff/jhB5QqVUo69hw8eBCJiYnw8fGRe7+1tbXRqFGjHF0jFMmOKfuYcfz4cTRo0ABt2rTB8ePHAbzr/nj16lWpbn72tc2bN8PFxQXOzs5y9Vq1agUAOWLMaz8vKGNjY3z99dfSa11dXTRs2DDHcTU/sSpbTp8+faTX1apVg7m5OVxcXOSufGf/P3v5+fle27t3Lxo3boyGDRtK9aysrODr6ysXS2HsH+8rjHOMbO9/Pl+9eoWnT5+iSZMmEELgwoULOernda6xefNmmJmZoU2bNnLr6ubmBmNj41zX1dzcHA8ePMhXd0EA0NbWlu5zysrKwvPnz5GRkYH69esrPMbkdTzIPufw8/ODmZmZVK9NmzYKvw8LIq9zoY/dZ0JDQ+Hu7i53dcXCwiLHvlkY8nuM/5TH0+zvxZ07d6rULVwZlROGJ0+e4M2bN6hSpUqOaar0qQ4KCkJiYiKqVq0KV1dXjB49Wq4PqipyO1H6kJaWltyBDQCqVq0KAJ90iMInT57g9evXCreJi4sLsrKycP/+fbnyD09msz+0ufVtzP4AKVvO06dP8erVq3zHn5cPYwXexft+rLdu3UJoaCisrKzk/rJH4sm+2VaZa9euoVu3bjAzM4OpqSmsrKykL9PsE8Ps/uG5jV+efaAr7DHOle2Hu3fvRuPGjaGvrw8LCwtYWVlh6dKlUszAu7i1tLTyPMD27ds
Xb968kfqHRkdHIzIyEt98802u8+W2Xzg7O+eahH6MvPaLJ0+eIDExEStWrMixX/Tr1w9A3vtFeHg4vLy8pPt1rKyspD6Y2dvY09MTPXr0QGBgICwtLdGlSxeEhITI9fEeNGgQqlativbt26N8+fL49ttvc/RDv3fvntLPVvZ04N37Wa1atXyPgPTmzRtMnDhRusfH0tISVlZWSExMlNtf8uPevXuoUqVKjkT0w5izFeS4874PvwuMjY1hZ2cnHV+zT9xatWqV4z0/cOBAnu838K4bQJUqVaTk4Pjx4/Dw8EDz5s3x77//4u7duwgPD0dWVpZ0gpOffe3WrVu4du1ajnrZ3xUfxqjK8a8gypcvnyPBVHRczU+sqi7HzMwMFSpUyFEGQO7zq+r3WvZ++KEP5y2M/eN9hXGOkS0uLk76Mc7Y2BhWVlbSvYAffj5VOde4desWkpKSYG1tnWNdU1JScl3Xn376CcbGxmjYsCGqVKmCwYMHq9yVZO3atahVq5Z0T4eVlRX27Nmj8BiT1/Eg+/hR0HNAVeQVw8fuM/fu3UPlypVzlCsq+1j5PcZ/yuNp79690bRpU3z33XewsbFBnz598Ndff+U7eSiysUCbN2+OO3fuYOfOnThw4ABWrVqFefPmYdmyZXkO9ZlNUR/Ij6HsF0BFN9d+Stra2grLxQc3TWkCVWLNyspCmzZtMGbMGIV1sw+miiQmJsLT0xOmpqYICgqCk5MT9PX1cf78efz0008flR0rk9/9QNF+ePz4cXzxxRdo3rw5lixZAjs7O+jo6CAkJCTPm9MUqV69Otzc3PD777+jb9+++P3336Grq4tevXrlu62ikNd+kf2+ff3119LN1h/K7o+uyJ07d9C6dWs4Oztj7ty5qFChAnR1dbF3717MmzdPal8mk2HLli04deoU/v77b+zfvx/ffvst5syZg1OnTsHY2BjW1ta4ePEi9u/fj3379mHfvn0ICQlB3759c9wg/KkMHToUISEhGDFiBNzd3WFmZgaZTIY+ffp8kn1ckU993Mlej/Xr18PW1jbHdFWTrGbNmuHw4cN48+YNIiMjMXHiRNSsWRPm5uY4fvw4oqKiYGxsjLp168otV5V9LSsrC66urpg7d67Ceh+eSH+qbabqcTU/seZnOer4Diqs/SNbYZxjAO+O+23atMHz58/x008/wdnZGUZGRnj48CH8/f0L9PnMysqCtbU1NmzYoHB6bj0nXFxcEB0djd27dyM0NBRbt27FkiVLMHHiRAQGBiqd7/fff4e/vz+6du2K0aNHw9raGtra2ggODpYbkCGbJpyHqPo9Ulj7zKdU2Mf4j1l3AwMDHDt2DEeOHMGePXsQGhqKP//8E61atcKBAweUbvccy1A12OwRZhRd7ouOjlapDQsLC/Tr1w/9+vVDSkoKmjdvjsmTJ0sf5sJ8Ml1WVhbu3r0rd3J68+ZNAJBuDMnOXj98GJWiX2FVjc3KygqGhoYKt8mNGzegpaWl0oE9L9kPXlO2HEtLS6UP4cpNYbwHTk5OSElJyXNsf0XCwsLw7NkzbNu2Dc2bN5fKY2JiciwDAK5evap0Odm/+ly9ejXXZZYuXVrhA8ny82v81q1boa+vj/3798vdQBUSEpIj7qysLFy/fj3P8dj79u2LgIAAxMfHY+PGjejYsaPcJWNF3t8vsrsqZIuOjlb6wL68fOx+YWVlBRMTE2RmZhZov/j777+RlpaGXbt2yf0KpewybOPGjdG4cWNMmzYNGzduhK+vLzZt2iQda3R1ddG5c2d07twZWVlZGDRoEJYvX44JEyagcuXKsLe3V/rZAv63nZ2cnHD69Gm8ffs2X8+C2bJlC/z8/DBnzhypLDU1Ncd+mJ/tbm9vj8uXLyMrK0vuKsOHMReWW7duoWXLltLrlJQUxMfHo0OHDgD+9xm1trbO8z3PbT09PDwQEhKCTZs2ITMzE02aNIGWlhaaNWsmJQxNmjSRvvTys685OTnh0qVLaN2
69Sd7kjBQeMfVoohVkfx8r9nb26t0npCf/UNVeZ1jqOLKlSu4efMm1q5di759+0rlyrowqnKu4eTkhEOHDqFp06YF+uHTyMgIvXv3Ru/evZGeno7u3btj2rRpGDdunNIhm7ds2YJKlSph27ZtcvtLXoNmKJN9/PiYc8CP3W8/dp+xt7fH7du3c5QrKvvYWFU9xmcrzOOpIlpaWmjdujVat26NuXPnYvr06fj5559x5MgRldtTuUuStrY2vL29sWPHDsTFxUnlUVFR2L9/f57zP3v2TO61sbExKleuLNdVIPsEt7CeJvvrr79K/xdC4Ndff4WOjg5at24N4N3Oo62tnaNP/ZIlS3K0pWps2traaNu2LXbu3CnX9SkhIQEbN25Es2bNYGpqWsA1+h87OzvUqVMHa9eulYvp6tWrOHDggLST5VdhvAe9evVCRESEwv0iMTERGRkZSufN/tJ//1eN9PT0HO9JvXr14OjoiPnz5+eINXteKysrNG/eHL/99pvcPvth+05OTkhKSpK7fB0fH6/ycHHZcctkMrmrErGxsTlGLunatSu0tLQQFBSU41eGD3/J8fHxgUwmw/Dhw3H37l25Ps7K1K9fH9bW1li2bJncZ2vfvn2IioqSG60pPz52v9DW1kaPHj2wdetWhQlcXsMIK9ovkpKSciRkL168yLEdsxOz7O3x4bFIS0tL+sU5u06HDh1w5swZRERESPVevXqFFStWwMHBQepS1qNHDzx9+lTuWJMtt1/mtLW1c0xftGhRjqta+dnuHTp0wKNHj+RGFMnIyMCiRYtgbGwsdakoLCtWrJC7T2Tp0qXIyMiQRvXx9vaGqakppk+frvB+kvff89zWM7ur0cyZM1GrVi2pu4yHhwcOHz6Mc+fOyd1XlZ99rVevXnj48CFWrlyZo96bN28KrVtnYR1XiyJWRfLzvdahQwecOnUKZ86ckeo9efIkx6/r+dk/VKHKOYYqFB1rhBA5hl1+X17nGr169UJmZiamTJmSY96MjIxc94sP10tXVxfVq1eHEELpfVrK1uP06dNyx7T8eP+c4/0uNQcPHpTu6cqLkZHRR30GPnaf8fb2RkREhNwT158/f67wys/HxqrqMT5bYR5PP/T8+fMcZR9+L6oiX9dvAgMDERoaCg8PDwwaNEj6MqpRo0aefQWrV6+OFi1awM3NDRYWFjh37hy2bNkid7OQm5sbAGDYsGHw9vaGtra23A1a+aGvr4/Q0FD4+fmhUaNG2LdvH/bs2YP//ve/0uU/MzMz9OzZE4sWLYJMJoOTkxN2796tsC9YfmKbOnWqNObtoEGDUKpUKSxfvhxpaWkKx9YuqNmzZ6N9+/Zwd3dH//79pWFVzczMCvzE1Dp16kBbWxszZ85EUlIS9PT0pLHvVTV69Gjs2rULnTp1koZcffXqFa5cuYItW7YgNjZWGprzQ02aNEHp0qXh5+eHYcOGQSaTYf369Tk+eFpaWli6dCk6d+6MOnXqoF+/frCzs8ONGzdw7do1KVlZuHAhmjVrhnr16mHAgAFwdHREbGws9uzZIx00+vTpg59++gndunXDsGHDpKHKqlatqvINqB07dsTcuXPRrl07fPXVV3j8+DEWL16MypUry302KleujJ9//hlTpkyBh4cHunfvDj09PZw9exZly5ZFcHCwVNfKygrt2rXD5s2bYW5urtLJvo6ODmbOnIl+/frB09MTPj4+0rCqDg4OGDlypErr86Hs/f/nn39Gnz59oKOjg86dO+frKtaMGTNw5MgRNGrUCN9//z2qV6+O58+f4/z58zh06JDCg1q2tm3bSlcFBg4ciJSUFKxcuRLW1taIj4+X6q1duxZLlixBt27d4OTkhJcvX2LlypUwNTWVkujvvvsOz58/R6tWrVC+fHncu3cPixYtQp06daT+/mPHjsUff/yB9u3bY9iwYbCwsMDatWsRExODrVu3Sr/g9+3bF+vWrUNAQADOnDkDDw8PvHr1CocOHcK
gQYPQpUsXhevTqVMnrF+/HmZmZqhevToiIiJw6NChHENz5ufzOGDAACxfvhz+/v6IjIyEg4MDtmzZgvDwcMyfP18aHrOwpKeno3Xr1ujVqxeio6OxZMkSNGvWDF988QUAwNTUFEuXLsU333yDevXqoU+fPrCyskJcXBz27NmDpk2bSidauR1fK1euDFtbW0RHR8vdXN68eXP89NNPACCXMACq72vffPMN/vrrL/znP//BkSNH0LRpU2RmZuLGjRv466+/sH//ftSvX/+jt5WTkxPMzc2xbNkymJiYwMjICI0aNcrXfXlFFasyqn6vjRkzBuvXr0e7du0wfPhwaVjV7Ctg2fKzf6hClXMMVTg7O8PJyQk//vgjHj58CFNTU2zdulXpfSqqnGt4enpi4MCBCA4OxsWLF9G2bVvo6Ojg1q1b2Lx5MxYsWIAvv/xSYftt27aFra0tmjZtChsbG0RFReHXX39Fx44dc/1Md+rUCdu2bUO3bt3QsWNHxMTEYNmyZahevTpSUlLytU2yBQcHo2PHjmjWrBm+/fZbPH/+XDoHVKVNNzc3LF26FFOnTkXlypVhbW2d40p4bj52nxkzZgx+//13tGnTBkOHDpWGVa1YsSKeP38ud1XhY2NV9RifrTCPpx8KCgrCsWPH0LFjR9jb2+Px48dYsmQJypcvLzewR57yO6zS0aNHhZubm9DV1RWVKlUSy5YtUzjk2YfDqk6dOlU0bNhQmJubCwMDA+Hs7CymTZsmN4xURkaGGDp0qLCyshIymUxqM3vYr9mzZ+eIR9mwqkZGRuLOnTuibdu2wtDQUNjY2IhJkyblGMryyZMnokePHsLQ0FCULl1aDBw4UFy9ejVHm8piEyLncFpCCHH+/Hnh7e0tjI2NhaGhoWjZsqU4efKkXB1lw1oqG+5VkUOHDommTZsKAwMDYWpqKjp37iwNQfhhe6oMqyqEECtXrhSVKlWShsvNjsPe3l7hsHUfDgMnxLshwMaNGycqV64sdHV1haWlpWjSpIn45Zdf5N5zRcLDw0Xjxo2FgYGBKFu2rBgzZow0zN+H2+TEiROiTZs2wsTERBgZGYlatWrlGOLw6tWrolu3bsLc3Fzo6+uLatWqiQkTJsjVOXDggKhZs6bQ1dUV1apVE7///rvSYVUHDx6sMO7Vq1eLKlWqCD09PeHs7CxCQkKUDgf422+/ibp16wo9PT1RunRp4enpKQ4ePJij3l9//SUAiAEDBuS6zT70559/Su1bWFgIX19f8eDBA7k6+RlWVYh3w+WWK1dOaGlpyQ1Bp2ybfHgMEEKIhIQEMXjwYFGhQgWho6MjbG1tRevWrcWKFSvyXP6uXbtErVq1hL6+vnBwcBAzZ86UhjXNjuX8+fPCx8dHVKxYUejp6Qlra2vRqVMnce7cOamdLVu2iLZt2wpra2uhq6srKlasKAYOHCji4+Pllnfnzh3x5ZdfSvtNw4YNxe7du3PE9fr1a/Hzzz8LR0dHaZ2+/PJLueEnPzxGvHjxQvTr109YWloKY2Nj4e3tLW7cuKFwmyn7PCr63CUkJEjt6urqCldX1xxDeeZ2PFV0LPtQ9n5z9OhRMWDAAFG6dGlhbGwsfH195YZ4znbkyBHh7e0tzMzMhL6+vnBychL+/v5y70lux1chhOjZs6cAIP7880+pLD09XRgaGgpdXV25IYTf3xaq7Gvp6eli5syZokaNGtLn0c3NTQQGBoqkpCS5baPqfq7Izp07RfXq1UWpUqXkvl88PT0VDpupaLhnVWNVRNlylB3XFa2vKt9rQghx+fJl4enpKfT19UW5cuXElClTxOrVq3MMXSmEavuHKsOqqnKOoYii79vr168LLy8vYWxsLCwtLcX3338vDaFb0HMNIYRYsWKFcHNzEwYGBsLExES4urqKMWPGiH///Veq8+Hnevny5aJ58+aiTJkyQk9PTzg5OYnRo0fn+X5nZWWJ6dOnC3t7e6Gnpyfq1q0rdu/enWO/yu/
xYOvWrcLFxUXo6emJ6tWri23btikdmvxDjx49Eh07dhQmJiYCgLSe+T0XUmWfUebChQvCw8ND6OnpifLly4vg4GCxcOFCAUA8evQoz1iVKegx/lMcTz/8vBw+fFh06dJFlC1bVujq6oqyZcsKHx+fHEPf50X2/ytKRBpo586d6Nq1K44dO5bjV1QiIiL6OCNGjMDy5cuRkpKi8g3AJRETBiIN1qlTJ0RFReH27dtFfqMjERFRcfLmzRu5G8+fPXuGqlWrol69erk+m4eKcFhVIlLdpk2bcPnyZezZswcLFixgskBERPSR3N3d0aJFC7i4uCAhIQGrV69GcnIyJkyYoO7QNB6vMBBpIJlMBmNjY/Tu3RvLli3TqPGliYiIPkf//e9/sWXLFjx48AAymQz16tXDpEmTCm1o3+KMCQMRERERESml8nMYiIiIiIio5GHCQERERERESrFjNOVbVlYW/v33X5iYmPBmXCIios+EEAIvX75E2bJlpQdQEqmCCQPl27///osKFSqoOwwiIiIqgPv376N8+fLqDoM+I0wYKN+yH0d///59mJqaqjkaIiIiUkVycjIqVKggfY8TqYoJA+VbdjckU1NTJgxERESfGXYnpvxiBzYiIiIiIlKKCQMRERERESnFhIGIiIiIiJTiPQxEVGxlZmbi7du36g6DiKhI6OjoQFtbW91hUDHEhIGIih0hBB49eoTExER1h0JEVKTMzc1ha2vLG5upUDFhIKJiJztZsLa2hqGhIb84iajYE0Lg9evXePz4MQDAzs5OzRFRccKEgYiKlczMTClZKFOmjLrDISIqMgYGBgCAx48fw9ramt2TqNDwpmciKlay71kwNDRUcyREREUv+9jH+7eoMDFhIKJiid2QiKgk4rGPPgUmDEREREREpBQTBiIiDdGiRQuMGDFC3WFotDVr1sDc3Fxj2snL69ev0aNHD5iamkImk312I3dNnjwZderUUXcYShXkfZTJZNixY0ehLD+/n9mwsLBC2Q8cHBwwf/78j2qDKD940zMRlRgOY/cU6fJiZ3Qs0uXlJSwsDC1btsSLFy+K5GT5U+jduzc6dOiQr3kcHBwwYsQIuRO7grRTEGvXrsXx48dx8uRJWFpawszM7JMvk/JHJpMhJiYGDg4O6g6FSGMxYSAios+GgYGBNBKMJrSTlzt37sDFxQU1a9YscBuZmZmQyWTQ0mKnACJSDx59iIg0SEZGBoYMGQIzMzNYWlpiwoQJEEJI09PS0vDjjz+iXLlyMDIyQqNGjRAWFiZNv3fvHjp37ozSpUvDyMgINWrUwN69exEbG4uWLVsCAEqXLg2ZTAZ/f3+FMTx79gw+Pj4oV64cDA0N4erqij/++EOuzpYtW+Dq6goDAwOUKVMGXl5eePXqFYB3VzIaNmwIIyMjmJubo2nTprh3754079KlS+Hk5ARdXV1Uq1YN69evl2s7MTERAwcOhI2NDfT19VGzZk3s3r0bQM4uKHfu3EGXLl1gY2MDY2NjNGjQAIcOHZKmt2jRAvfu3cPIkSMhk8mkG0IVdWXJKy6ZTIZVq1ahW7duMDQ0RJUqVbBr1y6F2zB72XPmzMGxY8cgk8nQokULAMCLFy/Qt29flC5dGoaGhmjfvj1u3bolzZcd265du1C9enXo6ekhLi5O4TKuXr2K9u3bw9jYGDY2Nvjmm2/w9OlTaXpoaCiaNWsGc3NzlClTBp06dcKdO3fk2njw4AF8fHxgYWEBIyMj1K9fH6dPn5ars379ejg4OMDMzAx9+vTBy5cvla53dvy7d+9GtWrVYGhoiC+//BKvX7/G2rVr4eDggNKlS2PYsGHIzMyU5stru2S3XbFiRRgaGqJbt2549uxZjuXv3LkT9erVg76+PipVqoTAwEBkZGQojfd9L168gK+vL6ysrGBgYIAqVaogJCREpXmBd9upfv36MDExga2tLb766ivpuQjvCw8PR61ataCvr4/GjRvj6tWrctNPnDgBDw8
PGBgYoEKFChg2bJj0+SJSByYMREQaZO3atShVqhTOnDmDBQsWYO7cuVi1apU0fciQIYiIiMCmTZtw+fJl9OzZE+3atZNOrAYPHoy0tDQcO3YMV65cwcyZM2FsbIwKFSpg69atAIDo6GjEx8djwYIFCmNITU2Fm5sb9uzZg6tXr2LAgAH45ptvcObMGQBAfHw8fHx88O233yIqKgphYWHo3r07hBDIyMhA165d4enpicuXLyMiIgIDBgyQTtS3b9+O4cOHY9SoUbh69SoGDhyIfv364ciRIwCArKwstG/fHuHh4fj9999x/fp1zJgxQ+l48ikpKejQoQMOHz6MCxcuoF27dujcubN0gr1t2zaUL18eQUFBiI+PR3x8vMJ28oorW2BgIHr16oXLly+jQ4cO8PX1xfPnzxW2uW3bNnz//fdwd3dHfHw8tm3bBgDw9/fHuXPnsGvXLkREREAIgQ4dOsgNg/n69WvMnDkTq1atwrVr12BtbZ2j/cTERLRq1Qp169bFuXPnEBoaioSEBPTq1Uuq8+rVKwQEBODcuXM4fPgwtLS00K1bN2RlZUnbz9PTEw8fPsSuXbtw6dIljBkzRpoOvEvKduzYgd27d2P37t04evQoZsyYoXCd349/4cKF2LRpE0JDQxEWFoZu3bph79692Lt3L9avX4/ly5djy5Yt0jx5bZfTp0+jf//+GDJkCC5evIiWLVti6tSpcss9fvw4+vbti+HDh+P69etYvnw51qxZg2nTpuUab7YJEybg+vXr2LdvH6KiorB06VJYWlqqNC/wbijTKVOm4NKlS9ixYwdiY2MVJuajR4/GnDlzcPbsWVhZWaFz587Set65cwft2rVDjx49cPnyZfz55584ceIEhgwZonIcRIVOEOVTUlKSACCSkpLUHQpRDm/evBHXr18Xb968yTHN/qfdRfqXX56ensLFxUVkZWVJZT/99JNwcXERQghx7949oa2tLR4+fCg3X+vWrcW4ceOEEEK4urqKyZMnK2z/yJEjAoB48eJFvmPr2LGjGDVqlBBCiMjISAFAxMbG5qj37NkzAUCEhYUpbKdJkybi+++/lyvr2bOn6NChgxBCiP379wstLS0RHR2tcP6QkBBhZmaWa6w1atQQixYtkl7b29uLefPm5dpOXnEJIQQAMX78eOl1SkqKACD27dunNJbhw4cLT09P6fXNmzcFABEeHi6VPX36VBgYGIi//vpLig2AuHjxYq7rOWXKFNG2bVu5svv37wsASrffkydPBABx5coVIYQQy5cvFyYmJuLZs2cK60+aNEkYGhqK5ORkqWz06NGiUaNGSuPKjv/27dtS2cCBA4WhoaF4+fKlVObt7S0GDhwohFBtu/j4+Mi9H0II0bt3b7n3sXXr1mL69OlyddavXy/s7Oyk1wDE9u3bFcbeuXNn0a9fP6Xr9iFPT08xfPhwpdPPnj0rAEjrnf0ZXDd7tnh95Yp4feWKeHDihDDQ1xfr/7/Mr3t38e2XX8q1c/z4caGlpSUd1xTt09lyOwby+5sKilcYiIg0SOPGjeXGUXd3d8etW7eQmZmJK1euIDMzE1WrVoWxsbH0d/ToUambybBhwzB16lQ0bdoUkyZNwuXLl/MdQ2ZmJqZMmQJXV1dYWFjA2NgY+/fvl361r127Nlq3bg1XV1f07NkTK1euxIsXLwAAFhYW8Pf3h7e3Nzp37owFCxbI/aofFRWFpk2byi2vadOmiIqKAgBcvHgR5cuXR9WqVVWKNSUlBT/++CNcXFxgbm4OY2NjREVFKe3Co0xecWWrVauW9H8jIyOYmpoq7HKS23JKlSqFRo0aSWVlypRBtWrV5Jalq6srtyxFLl26hCNHjsjtC87OzgAg7Q+3bt2Cj48PKlWqBFNTU+nG3uztc/HiRdStWxcWFhZKl+Pg4AATExPptZ2dXZ7rbGhoCCcnJ+m1jY0NHBwcYGxsLFeW3Y4q2yUqKkpuOvDu8/HhNgkKCpLbJt9//z3
i4+Px+vXrXGMGgB9++AGbNm1CnTp1MGbMGJw8eTLPed4XGRmJzp07o2LFijAxMYGnpycA5NgfG9WuLf3fwswMVRwccCMmBgBwJToav+/cKbcO3t7eyMrKQsz/1yEqarzpmYjoM5GSkgJtbW1ERkbm6KKTfSL23XffwdvbG3v27MGBAwcQHByMOXPmYOjQoSovZ/bs2ViwYAHmz58PV1dXGBkZYcSIEUhPTwcAaGtr4+DBgzh58iQOHDiARYsW4eeff8bp06fh6OiIkJAQDBs2DKGhofjzzz8xfvx4HDx4EI0bN85z2fm9EfnHH3/EwYMH8csvv6By5cowMDDAl19+KcVa2HR0dORey2Qyue47hcXAwCDPB3ClpKSgc+fOmDlzZo5pdnZ2AIDOnTvD3t4eK1euRNmyZZGVlYWaNWtK20eV7V2QdVY0T1Fsu5SUFAQGBqJ79+45punr6+c5f/v27XHv3j3s3bsXBw8eROvWrTF48GD88ssvec776tUreHt7w9vbGxs2bICVlRXi4uLg7e2dr/3x1evX6N+zJwICA3NMq1ixosrtEBUmXmEgItIgH95seurUKVSpUgXa2tqoW7cuMjMz8fjxY1SuXFnuz9bWVpqnQoUK+M9//oNt27Zh1KhRWLlyJYB3v1oDkLvRVJHw8HB06dIFX3/9NWrXro1KlSrh5s2bcnVkMhmaNm2KwMBAXLhwAbq6uti+fbs0vW7duhg3bhxOnjyJmjVrYuPGjQAAFxcXhIeH51he9erVAbz7Bf/Bgwc5lpdbrP7+/ujWrRtcXV1ha2uL2NhYuTq6urp5rnNecRUWFxcXZGRkyL3Pz549Q3R0dL6XVa9ePVy7dg0ODg459gcjIyOp3fHjx6N169ZwcXGRrgRlq1WrFi5evKj0Poyiosp2cXFxUfj5eF+9evUQHR2dY3tUrlxZ5VGmrKys4Ofnh99//x3z58/HihUrVJrvxo0bePbsGWbMmAEPDw84OzsrvRJz5tIl6f8vkpJw+949ODs6AgDquLjgxp07Ctch+zNMVNSYMBARaZC4uDgEBAQgOjoaf/zxBxYtWoThw4cDAKpWrQpfX1/07dsX27ZtQ0xMDM6cOYPg4GDs2fPuGRMjRozA/v37ERMTg/Pnz+PIkSNwcXEBANjb20Mmk2H37t148uQJUlJSFMZQpUoV6QpCVFQUBg4ciISEBGn66dOnMX36dJw7dw5xcXHYtm0bnjx5AhcXF8TExGDcuHGIiIjAvXv3cODAAdy6dUuKYfTo0VizZg2WLl2KW7duYe7cudi2bRt+/PFHAICnpyeaN2+OHj164ODBg4iJicG+ffsQGhqqNNZt27bh4sWLuHTpEr766qscv1o7ODjg2LFjePjwodwIQu/LK67CUqVKFXTp0gXff/89Tpw4gUuXLuHrr79GuXLl0KVLl3y1NXjwYDx//hw+Pj44e/Ys7ty5g/3796Nfv37IzMxE6dKlUaZMGaxYsQK3b9/GP//8g4CAALk2fHx8YGtri65duyI8PBx3797F1q1bERERUZirnSdVtkv2VatffvkFt27dwq+//ppjv5g4cSLWrVuHwMBAXLt2DVFRUdi0aRPGjx+vUhwTJ07Ezp07cfv2bVy7dg27d++W9t28VKxYEbq6uli0aBHu3r2LXbt2YcqUKQrrBi9fjiOnTuHarVsYMH48ypibo3Pr1gCAgG+/xalLl6Sbu2/duoWdO3fypmdSKyYMREQapG/fvnjz5g0aNmyIwYMHY/jw4RgwYIA0PSQkBH379sWoUaNQrVo1dO3aFWfPnpW6KmRmZmLw4MFwcXFBu3btULVqVSxZsgQAUK5cOQQGBmLs2LGwsbFRegIyfvx41KtXD97e3mjRooV0QpnN1NQUx44dQ4cOHVC1alWMHz8ec+bMQfv27WFoaIgbN26gR48eqFq1KgYMGIDBgwdj4MCBAICuXbtiwYIF+OWXX1CjRg0sX74cISEh0pCjALB161Y0aNA
APj4+qF69OsaMGaP0CsHcuXNRunRpNGnSBJ07d4a3tzfq1asnVycoKAixsbFwcnKClZWVwnZUiauwhISEwM3NDZ06dYK7uzuEENi7d2+OLjt5KVu2LMLDw5GZmYm2bdvC1dUVI0aMgLm5ObS0tKClpYVNmzYhMjISNWvWxMiRIzF79my5NnR1dXHgwAFYW1ujQ4cOcHV1zXVUqk8pr+3SuHFjrFy5EgsWLEDt2rVx4MCBHImAt7c3du/ejQMHDqBBgwZo3Lgx5s2bB3t7e5Vi0NXVxbhx41CrVi00b94c2tra2LRpk0rzWllZYc2aNdi8eTOqV6+OGTNmKO3KFDRiBEbPnImmvXsj4dkzbFm0CLr/v56u1aphf0gIbt68CQ8PD9StWxcTJ05E2bJlVYqD6FOQCfHeAN9EKkhOToaZmRmSkpJgamqq7nCI5KSmpiImJgaOjo4q9VkmIipqbz547oIiBgV82F9ux0B+f1NB8QoDEREREREpxYShmMnMzMSECRPg6OgIAwMDODk5YcqUKXJPihVCYOLEibCzs4OBgQG8vLxyPE2TiIiIiAhgwlDszJw5E0uXLsWvv/6KqKgozJw5E7NmzcKiRYukOrNmzcLChQuxbNkynD59GkZGRvD29kZqaqoaIyciIiIiTcTnMBQzJ0+eRJcuXdCxY0cA70YH+eOPP3DmzBkA764uzJ8/H+PHj5dGnli3bh1sbGywY8cO9OnTR22xExEREZHm4RWGYqZJkyY4fPiwNIb5pUuXcOLECbRv3x4AEBMTg0ePHsHLy0uax8zMDI0aNVI6jF5aWhqSk5Pl/oiIiIioZOAVhmJm7NixSE5OhrOzM7S1tZGZmYlp06bB19cXAPDo0SMAgI2Njdx8NjY20rQPBQcHI1DBEyeJiIiIqPjjFYZi5q+//sKGDRuwceNGnD9/HmvXrsUvv/yCtWvXFrjNcePGISkpSfq7f/9+IUZMRERERJqMVxiKmdGjR2Ps2LHSvQiurq64d+8egoOD4efnB1tbWwBAQkIC7OzspPkSEhJQp04dhW3q6elBT0/vk8dORERERJqHVxiKmdevX0NLS/5t1dbWRlZWFgDA0dERtra2OHz4sDQ9OTkZp0+fhru7e5HGSkRERESajwlDMdO5c2dMmzYNe/bsQWxsLLZv3465c+eiW7duAACZTIYRI0Zg6tSp2LVrF65cuYK+ffuibNmy6Nq1q3qDJyrhhBAYMGAALCwsIJPJcPHixTzniY2NVbmupmrRogVGjBiRa501a9bA3Ny8SOIhIiJ57JJUzCxatAgTJkzAoEGD8PjxY5QtWxYDBw7ExIkTpTpjxozBq1evMGDAACQmJqJZs2YIDQ3N8Qh5omJnslkRLy8pX9VDQ0OxZs0ahIWFoVKlSrC0tPxEgWmWbdu2QUdHR3rt4OCAESNGyCURvXv3RocOHdQQHRERMWEoZkxMTDB//nzMnz9faR2ZTIagoCAEBQUVXWBElKc7d+7Azs4OTZo0UXcoRcrCwiLPOgYGBjAwMCiCaIiI6EPskkREpAH8/f0xdOhQxMXFQSaTwcHBAcC7qw7NmjWDubk5ypQpg06dOuHOnTtK23nx4gV8fX1hZWUFAwMDVKlSBSEhIdL0+/fvo1evXjA3N4eFhQW6dOmC2NhYpe2FhYVBJpNhz549qFWrFvT19dG4cWNcvXpVrt7WrVtRo0YN6OnpwcHBAXPmzJGbvmTJElSpUgX6+vqwsbHBl19+KU17v0tSixYtcO/ePYwcORIymQwymQyAfJekmzdvQiaT4caNG3LLmDdvHpycnKTXV69eRfv27WFsbAwbGxt88803ePr0qdJ1JSIixZgwEBFpgAULFiAoKAjly5dHfHw8zp49CwB49eoVAgICcO7cORw+fBhaWlro1q2bNJDBhyZMmIDr169j3759iIqKwtKlS6WuTW/fvoW3tzdMTExw/PhxhIeHw9jYGO3atUN6enqu8Y0ePRpz5szB2bN
nYWVlhc6dO+Pt27cAgMjISPTq1Qt9+vTBlStXMHnyZEyYMAFr1qwBAJw7dw7Dhg1DUFAQoqOjERoaiubNmytczrZt21C+fHkEBQUhPj4e8fHxOepUrVoV9evXx4YNG+TKN2zYgK+++goAkJiYiFatWqFu3bo4d+4cQkNDkZCQgF69euW6nkRElBO7JBERaQAzMzOYmJhAW1tbGv4YAHr06CFX77fffoOVlRWuX7+OmjVr5mgnLi4OdevWRf369QFAulIBAH/++SeysrKwatUq6Zf7kJAQmJubIywsDG3btlUa36RJk9CmTRsAwNq1a1G+fHls374dvXr1wty5c9G6dWtMmDABwLsT+uvXr2P27Nnw9/dHXFwcjIyM0KlTJ5iYmMDe3h5169ZVuBwLCwtoa2vDxMREbjt8yNfXF7/++iumTJkC4N1Vh8jISPz+++8AgF9//RV169bF9OnT5bZdhQoVcPPmTVStWlVp20REJI9XGIiINNitW7fg4+ODSpUqwdTUVEoA4uLiFNb/4YcfsGnTJtSpUwdjxozByZMnpWmXLl3C7du3YWJiAmNjYxgbG8PCwgKpqam5dnMCIDfssoWFBapVq4aoqCgAQFRUFJo2bSpXv2nTprh16xYyMzPRpk0b2Nvbo1KlSvjmm2+wYcMGvH79uiCbQ9KnTx/Exsbi1KlTAN5dXahXrx6cnZ2ldT1y5Ii0nsbGxtK0vNaViIjk8QoDEZEG69y5M+zt7bFy5UqULVsWWVlZqFmzptIuRO3bt8e9e/ewd+9eHDx4EK1bt8bgwYPxyy+/ICUlBW5ubjm68gCAlZXVJ1sHExMTnD9/HmFhYThw4AAmTpyIyZMn4+zZswUeKtXW1hatWrXCxo0b0bhxY2zcuBE//PCDND0lJQWdO3fGzJkzc8z7/kMriYgob7zCQESkoZ49e4bo6GiMHz8erVu3houLC168eJHnfFZWVvDz88Pvv/+O+fPnY8WKFQCAevXq4datW7C2tkblypXl/szMch9yNvuXfODdjdU3b96Ei4sLAMDFxQXh4eFy9cPDw1G1alVoa2sDAEqVKgUvLy/MmjULly9fRmxsLP755x+Fy9LV1UVmZmae6+nr64s///wTERERuHv3rvSE++x1vXbtGhwcHHKsq5GRUZ5tExHR/zBhICLSUKVLl0aZMmWwYsUK3L59G//88w8CAgJynWfixInYuXMnbt++jWvXrmH37t3Sib2vry8sLS3RpUsXHD9+HDExMQgLC8OwYcPw4MGDXNsNCgrC4cOHcfXqVfj7+8PS0lJ62OOoUaNw+PBhTJkyBTdv3sTatWvx66+/4scffwQA7N69GwsXLsTFixdx7949rFu3DllZWahWrZrCZTk4OODYsWN4+PBhrqMade/eHS9fvsQPP/yAli1bomzZstK0wYMH4/nz5/Dx8cHZs2dx584d7N+/H/369VMpGSEiov9hwkBEpKG0tLSwadMmREZGombNmhg5ciRmz56d6zy6uroYN24catWqhebNm0NbWxubNm0CABgaGuLYsWOoWLEiunfvDhcXF/Tv3x+pqakwNTXNtd0ZM2Zg+PDhcHNzw6NHj/D3339DV1cXwLtf8//66y9s2rQJNWvWxMSJExEUFAR/f38AgLm5ObZt24ZWrVrBxcUFy5Ytwx9//IEaNWooXFZQUBBiY2Ph5OSUa1cpExMTdO7cGZcuXYKvr6/ctLJlyyI8PByZmZlo27YtXF1dMWLECJibm0NLi199RET5IRNCCHUHQZ+X5ORkmJmZISkpKc+TDKKilpqaipiYGDg6OvLp5YUgLCwMLVu2xIsXLwp8vwERyXvzwXNMFDFQMAqaKnI7BvL7mwqKP7MQEREREZFSTBiIiIiIiEgpDqtKRERKtWjRAuy5SkRUsvEKAxERERERKcWEgYiIiIiIlGLCQERERERESjFhICIiIiIipZgwEBERERGRUkwYiIiIiIhIKSYMRERUZNasWaPSE6N
lMhl27NjxyeMhIqK88TkMRFRiuK51LdLlXfG7kq/6LVq0QJ06dTB//vxPE5AG6N27Nzp06CC9njx5Mnbs2IGLFy/K1YuPj0fp0qWLODoiIlKECQMR0WdECIHMzEyUKvV5Hr4NDAxgYGCQZz1bW9siiIaIiFTBLklERBrA398fR48exYIFCyCTySCTyRAbG4uwsDDIZDLs27cPbm5u0NPTw4kTJ+Dv74+uXbvKtTFixAi0aNFCep2VlYXg4GA4OjrCwMAAtWvXxpYtW3KNw8HBAVOmTIGPjw+MjIxQrlw5LF68WK5OXFwcunTpAmNjY5iamqJXr15ISEiQpl+6dAktW7aEiYkJTE1N4ebmhnPnzgGQ75K0Zs0aBAYG4tKlS9I6r1mzBoB8l6QmTZrgp59+kovhyZMn0NHRwbFjxwAAaWlp+PHHH1GuXDkYGRmhUaNGCAsLU2HLExFRXpgwEBFpgAULFsDd3R3ff/894uPjER8fjwoVKkjTx44dixkzZiAqKgq1atVSqc3g4GCsW7cOy5Ytw7Vr1zBy5Eh8/fXXOHr0aK7zzZ49G7Vr18aFCxcwduxYDB8+HAcPHgTwLgnp0qULnj9/jqNHj+LgwYO4e/cuevfuLc3v6+uL8uXL4+zZs4iMjMTYsWOho6OTYzm9e/fGqFGjUKNGDWmd32/n/fY2bdoEIYRU9ueff6Js2bLw8PAAAAwZMgQRERHYtGkTLl++jJ49e6Jdu3a4deuWStuKiIiU+zyvaRMRFTNmZmbQ1dWFoaGhwu44QUFBaNOmjcrtpaWlYfr06Th06BDc3d0BAJUqVcKJEyewfPlyeHp6Kp23adOmGDt2LACgatWqCA8Px7x589CmTRscPnwYV65cQUxMjJTQrFu3DjVq1MDZs2fRoEEDxMXFYfTo0XB2dgYAVKlSReFyDAwMYGxsjFKlSuXaBalXr14YMWIETpw4ISUIGzduhI+PD2QyGeLi4hASEoK4uDiULVsWAPDjjz8iNDQUISEhmD59usrbjYiIcuIVBiKiz0D9+vXzVf/27dt4/fo12rRpA2NjY+lv3bp1uHPnTq7zZicY77+OiooCAERFRaFChQpyVz+qV68Oc3NzqU5AQAC+++47eHl5YcaMGXkuLy9WVlZo27YtNmzYAACIiYlBREQEfH19AQBXrlxBZmYmqlatKreuR48e/ehlExERrzAQEX0WjIyM5F5raWnJddEBgLdv30r/T0lJAQDs2bMH5cqVk6unp6f3iaJ8Z/Lkyfjqq6+wZ88e7Nu3D5MmTcKmTZvQrVu3Arfp6+uLYcOGYdGiRdi4cSNcXV3h6vpu1KuUlBRoa2sjMjIS2tracvMZGxt/1LoQERETBiIijaGrq4vMzEyV6lpZWeHq1atyZRcvXpTuFahevTr09PQQFxeXa/cjRU6dOpXjtYuLCwDAxcUF9+/fx/3796WrDNevX0diYiKqV68uzVO1alVUrVoVI0eOhI+PD0JCQhQmDKquc5cuXTBgwACEhoZi48aN6Nu3rzStbt26yMzMxOPHj6UuS0REVHjYJYmISEM4ODjg9OnTiI2NxdOnT5GVlaW0bqtWrXDu3DmsW7cOt27dwqRJk+QSCBMTE/z4448YOXIk1q5dizt37uD8+fNYtGgR1q5dm2sc4eHhmDVrFm7evInFixdj8+bNGD58OADAy8sLrq6u8PX1xfnz53HmzBn07dsXnp6eqF+/Pt68eYMhQ4YgLCwM9+7dQ3h4OM6ePSslHIrWOSYmBhcvXsTTp0+RlpamsJ6RkRG6du2KCRMmICoqCj4+PtK0qlWrwtfXF3379sW2bdsQExODM2fOIDg4GHv27Ml1XYmIKG9MGIiINMSPP/4IbW1tVK9eHVZWVoiLi1Na19vbGxMmTMCYMWPQoEEDvHz5Uu5XdwCYMmUKJkyYgODgYLi4uKBdu3bYs2cPHB0dc41j1KhROHfuHOrWrYupU6di7ty58Pb2BvBuuNOdO3eidOn
SaN68Oby8vFCpUiX8+eefAABtbW08e/YMffv2RdWqVdGrVy+0b98egYGBCpfVo0cPtGvXDi1btoSVlRX++OMPpXH5+vri0qVL8PDwQMWKFeWmhYSEoG/fvhg1ahSqVauGrl274uzZsznqERFR/snEh51gifKQnJwMMzMzJCUlwdTUVN3hEMlJTU1FTEwMHB0doa+vr+5wPjsODg4YMWIERowYoe5QiIqtNx90J1TEoGbNArWd2zGQ399UULzCQERERERESjFhICIiIiIipZgwFDMODg6QyWQ5/gYPHgzg3aXKwYMHo0yZMjA2NkaPHj2QkJCg5qiJSFPExsayOxIREclhwlDMnD17FvHx8dLfwYMHAQA9e/YEAIwcORJ///03Nm/ejKNHj+Lff/9F9+7d1RkyEREREWkwPoehmLGyspJ7PWPGDDg5OcHT0xNJSUlYvXo1Nm7ciFatWgF4N7KIi4sLTp06hcaNG6sjZCIiIiLSYLzCUIylp6fj999/x7fffguZTIbIyEi8ffsWXl5eUh1nZ2dUrFgRERERSttJS0tDcnKy3B8RERERlQxMGIqxHTt2IDExEf7+/gCAR48eQVdXF+bm5nL1bGxs8OjRI6XtBAcHw8zMTPrLfrorERERERV/TBiKsdWrV6N9+/YoW7bsR7Uzbtw4JCUlSX/3798vpAiJiIiISNPxHoZi6t69ezh06BC2bdsmldna2iI9PR2JiYlyVxkSEhJga2urtC09PT3o6el9ynCJiIiISEPxCkMxFRISAmtra3Ts2FEqc3Nzg46ODg4fPiyVRUdHIy4uDu7u7uoIk4g+I5MnT0adOnVUrh8bGwuZTIaLFy8qrePg4ID58+er3OaaNWtydKssCJlMhh07diidLoTAgAEDYGFhkec6aBJVtmd+30cAaNGihcYMtxsWFgaZTIbExESV5ymM+Atr3yP6HPEKQzGUlZWFkJAQ+Pn5oVSp/73FZmZm6N+/PwICAmBhYQFTU1MMHToU7u7uHCGJSoQoZ5ciXZ7Ljah81T927Bhmz56NyMhIxMfHY/v27ejateunCY5yFRoaijVr1iAsLAyVKlWCpaWlukMqEJlMlmM/+vHHHzF06FD1BaVEbGwsHB0dceHChXwnNET0aTFhKIYOHTqEuLg4fPvttzmmzZs3D1paWujRowfS0tLg7e2NJUuWqCFKIvrQq1evULt2bXz77bd8Poqa3blzB3Z2dmjSpEmB2xBCIDMzU+6HG01gbGwMY2NjdYdBRJ8Rdkkqhtq2bQshBKpWrZpjmr6+PhYvXoznz5/j1atX2LZtW673LxBR0Wnfvj2mTp2Kbt26qTxPdveS3377DRUrVoSxsTEGDRqEzMxMzJo1C7a2trC2tsa0adPk5ouLi0OXLl1gbGwMU1NT9OrVK8dT32fMmAEbGxuYmJigf//+SE1NzbH8VatWwcXFBfr6+nB2dv7oHyDmzp0LV1dXGBkZoUKFChg0aBBSUlJy1NuxYweqVKkCfX19eHt75xiMYefOnahXrx709fVRqVIlBAYGIiMjQ6UY/P39MXToUMTFxUEmk8HBwQHAuyGmhw0bBmtra+jr66NZs2Y4e/asNF92V5l9+/bBzc0Nenp6OHHiRI72s7tq/fXXX/Dw8ICBgQEaNGiAmzdv4uzZs6hfvz6MjY3Rvn17PHnyRJpPUbearl27SiPhfSg77m7dusmtx4ddkvz9/dG1a1cEBgbCysoKpqam+M9//oP09HSl2ygtLQ0//vgjypUrByMjIzRq1AhhYWFK6wPAjRs30KxZM+jr66N69eo4dOiQXNcwR0dHAEDdunUhk8nQokWLXNvL9uzZM/j4+KBcuXIwNDSEq6sr/vjjjxz1MjIyMGTIEJiZmcHS0hITJkyAEKLA63Tp0iW0bNkSJiYmMDU1hZubG86dO6dSzESfGyYMRESfuTt37mDfvn0IDQ3FH3/8gdWrV6Njx4548OABjh49ipkzZ2L8+PE4ffo
0gHfdFrt06YLnz5/j6NGjOHjwIO7evYvevXtLbf7111+YPHkypk+fjnPnzsHOzi5HMrBhwwZMnDgR06ZNQ1RUFKZPn44JEyZg7dq1BV4XLS0tLFy4ENeuXcPatWvxzz//YMyYMXJ1Xr9+jWnTpmHdunUIDw9HYmIi+vTpI00/fvw4+vbti+HDh+P69etYvnw51qxZkyNpUmbBggUICgpC+fLlER8fLyUFY8aMwdatW7F27VqcP38elStXhre3N54/fy43/9ixYzFjxgxERUWhVq1aSpczadIkjB8/HufPn0epUqXw1VdfYcyYMViwYAGOHz+O27dvY+LEiapuuhyy4w4JCZFbD0UOHz6MqKgohIWF4Y8//sC2bdsQGBiotP6QIUMQERGBTZs24fLly+jZsyfatWuHW7duKayfmZmJrl27wtDQEKdPn8aKFSvw888/y9U5c+YMgHdXyePj4+UG7chNamoq3NzcsGfPHly9ehUDBgzAN998I7WXbe3atShVqhTOnDmDBQsWYO7cuVi1alWB18nX1xfly5fH2bNnERkZibFjx0JHR0elmIk+O4Ion5KSkgQAkZSUpO5QiHJ48+aNuH79unjz5k2OaderORfp38cAILZv355nvUmTJglDQ0ORnJwslXl7ewsHBweRmZkplVWrVk0EBwcLIYQ4cOCA0NbWFnFxcdL0a9euCQDizJkzQggh3N3dxaBBg+SW1ahRI1G7dm3ptZOTk9i4caNcnSlTpgh3d3chhBAxMTECgLhw4YLS+O3t7cW8efOUTt+8ebMoU6aM9DokJEQAEKdOnZLKoqKiBABx+vRpIYQQrVu3FtOnT5drZ/369cLOzk56ndf2nTdvnrC3t5dep6SkCB0dHbFhwwapLD09XZQtW1bMmjVLCCHEkSNHBACxY8cOpe0K8b/tsmrVKqnsjz/+EADE4cOHpbLg4GBRrVo16bWnp6cYPny4XFtdunQRfn5+0usPt6ei9Zw0aZLc++jn5ycsLCzEq1evpLKlS5cKY2NjaR96f9n37t0T2tra4uHDh3Lttm7dWowbN07hOu/bt0+UKlVKxMfHS2UHDx6Ui0+V/UWI/23nFy9eKK3TsWNHMWrUKOm1p6encHFxEVlZWVLZTz/9JFxcXFRep5CQEGFmZiZNMzExEWvWrMk1VmWuPrma519B5XYM5Pc3FZRmdawkIqJ8c3BwgImJifTaxsYG2tra0NLSkit7/PgxACAqKgoVKlSQewhj9erVYW5ujqioKDRo0ABRUVH4z3/+I7ccd3d3HDlyBMC7+y3u3LmD/v374/vvv5fqZGRkwMzMrMDrcujQIQQHB+PGjRtITk5GRkYGUlNT8fr1axgaGgIASpUqhQYNGkjzODs7S7E3bNgQly5dQnh4uNwVhczMzBzt5MedO3fw9u1bNG3aVCrT0dFBw4YNERUlf3N7/fr1VWrz/asPNjY2AABXV1e5suz37FOrXbu23HZxd3dHSkoK7t+/D3t7e7m6V65cQWZmZo5ur2lpaShTpozC9qOjo1GhQgW5LrANGzYslNgzMzMxffp0/PXXX3j48CHS09ORlpaW431u3LgxZDKZ9Nrd3R1z5sxBZmZmgdYpICAA3333HdavXw8vLy/07NkTTk5OhbJORJqGCQMR0Wfuw24QMplMYVlWVlahLTP7voKVK1eiUaNGctO0tbUL1GZsbCw6deqEH374AdOmTYOFhQVOnDiB/v37Iz09XeUT/ZSUFAQGBiq8cVxfX79AseWHkZGRSvXef4+yT2Q/LHv/PdPS0pLrcw8Ab9++/ZhQCyQlJQXa2tqIjIzM8V6r42bq2bNnY8GCBZg/f750/8uIESNyvQfjQwVZp8mTJ+Orr77Cnj17sG/fPkyaNAmbNm3K1z1IRJ8LJgxERCWMi4sL7t+/j/v370tXGa5fv47ExERUr15dqnP69Gn07dtXmu/UqVPS/21sbFC2bFncvXsXvr6+hRJXZGQksrKyMGf
OHOnqyF9//ZWjXkZGBs6dOyf9Qh0dHY3ExES4uLwbNrdevXqIjo5G5cqVCyUuAHBycoKuri7Cw8OlX9zfvn2Ls2fPFtnzCaysrBAfHy+9zszMxNWrV9GyZUul8+jo6CAzMzPPti9duoQ3b97AwMAAwLv32tjYWO4qVLa6desiMzMTjx8/hoeHh0qxV6tWDffv30dCQoJ0NeXDeyp0dXWl9cqP8PBwdOnSBV9//TWAd/fo3Lx5U9qXs2Xfw5Pt1KlTqFKlCrS1tQu0TgBQtWpVVK1aFSNHjoSPjw9CQkKYMFCxxISBiEhDpKSk4Pbt29LrmJgYXLx4ERYWFqhYsWKhLcfLywuurq7w9fXF/PnzkZGRgUGDBsHT01PqTjN8+HD4+/ujfv36aNq0KTZs2IBr166hUqVKUjuBgYEYNmwYzMzM0K5dO6SlpeHcuXN48eIFAgIC8h1X5cqV8fbtWyxatAidO3dGeHg4li1blqOejo4Ohg4dioULF6JUqVIYMmQIGjduLCUQEydORKdOnVCxYkV8+eWX0NLSwqVLl3D16lVMnTq1QNvMyMgIP/zwA0aPHi29H7NmzcLr16/Rv3//ArWZX61atUJAQAD27NkDJycnzJ07N8+Hlzk4OODw4cNo2rQp9PT0ULp0aYX10tPT0b9/f4wfPx6xsbGYNGkShgwZItetLVvVqlXh6+uLvn37Ys6cOahbty6ePHmCw4cPo1atWnIPDM3Wpk0bODk5wc/PD7NmzcLLly8xfvx4AP+7umJtbQ0DAwOEhoaifPny0NfXV6l7W5UqVbBlyxacPHkSpUuXxty5c5GQkJAjYYiLi0NAQAAGDhyI8+fPY9GiRZgzZ06B1unNmzcYPXo0vvzySzg6OuLBgwc4e/YsevTokWe8RJ8jJgxEVGLk90FqRe3cuXNyvxZnn3T7+flhzZo1hbYcmUyGnTt3YujQoWjevDm0tLTQrl07LFq0SKrTu3dv3LlzB2PGjEFqaip69OiBH374Afv375fqfPfddzA0NMTs2bMxevRoGBkZwdXVtcC/uNeuXRtz587FzJkzMW7cODRv3hzBwcFyVzkAwNDQED/99BO++uorPHz4EB4eHli9erU03dvbG7t370ZQUBBmzpwJHR0dODs747vvvitQXNlmzJiBrKwsfPPNN3j58iXq16+P/fv3Kz0JL2zffvstLl26hL59+6JUqVIYOXJkrlcXAGDOnDkICAjAypUrUa5cOcTGxiqs17p1a1SpUgXNmzdHWloafHx8MHnyZKXthoSEYOrUqRg1ahQePnwIS0tLNG7cGJ06dVJYX1tbGzt27MB3332HBg0aoFKlSpg9ezY6d+4sdRMrVaoUFi5ciKCgIEycOBEeHh55DtUKAOPHj8fdu3fh7e0NQ0NDDBgwAF27dkVSUpJcvb59++LNmzdo2LAhtLW1MXz4cAwYMKBA66StrY1nz56hb9++SEhIgKWlJbp3757ryFJEnzOZ+LBDJFEekpOTYWZmhqSkJJiamqo7HCI5qampiImJgaOjY5H0Vyf63Pn7+yMxMVF6HkJRCQ8PR7NmzXD79u0Sd7PwtafX8qxTw7JGgdrO7RjI728qKF5hICIiok9u+/btMDY2RpUqVXD79m0MHz4cTZs2LXHJAtHniAkDERERfXIvX77ETz/9hLi4OFhaWsLLy0u6h4CINBsTBiIiohKsMO+PyU3fvn1z3I9CRJ+HnMMfEBERERER/T8mDERULHE8ByIqiXjso0+BCQMRFSvZT8p9/fq1miMhIip62ce+D5/2TvQxeA8DERUr2traMDc3x+PHjwG8G7M/+8FQRESaIOttVp51UlNT89WmEAKvX7/G48ePYW5uDm1t7YKGR5QDEwYiKnZsbW0BQEoaiIg0yeOUvI9NpRILdopmbm4uHQOJCgsTBiIqdmQyGezs7GBtbY23b9+qOxwiIjnDtw/Ps86ubrvy3a6Ojg6vLNAnwYSBiIotbW1tfnkSkca
JT4/Psw6fVE+ahDc9a6DMzExcvHgRL168UHcoRERERFTCMWHQACNGjMDq1asBvEsWPD09Ua9ePVSoUAFhYWHqDY6IiIiISjQmDBpgy5YtqF27NgDg77//RkxMDG7cuIGRI0fi559/VnN0RERERFSSMWHQAE+fPpVGNNi7dy969uyJqlWr4ttvv8WVK1fUHB0RERERlWRMGDSAjY0Nrl+/jszMTISGhqJNmzYA3j18hTdsEhEREZE6cZQkDdCvXz/06tULdnZ2kMlk8PLyAgCcPn0azs7Oao6OiIiIiEoyJgwaYPLkyahZsybu37+Pnj17Qk9PD8C7ISHHjh2r5uiIiIiIqCRjwqAhvvzyyxxlfn5+aoiEiIiIiOh/mDCoycKFC1WuO2zYsE8YCRERERGRckwY1GTevHkq1ZPJZEwYiIiIiEhtmDCoSUxMjLpDICIiIiLKE4dV1SDp6emIjo5GRkaGukMhIiIiIgLAhEEjvH79Gv3794ehoSFq1KiBuLg4AMDQoUMxY8YMNUdHRERERCUZEwYNMG7cOFy6dAlhYWHQ19eXyr28vPDnn3/mu72HDx/i66+/RpkyZWBgYABXV1ecO3dOmi6EwMSJE2FnZwcDAwN4eXnh1q1bhbIuRERERFS8MGHQADt27MCvv/6KZs2aQSaTSeU1atTAnTt38tXWixcv0LRpU+jo6GDfvn24fv065syZg9KlS0t1Zs2ahYULF2LZsmU4ffo0jIyM4O3tjdTU1EJbJyIiIiIqHnjTswZ48uQJrK2tc5S/evVKLoFQxcyZM1GhQgWEhIRIZY6OjtL/hRCYP38+xo8fjy5dugAA1q1bBxsbG+zYsQN9+vQp4FoQERERUXHEKwwaoH79+tizZ4/0OjtJWLVqFdzd3fPV1q5du1C/fn307NkT1tbWqFu3LlauXClNj4mJwaNHj+Dl5SWVmZmZoVGjRoiIiFDYZlpaGpKTk+X+iIiIiKhk4BUGDTB9+nS0b98e169fR0ZGBhYsWIDr16/j5MmTOHr0aL7aunv3LpYuXYqAgAD897//xdmzZzFs2DDo6urCz88Pjx49AgDY2NjIzWdjYyNN+1BwcDACAwMLtnJERERE9FnjFQYN0KxZM1y8eBEZGRlwdXXFgQMHYG1tjYiICLi5ueWrraysLNSrVw/Tp09H3bp1MWDAAHz//fdYtmxZgeMbN24ckpKSpL/79+8XuC0iIiIi+rzwCoOGcHJykus6VFB2dnaoXr26XJmLiwu2bt0KALC1tQUAJCQkwM7OTqqTkJCAOnXqKGxTT08Penp6Hx0bEREREX1+mDBoiMzMTGzfvh1RUVEAgOrVq6NLly4oVSp/b1HTpk0RHR0tV3bz5k3Y29sDeHcDtK2tLQ4fPiwlCMnJyTh9+jR++OGHj18RIiIiIipWmDBogGvXruGLL77Ao0ePUK1aNQDvRjuysrLC33//jZo1a6rc1siRI9GkSRNMnz4dvXr1wpkzZ7BixQqsWLECwLsbqkeMGIGpU6eiSpUqcHR0xIQJE1C2bFl07dr1U6weEREREX3GmDBogO+++w41atTAuXPnpOclvHjxAv7+/hgwYABOnjypclsNGjTA9u3bMW7cOAQFBcHR0RHz58+Hr6+vVGfMmDF49eoVBgwYgMTERDRr1gyhoaFyD40jIiIiIgIAmRBCqDuIks7AwADnzp1DjRo15MqvXr2KBg0a4M2bN2qKTLHk5GSYmZkhKSkJpqam6g6HiIjos+K61jXPOlf8rhT6cvn9TQXFUZI0QNWqVZGQkJCj/PHjx6hcubIaIiIiIiIieocJg5q8/xC04OBgDBs2DFu2bMGDBw/w4MEDbNmyBSNGjMDMmTPVHSoRERERlWC8h0FNzM3NpSc6A4AQAr169ZLKsnuKde7cGZmZmWqJkYiIiIiICYOaHDlyRN0hEBERERHliQmDmnh6eqo7BCIiIiKiPDFh0CCvX79GXFwc0tPT5cpr1aqlpoiIiIi
IqKRjwqABnjx5gn79+mHfvn0Kp/MeBiIiIiJSF46SpAFGjBiBxMREnD59GgYGBggNDcXatWtRpUoV7Nq1S93hEREREVEJxisMGuCff/7Bzp07Ub9+fWhpacHe3h5t2rSBqakpgoOD0bFjR3WHSEREREQlFK8waIBXr17B2toaAFC6dGk8efIEAODq6orz58+rMzQiIiIiKuGYMGiAatWqITo6GgBQu3ZtLF++HA8fPsSyZctgZ2en5uiIiIiIqCRjlyQNMHz4cMTHxwMAJk2ahHbt2mHDhg3Q1dXFmjVr1BscEREREZVoTBg0wNdffy39383NDffu3cONGzdQsWJFWFpaqjEyIiIiIirpmDBoIENDQ9SrV0/dYRARERERMWFQl4CAAJXrzp079xNGQkRERESkHBMGNblw4YJK9WQy2SeOhIiIiIhIOSYManLkyBF1h0BERERElCcOq0pEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBg2wdu1a7NmzR3o9ZswYmJubo0mTJrh3754aIyMiIiKiko4JgwaYPn06DAwMAAARERFYvHgxZs2aBUtLS4wcOVLN0RERERFRScbnMGiA+/fvo3LlygCAHTt2oEePHhgwYACaNm2KFi1aqDc4IiIiIirReIVBAxgbG+PZs2cAgAMHDqBNmzYAAH19fbx580adoRERERFRCccrDBqgTZs2+O6771C3bl3cvHkTHTp0AABcu3YNDg4O6g2OiIiIiEo0XmHQAIsXL4a7uzuePHmCrVu3okyZMgCAyMhI+Pj4qDk6IiIiIirJZEIIoe4g6POSnJwMMzMzJCUlwdTUVN3hEBERfVZc17rmWeeK35VCXy6/v6mg2CVJjeLi4uReV6xYUU2REBEREREpxoRBjRwcHCCTySCEgEwmQ2ZmprpDIiIiIiKSw3sY1CgrKwuZmZnSv4Vh8uTJkMlkcn/Ozs7S9NTUVAwePBhlypSBsbExevTogYSEhEJZNhEREREVP0wYiqEaNWogPj5e+jtx4oQ0beTIkfj777+xefNmHD16FP/++y+6d++uxmiJiIiISJOxS5Ka7Nq1S+W6X3zxRb7aLlWqFGxtbXOUJyUlYfXq1di4cSNatWoFAAgJCYGLiwtOnTqFxo0b52s5RERERFT8MWFQk65du6pUryD3Nty6dQtly5aFvr4+3N3dERwcjIoVKyIyMhJv376Fl5eXVNfZ2RkVK1ZEREQEEwYiIiIiyoEJg5pkZWV9knYbNWqENWvWoFq1aoiPj0dgYCA8PDxw9epVPHr0CLq6ujA3N5ebx8bGBo8ePVLaZlpaGtLS0qTXycnJnyR2IiIiItI8TBg0TGpqKvT19Qs8f/v27aX/16pVC40aNYK9vT3++usvGBgYFKjN4OBgBAYGFjgmIiIiIvp88aZnDZCZmYkpU6agXLlyMDY2xt27dwEAEyZMwOrVqz+qbXNzc1StWhW3b9+Gra0t0tPTkZiYKFcnISFB4T0P2caNG4ekpCTp7/79+x8VExERERF9PpgwaIBp06ZhzZo1mDVrFnR1daXymjVrYtWqVR/VdkpKCu7cuQM7Ozu4ublBR0cHhw8flqZHR0cjLi4O7u7uStvQ09ODqamp3B8RERERlQxMGDTAunXrsGLFCvj6+kJbW1sqr127Nm7cuJGvtn788UccPXoUsbGxOHnyJLp16wZtbW34+PjAzMwM/fv3R0BAAI4cOYLIyEj069cP7u7uvOGZiIiIiBTiPQwa4OHDh6hcuXKO8qysLLx9+zZfbT148AA+Pj549uwZrKys0KxZM5w6dQpWVlYAgHnz5kFLSws9evRAWloavL29sWTJkkJZDyIiIiIqfpgwaIDq1avj+PHjsLe3lyvfsmUL6tatm6+2Nm3alOt0fX19LF68GIsXL853nERERERU8jBh0AATJ06En58fHj58iKysLGzbtg3R0dFYt24ddu/ere7wiIi
IiKgE4z0MGqBLly74+++/cejQIRgZGWHixImIiorC33//jTZt2qg7PCIiIiIqwXiFQUN4eHjg4MGD6g6DiIiIiEgOrzAQEREREZFSvMKgJqVLl4ZMJlOp7vPnzz9xNEREREREijFhUJP58+dL/3/27BmmTp0Kb29v6QFqERER2L9/PyZMmKCmCImIiIiIAJkQQqg7iJKuR48eaNmyJYYMGSJX/uuvv+LQoUPYsWOHegJTIjk5GWZmZkhKSuJTn4mIiPLJda1rnnWu+F0p9OXy+5sKivcwaID9+/ejXbt2OcrbtWuHQ4cOqSEiIiIiIqJ3mDBogDJlymDnzp05ynfu3IkyZcqoISIiIiIiond4D4MGCAwMxHfffYewsDA0atQIAHD69GmEhoZi5cqVao6OiIiIiEoyJgwawN/fHy4uLli4cCG2bdsGAHBxccGJEyekBIKIiIiISB2YMGiIRo0aYcOGDeoOg4iIiIhIDhMGDZGZmYkdO3YgKioKAFCjRg188cUX0NbWVnNkRERERFSSMWHQALdv30bHjh3x4MEDVKtWDQAQHByMChUqYM+ePXByclJzhERERERUUnGUJA0wbNgwVKpUCffv38f58+dx/vx5xMXFwdHREcOGDVN3eERERERUgvEKgwY4evQoTp06BQsLC6msTJkymDFjBpo2barGyIiIiIiopOMVBg2gp6eHly9f5ihPSUmBrq6uGiIiIiIiInqHCYMG6NSpEwYMGIDTp09DCAEhBE6dOoX//Oc/+OKLL9QdHhERERGVYEwYNMDChQvh5OQEd3d36OvrQ19fH02bNkXlypWxYMECdYdHRERERCUY72HQAObm5ti5cydu3bqFGzduAHj34LbKlSurOTIiIiIiKumYMGiQKlWqoEqVKuoOg4iIiIhIwoRBAwghsGXLFhw5cgSPHz9GVlaW3PRt27apKTIiIiIiKumYMGiAESNGYPny5WjZsiVsbGwgk8nUHRIREREREQAmDBph/fr12LZtGzp06KDuUIiIiIiI5HCUJA1gZmaGSpUqqTsMIiIiIqIcmDBogMmTJyMwMBBv3rxRdyhERERERHLYJUkD9OrVC3/88Qesra3h4OAAHR0duennz59XU2REREREVNIxYdAAfn5+iIyMxNdff82bnomIiIhIozBh0AB79uzB/v370axZM3WHQkREREQkh/cwaIAKFSrA1NRU3WEQEREREeXAhEEDzJkzB2PGjEFsbKy6QyEiIiIiksMuSRrg66+/xuvXr+Hk5ARDQ8McNz0/f/5cTZERERERUUnHhEEDzJ8//5O1PWPGDIwbNw7Dhw+XlpOamopRo0Zh06ZNSEtLg7e3N5YsWQIbG5tPFgcRERERfZ6YMGgAPz+/T9Lu2bNnsXz5ctSqVUuufOTIkdizZw82b94MMzMzDBkyBN27d0d4ePgniYOIiIiIPl+8h6GYSklJga+vL1auXInSpUtL5UlJSVi9ejXmzp2LVq1awc3NDSEhITh58iROnTqlxoiJiIiISBMxYSimBg8ejI4dO8LLy0uuPDIyEm/fvpUrd3Z2RsWKFREREaGwrbS0NCQnJ8v9EREREVHJwC5JxdCmTZtw/vx5nD17Nse0R48eQVdXF+bm5nLlNjY2ePTokcL2goODERgY+ClCJSIiIiINxysManL58mVkZWUVerv379/H8OHDsWHDBujr6xdKm+PGjUNSUpL0d//+/UJpl4iIiIg0HxMGNalbty6ePn0KAKhUqRKePXtWKO1GRkbi8ePHqFevHkqVKoVSpUrh6NGjWLhwIUqVKgUbGxukp6cjMTFRbr6EhATY2toqbFNPTw+mpqZyf0RERERUMjBhUBNzc3PExMQAAGJjYwvtakPr1q1x5coVXLx4UfqrX78+fH19pf/r6Ojg8OHD0jzR0dGIi4uDu7t7ocRARERERMUH72FQkx49esDT0xN2dnaQyWSoX78+tLW1Fda9e/euyu2amJigZs2acmV
GRkYoU6aMVN6/f38EBATAwsICpqamGDp0KNzd3dG4ceOCrxARERERFUtMGNRkxYoV6N69O27fvo1hw4bh+++/h4mJSZEse968edDS0kKPHj3kHtxGRERERPQhmRBCqDuIkq5fv35YuHBhkSUMHys5ORlmZmZISkri/QxERET55LrWNc86V/yuFPpy+f1NBcUrDBogJCRE+v+DBw8AAOXLl1dXOEREREREEt70rAGysrIQFBQEMzMz2Nvbw97eHubm5pgyZconGXqViIiIiEhVvMKgAX7++WesXr0aM2bMQNOmTQEAJ06cwOTJk5Gamopp06apOUIiIiIiKqmYMGiAtWvXYtWqVfjiiy+kslq1aqFcuXIYNGgQEwYiIiIiUht2SdIAz58/h7Ozc45yZ2dnPH/+XA0RERERERG9w4RBA9SuXRu//vprjvJff/0VtWvXVkNERERERETvsEuSBpg1axY6duyIQ4cOSU9bjoiIwP3797F37141R0dERFT8RTm7FEo7LjeiCqUdIk3CKwwawNPTEzdv3kS3bt2QmJiIxMREdO/eHdHR0fDw8FB3eERERERUgvEKg4YoW7Ysb24mIiIiIo3DKwxERERERKQUEwYiIiIiIlKKCQMRERERESnFhEHNhBCIi4tDamqqukMhIiIiIsqBCYOaCSFQuXJl3L9/X92hEBERERHlwIRBzbS0tFClShU8e/ZM3aEQEREREeXAhEEDzJgxA6NHj8bVq1fVHQoRERERkRw+h0ED9O3bF69fv0bt2rWhq6sLAwMDuenPnz9XU2REREREVNIxYdAA8+fPV3cIREREREQKMWHQAH5+fuoOgYiIiIhIId7DoCHu3LmD8ePHw8fHB48fPwYA7Nu3D9euXVNzZERERERUkjFh0ABHjx6Fq6srTp8+jW3btiElJQUAcOnSJUyaNEnN0RERERFRScaEQQOMHTsWU6dOxcGDB6GrqyuVt2rVCqdOnVJjZERERERU0jFh0ABXrlxBt27dcpRbW1vj6dOnaoiIiIiIiOgdJgwawNzcHPHx8TnKL1y4gHLlyqkhIiIiIiKid5gwaIA+ffrgp59+wqNHjyCTyZCVlYXw8HD8+OOP6Nu3r7rDIyIiIqISjAmDBpg+fTqcnZ1RoUIFpKSkoHr16mjevDmaNGmC8ePHqzs8IiIiIirB+BwGDaCrq4uVK1diwoQJuHr1KlJSUlC3bl1UqVJF3aERERERUQnHhEGDVKxYERUqVAAAyGQyNUdDRERE7+s1Lu/TpitFEAdRUWOXJA2xevVq1KxZE/r6+tDX10fNmjWxatUqdYdFRERERCUcrzBogIkTJ2Lu3LkYOnQo3N3dAQAREREYOXIk4uLiEBQUpOYIiYiIiKikYsKgAZYuXYqVK1fCx8dHKvviiy9Qq1YtDB06lAkDEREREakNEwYN8PbtW9SvXz9HuZubGzIyMvLV1tKlS7F06VLExsYCAGrUqIGJEyeiffv2AIDU1FSMGjUKmzZtQlpaGry9vbFkyRLY2Nh89HoQERF9rlS5P4GopOI9DBrgm2++wdKlS3OUr1ixAr6+vvlqq3z58pgxYwYiIyNx7tw5tGrVCl26dMG1a9cAACNHjsTff/+NzZs34+jRo/j333/RvXv3QlkPIiIiIip+mE6rSUBAgPR/mUyGVatW4cCBA2jcuDEA4PTp04iLi8v3g9s6d+4s93ratGlYunQpTp06hfLly2P16tXYuHEjWrVqBQAICQmBi4sLTp06JS2biIiIiCgbEwY1uXDhgtxrNzc3AMCdO3cAAJaWlrC0tJSuDBREZmYmNm/ejFevXsHd3R2RkZF4+/YtvLy8pDrOzs6oWLEiIiIilCYMaWlpSEtLk14nJycXOCYiIiIi+rwwYVCTI0eOfLK2r1y5And3d6SmpsLY2Bjbt29H9erVcfHiRejq6sLc3Fyuvo2NDR49eqS0veDgYAQGBn6yeImIiIhIc/EehmKoWrVquHjxIk6
fPo0ffvgBfn5+uH79eoHbGzduHJKSkqS/+/fvF2K0RERERKTJeIVBA6SmpmLRokU4cuQIHj9+jKysLLnp58+fz1d7urq6qFy5MoB3XZ3Onj2LBQsWoHfv3khPT0diYqLcVYaEhATY2toqbU9PTw96enr5ioGIiIiIigcmDBqgf//+OHDgAL788ks0bNgQMpmsUNvPyspCWloa3NzcoKOjg8OHD6NHjx4AgOjoaMTFxUkPjCMiIiIieh8TBg2we/du7N27F02bNv3otsaNG4f27dujYsWKePnyJTZu3IiwsDDs378fZmZm6N+/PwICAmBhYQFTU1Pp6dIcIYmIiIiIFGHCoAHKlSsHExOTQmnr8ePH6Nu3L+Lj42FmZoZatWph//79aNOmDQBg3rx50NLSQo8ePeQe3EZEREREpIhMCCHUHURJt2/fPixcuBDLli2Dvb29usPJU3JyMszMzJCUlARTU1N1h0NERPTRXNe6Fko7V/yuFMqyVGknv/j9TQXFKwwaoH79+khNTUWlSpVgaGgIHR0duenPnz9XU2REREREVNIxYdAAPj4+ePjwIaZPnw4bG5tCv+mZiIiIiKigmDBogJMnTyIiIgK1a9dWdyhERETFSpSzi2oVx/GUiEgZPrhNAzg7O+PNmzfqDoOIiIiIKAcmDBpgxowZGDVqFMLCwvDs2TMkJyfL/RERERERqQuvv2mAdu3aAQBat24tVy6EgEwmQ2ZmpjrCIiIiIiJiwqAJjhw5ou4QiIiIiIgUYsKgATw9PdUdAhERERGRQkwYNMCxY8dynd68efMiioSIiIiISB4TBg3QokWLHGXvP4uB9zAQERERkbowYdAAL168kHv99u1bXLhwARMmTMC0adPUFBURERHll0rPfeAzH+gzwz1WA5iZmeUoa9OmDXR1dREQEIDIyEg1REVERERExOcwaDQbGxtER0erOwwiIiIiKsF4hUEDXL58We61EALx8fGYMWMG6tSpo56giIiIiIjAhEEj1KlTBzKZDEIIufLGjRvjt99+U1NURERERERMGDRCTEyM3GstLS1YWVlBX19fTRERERF9OqrcGOxyI6oIIiEiVTBh0AD29vbqDoGIiIiISCEmDBri8OHDOHz4MB4/foysrCy5aeyWRERERETqwoRBAwQGBiIoKAj169eHnZ2d3EPbiIiIiIjUiQmDBli2bBnWrFmDb775Rt2hEBERFSu9+JA0oo/G5zBogPT0dDRp0kTdYRARERER5cCEQQN899132Lhxo7rDICIiIiLKgdfpNEBqaipWrFiBQ4cOoVatWtDR0ZGbPnfuXDVFRkREREQlHRMGDXD58mXpic5Xr16Vm8YboImIiIhInZgwaIAjR46oOwQiIiIiIoV4DwMRERERESnFhIGIiIiIiJRilyQiIiLSOFHOLnnWcbkRVQSREBGvMBARERERkVJMGIiIiIiISCkmDEREREREpBQTBiIiIiIiUoo3PRczwcHB2LZtG27cuAEDAwM0adIEM2fORLVq1aQ6qampGDVqFDZt2oS0tDR4e3tjyZIlsLGxUWPkRERUUvQal/fpx1/BGUUQCRGpglcYipmjR49i8ODBOHXqFA4ePIi3b9+ibdu2ePXqlVRn5MiR+Pvvv7F582YcPXoU//77L7p3767GqImIiIhIU/EKQzETGhoq93rNmjWwtrZGZGQkmjdvjqSkJKxevRobN25Eq1atAAAhISFwcXHBqVOn0LhxY3WETUREREQailcYirmkpCQAgIWFBQAgMjISb9++hZeXl1TH2dkZFStWREREhMI20tLSkJycLPdHRERERCUDE4ZiLCsrCyNGjEDTpk1Rs2ZNAMCjR4+gq6sLc3Nzubo2NjZ49OiRwnaCg4NhZmYm/VWoUOFTh05EREREGoIJQzE2ePBgXL16FZs2bfqodsaNG4ekpCTp7/79+4UUIRERERFpOt7DUEwNGTIEu3fvxrFjx1C+fHmp3NbWFunp6Uh
MTJS7ypCQkABbW1uFbenp6UFPT+9Th0xEREREGohXGIoZIQSGDBmC7du3459//oGjo6PcdDc3N+jo6ODw4cNSWXR0NOLi4uDu7l7U4RIRERGRhuMVhmJm8ODB2LhxI3bu3AkTExPpvgQzMzMYGBjAzMwM/fv3R0BAACwsLGBqaoqhQ4fC3d2dIyQREZHGUOVZDVeKII78UiVuos8N9+piZunSpQCAFi1ayJWHhITA398fADBv3jxoaWmhR48ecg9uIyIiIiL6EBOGYkYIkWcdfX19LF68GIsXLy6CiIiIiIjoc8Z7GIiIiIiISCleYSAiIqJCE+XsknelQurnX5TLIirJeIWBiIiIiIiUYsJARERERERKMWEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFp5kQERFRoenFB6URFTu8wkBEREREREoxYSAiIiIiIqWYMBARERERkVLsaEhERESfJd4vQVQ0eIWBiIiIiIiUYsJARERERERKMWEgIiIiIiKl2PmPiIiIEOXskmcdlxtRRRAJEWkaXmEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKCQMRERERESnFhIGIiIiIiJRiwkBEREREREoxYSAiIiIiIqWYMBARERERkVJMGIiIiIiISCkmDEREREREpFQpdQdAhevYsWOYPXs2IiMjER8fj+3bt6Nr167SdCEEJk2ahJUrVyIxMRFNmzbF0qVLUaVKFfUFTUREn1SUs0uedXqNy/uU4EphBENEnx1eYShmXr16hdq1a2Px4sUKp8+aNQsLFy7EsmXLcPr0aRgZGcHb2xupqalFHCkRERERfQ54haGYad++Pdq3b69wmhAC8+fPx/jx49GlSxcAwLp162BjY4MdO3agT58+RRkqEREREX0GeIWhBImJicGjR4/g5eUllZmZmaFRo0aIiIhQOl9aWhqSk5Pl/oiIiIioZOAVhhLk0aNHAAAbGxu5chsbG2maIsHBwQgMDPyksRER0aejyv0JRETK8AoD5WncuHFISkqS/u7fv6/ukIiIiIioiDBhKEFsbW0BAAkJCXLlCQkJ0jRF9PT0YGpqKvdHRERERCUDE4YSxNHREba2tjh8+LBUlpycjNOnT8Pd3V2NkRERERGRpmKnxmImJSUFt2/fll7HxMTg4sWLsLCwQMWKFTFixAhMnToVVapUgaOjIyZMmICyZcvKPauBiIiIiCgbE4Zi5ty5c2jZsqX0OiAgAADg5+eHNWvWYMyYMXj16hUGDBiAxMRENGvWDKGhodDX11dXyDlNNlOhTtKnj4OIiIiImDAUNy1atIAQQul0mUyGoKAgBAUFFWFURERERPS54j0MRERERESkFBMGIiIiIiJSil2SiIiI1CDK2SXPOi43ooogEiKi3PEKAxERERERKcWEgYiIiIiIlGLCQERERERESvEeBiIionxwXeuaZ50rflfyrNNrXN5fwXm3otq9EFBhWUREyvAKAxERERERKcWEgYiIiIiIlGLCQERERERESrFTIxERffY07ZkGqtznoApV1kuVeyGIiD4GrzAQEREREZFSTBiIiIiIiEgpJgxERERERKQUEwYiIiIiIlKKd0oREdEnodIDxVSgys3Kqtz4+5eG3RhNRPS54BUGIiIiIiJSigkDEREREREpxYSBiIiIiIiU4j0MREQkx2HsnjzrxM7omGedQnugWCE9BE2VeK4UypIKj6Y9lK2wHkhHRJ8XXmEgIiIiIiKlmDAQEREREZFSTBiIiIiIiEgpzeocSUQapbD6stPnxcRlrAq1+L4TEZUUvMJARERERERKMWEgIiIiIiKlmDAQEREREZFSvIeBii1N63+vafFQ8aPKPqYKE5e863A8fiKikoNXGIiIiIiISCkmDEREREREpBQTBiIiIiIiUor3MJRgixcvxuzZs/Ho0SPUrl0bixY
tQsOGDdUdlmomm6lQaeMnD+NzVlj93Yvy3ozCWlZhrTsREVFJwCsMJdSff/6JgIAATJo0CefPn0ft2rXh7e2Nx48fqzs0IiIiItIgTBhKqLlz5+L7779Hv379UL16dSxbtgyGhob47bff1B0aEREREWkQJgwlUHp6OiIjI+Hl5SWVaWlpwcvLCxEREWqMjIiIiIg0De9hKIGePn2KzMxM2NjYyJXb2Njgxo0bOeqnpaUhLS1Nep2UlAQASE5O/jQBpolCaSYr7XWedT7ZOijwOcZTWAprvQprGxblun+OMt9kqjsEtVFl/ynJ24eKzqf4PshuU4jC+Z6lkoMJA+UpODgYgYGBOcorVKighmjyo1eeNczmf/oo8kPT4iksRblexXUbUtEw+0GVARWIPr1PuS++fPkSZmbc10l1TBhKIEtLS2hrayMhIUGuPCEhAba2tjnqjxs3DgEBAdLrrKwsPH/+HGXKlIFMJivU2JKTk1GhQgXcv38fpqamhdo2/Q+3c9Hgdi4a3M5Fg9u56HyqbS2EwMuXL1G2bNlCa5NKBiYMJZCuri7c3Nxw+PBhdO3aFcC7JODw4cMYMmRIjvp6enrQ09OTKzM3N/+kMZqamvILqQhwOxcNbueiwe1cNLidi86n2Na8skAFwYShhAoICICfnx/q16+Phg0bYv78+Xj16hX69eun7tCIiIiISIMwYSihevfujSdPnmDixIl49OgR6tSpg9DQ0Bw3QhMRERFRycaEoQQbMmSIwi5I6qSnp4dJkybl6AJFhYvbuWhwOxcNbueiwe1cdLitSdPIBMfWIiIiIiIiJfjgNiIiIiIiUooJAxERERERKcWEgYiIiIiIlGLCQERERERESjFhoCK3ePFiODg4QF9fH40aNcKZM2dyrb9582Y4OztDX18frq6u2Lt3bxFF+nnLz3ZeuXIlPDw8ULp0aZQuXRpeXl55vi/0Tn7352ybNm2CTCaTHp5Iucvvdk5MTMTgwYNhZ2cHPT09VK1alccOFeR3O8+fPx/VqlWDgYEBKlSogJEjRyI1NbWIov08HTt2DJ07d0bZsmUhk8mwY8eOPOcJCwtDvXr1oKenh8qVK2PNmjWfPE4iOYKoCG3atEno6uqK3377TVy7dk18//33wtzcXCQkJCisHx4eLrS1tcWsWbPE9evXxfjx44WOjo64cuVKEUf+ecnvdv7qq6/E4sWLxYULF0RUVJTw9/cXZmZm4sGDB0Uc+eclv9s5W0xMjChXrpzw8PAQXbp0KZpgP2P53c5paWmifv36okOHDuLEiRMiJiZGhIWFiYsXLxZx5J+X/G7nDRs2CD09PbFhwwYRExMj9u/fL+zs7MTIkSOLOPLPy969e8XPP/8stm3bJgCI7du351r/7t27wtDQUAQEBIjr16+LRYsWCW1tbREaGlo0ARMJIZgwUJFq2LChGDx4sPQ6MzNTlC1bVgQHByus36tXL9GxY0e5skaNGomBAwd+0jg/d/ndzh/KyMgQJiYmYu3atZ8qxGKhINs5IyNDNGnSRKxatUr4+fkxYVBBfrfz0qVLRaVKlUR6enpRhVgs5Hc7Dx48WLRq1UquLCAgQDRt2vSTxlmcqJIwjBkzRtSoUUOurHfv3sLb2/sTRkYkj12SqMikp6cjMjISXl5eUpmWlha8vLwQERGhcJ6IiAi5+gDg7e2ttD4VbDt/6PXr13j79i0sLCw+VZifvYJu56CgIFhbW6N///5FEeZnryDbedeuXXB3d8fgwYNhY2ODmjVrYvr06cjMzCyqsD87BdnOTZo0QWRkpNRt6e7du9i7dy86dOhQJDGXFPweJE3AJz1TkXn69CkyMzNhY2MjV25jY4MbN24onOfRo0cK6z969OiTxfm5K8h2/tBPP/2EsmXL5viSov8pyHY+ceIEVq9ejYsXLxZBhMVDQbbz3bt38c8//8DX1xd79+7F7du3MWjQILx9+xaTJk0qirA/OwXZzl999RWePn2KZs2
aQQiBjIwM/Oc//8F///vfogi5xFD2PZicnIw3b97AwMBATZFRScIrDEQkZ8aMGdi0aRO2b98OfX19dYdTbLx8+RLffPMNVq5cCUtLS3WHU6xlZWXB2toaK1asgJub2/+1c3+xTVZ/HMc/3UZXmJA5GGyQMe3EsEjHhIlBMcNyQUJAmReQGhuIWaYxJAhMmIFZk4mpMMyMaBaJQgxu+N+gSxCsetOJgrRmhGbAsCwQmDq5mBDYtMeLX2ic7GGUP+328/1KetHnnNPn22+6NZ+dPY+WLl2q9evXq7GxMdWl/V/59ttv9fLLL+vNN9/UoUOH9Mknn6ilpUV1dXWpLg3ATcYOA5Jm3LhxSk9PV1dXV7/jXV1dysvLG3BNXl5eQvNxfX2+rL6+Xn6/X1999ZVKSkpuZZnDXqJ97ujoUDQa1aJFi+LHYrGYJCkjI0Pt7e0qKiq6tUUPQ9fzec7Pz9eIESOUnp4eP1ZcXKyzZ8+qt7dXdrv9ltY8HF1Pn2tra+X1elVZWSlJcrlcOn/+vKqqqrR+/XqlpfE3yZvB6ntwzJgx7C4gafhpRtLY7XbNnDlTgUAgfiwWiykQCGj27NkDrpk9e3a/+ZK0b98+y/m4vj5L0qZNm1RXV6c9e/aorKwsGaUOa4n2eerUqWpra1M4HI4/HnnkET388MMKh8MqKChIZvnDxvV8nh988EEdP348Hsgk6ejRo8rPzycsWLiePl+4cOGKUHA5pBljbl2x/zF8D2JISPVV1/hv2bVrl8nMzDQ7duwwR44cMVVVVSY7O9ucPXvWGGOM1+s1NTU18fnBYNBkZGSY+vp6E4lEjM/n47aq1yDRPvv9fmO3281HH31kzpw5E3/09PSk6i0MC4n2+d+4S9K1SbTPnZ2dZvTo0WbFihWmvb3dfPHFF2b8+PHmpZdeStVbGBYS7bPP5zOjR482zc3N5sSJE2bv3r2mqKjILFmyJFVvYVjo6ekxoVDIhEIhI8m8+uqrJhQKmZMnTxpjjKmpqTFerzc+//JtVZ977jkTiUTMG2+8wW1VkXQEBiTd66+/biZPnmzsdruZNWuW2b9/f3ysvLzcLFu2rN/8Dz74wNx9993Gbrebe+65x7S0tCS54uEpkT4XFhYaSVc8fD5f8gsfZhL9PP8TgeHaJdrn1tZWc//995vMzEzjdDrNxo0bzZ9//pnkqoefRPrc19dnXnzxRVNUVGQcDocpKCgwzzzzjDl37lzyCx9GvvnmmwF/317u7bJly0x5efkVa0pLS43dbjdOp9Ns37496XXjv81mDPuGAAAAAAbGNQwAAAAALBEYAAAAAFgiMAAAAACwRGAAAAAAYInAAAAAAMASgQEAAACAJQIDAAAAAEsEBgAAAACWCAwAAAAALBEYAADXrK+vL9UlAACSjMAAAEPYnj17NGfOHGVnZ2vs2LFauHChOjo64uOnTp2Sx+NRTk6OsrKyVFZWpu+//z4+/vnnn+u+++6Tw+HQuHHjVFFRER+z2Wz67LPP+p0vOztbO3bskCRFo1HZbDa9//77Ki8vl8Ph0Hvvvafu7m55PB5NmjRJo0aNksvlUnNzc7/XicVi2rRpk+666y5lZmZq8uTJ2rhxoyTJ7XZrxYoV/eb/+uuvstvtCgQCN6NtAICbiMAAAEPY+fPntXr1ah08eFCBQEBpaWmqqKhQLBbTH3/8ofLycp0+fVq7d+/WTz/9pLVr1yoWi0mSWlpaVFFRoQULFigUCikQCGjWrFkJ11BTU6OVK1cqEolo/vz5unjxombOnKmWlhYdPnxYVVVV8nq9+uGHH+Jrnn/+efn9ftXW1urIkSNqamrShAkTJEmVlZVqamrSpUuX4vN37typSZMmye1232DHAAA3m80YY1JdBADg2vz222/Kzc1VW1ubWltbVV1drWg0qpycnCvmPvDAA3I6ndq5c+eAr2Wz2fTpp59q8eLF8WPZ2dlqaGjQ8uXLFY1Gdeedd6qhoUErV668al0LFy7
U1KlTVV9fr56eHuXm5mrr1q2qrKy8Yu7Fixc1ceJENTY2asmSJZKk6dOn67HHHpPP50ugGwCAZGCHAQCGsGPHjsnj8cjpdGrMmDG64447JEmdnZ0Kh8O69957BwwLkhQOhzVv3rwbrqGsrKzf87/++kt1dXVyuVzKycnRbbfdpi+//FKdnZ2SpEgkokuXLlme2+FwyOv16p133pEkHTp0SIcPH9by5ctvuFYAwM2XkeoCAADWFi1apMLCQm3btk0TJ05ULBbTtGnT1Nvbq5EjR1517WDjNptN/95kHuii5qysrH7PN2/erNdee00NDQ1yuVzKysrSs88+q97e3ms6r/S/f0sqLS3VqVOntH37drndbhUWFg66DgCQfOwwAMAQ1d3drfb2dm3YsEHz5s1TcXGxzp07Fx8vKSlROBzW77//PuD6kpKSq15EnJubqzNnzsSfHzt2TBcuXBi0rmAwqEcffVRPPPGEpk+fLqfTqaNHj8bHp0yZopEjR1713C6XS2VlZdq2bZuampr05JNPDnpeAEBqEBgAYIi6/fbbNXbsWL311ls6fvy4vv76a61evTo+7vF4lJeXp8WLFysYDOrEiRP6+OOP9d1330mSfD6fmpub5fP5FIlE1NbWpldeeSW+3u12a+vWrQqFQjp48KCefvppjRgxYtC6pkyZon379qm1tVWRSERPPfWUurq64uMOh0Pr1q3T2rVr9e6776qjo0P79+/X22+/3e91Kisr5ff7ZYzpd/cmAMDQQmAAgCEqLS1Nu3bt0o8//qhp06Zp1apV2rx5c3zcbrdr7969Gj9+vBYsWCCXyyW/36/09HRJ0ty5c/Xhhx9q9+7dKi0tldvt7ncnoy1btqigoEAPPfSQHn/8cVVXV2vUqFGD1rVhwwbNmDFD8+fP19y5c+Oh5Z9qa2u1Zs0avfDCCyouLtbSpUv1yy+/9Jvj8XiUkZEhj8cjh8NxA50CANxK3CUJAJAS0WhURUVFOnDggGbMmJHqcgAAFggMAICk6uvrU3d3t6qrq/Xzzz8rGAymuiQAwFXwL0kAgKQKBoPKz8/XgQMH1NjYmOpyAACDYIcBAAAAgCV2GAAAAABYIjAAAAAAsERgAAAAAGCJwAAAAADAEoEBAAAAgCUCAwAAAABLBAYAAAAAlggMAAAAACwRGAAAAABY+hsm0CsDFBGZFwAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/instance_threshold_pred_VNet_Generalized_latest(1).tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "tags": [] + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAwwAAAHHCAYAAAASz98lAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAA9hAAAPYQGoP6dpAACLWklEQVR4nOzdd1gUV9sG8HtB6U2kqiiIKKjYsCEiduy9hqgYFd9Y0cT2xoYascQeY00QjSWxGwt2LIgNuyA2EDWIBQERRYHz/eHHvK6wsCDLgt6/6+LSPXNm5pnZ2dl59pw5IxNCCBAREREREWVDQ90BEBERERFR0cWEgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiAkDEREREREpVCAJw/Tp0yGTyeTKbG1t4e3tXRCLz1F0dDRkMhnWrVsnlXl7e8PAwEDl684kk8kwffr0Qltffly4cAGNGjWCvr4+ZDIZrly5kudl2NraokOHDgUfHKlNUFAQatWqBR0dHchkMiQkJOR5GTKZDCNGjCj44L4gqjhHNG3aFE2bNi3QZSpj3bp1kMlkuHjxYqGv+0vUtGlTVK9eXd1hqFzmcRMdHZ3nebO7xigowcHBkMlkCA4OzvO8qrjWUNfn+nN4e3vD1tY23/N/CeeU/J7jVbHtqvq8FKkWhv379xfZC++iHFtu3r9/j549eyI+Ph6LFi3Chg0bUKFChWzrhoeHY/r06fk6qVPx8uLFC/Tq1Qu6urpYvnw5NmzYAH19/WzrnjlzBtOnT89XQkH597V8Hovz+VUZ//77L6ZPn56vH2ooe7a2tl/0MfMl+u233+R+3C3KilOshaWEqhYcGRkJDY285SP79+/H8uXL83QSqFChAt68eYOSJUvmMcK8ySm2N2/eoEQJle3Kz3bv3j08ePAAa9asweDBg3OsGx4eDj8/PzRt2vSzfjGgou/ChQt49eoVZs6ciZYtW+ZY98yZM/Dz84O3tzdMTEwKJ8AvSH7PETl9Hg8dOlRA0alffs79xcm///4LPz8/2NraolatWuoOh0gtfvvtN5iZmRVK75PPVZxiLSwqu8rV1tZW1aIBAGlpacjIyICWlhZ0dHRUuq7cqHv9uXn69CkA8EJPBTIyMvDu3bsifwxkh8dF4VHF8aGlpVXgyyQiIspOnrsknT59GvXq1YOOjg7s7e2xatWqbOt9eg/D+/fv4efnBwcHB+jo6KB06dJo3LgxDh8+DOBDH7jly5cD+NAXLPMP+N99Cr/88gsWL14Me3t7aGtrIzw8PNt7GDLdv38fnp6e0NfXR5kyZTBjxgwIIaTpivoufrrMnGLLLPv0l7HLly+jbdu2MDIygoGBAVq0aIGzZ8/K1cnsuxYSEoKxY8fC3Nwc+vr66Nq1K549e5b9G/
CJY8eOwd3dHfr6+jAxMUHnzp0REREhTff29oaHhwcAoGfPnpDJZAr7R65btw49e/YEADRr1kzazk/3z+nTp1G/fn3o6OigYsWKWL9+fZZlJSQkwNfXFzY2NtDW1kalSpUwd+5cZGRk5LpNu3fvRvv27VGmTBloa2vD3t4eM2fORHp6epa6586dQ7t27VCqVCno6+ujRo0aWLJkiVydW7duoVevXjA3N4euri6qVKmCn376SW4fZdeakl0/wMz++hs3bkS1atWgra2NoKAgAMAvv/yCRo0aoXTp0tDV1YWLiwu2bduW7Tb++eefqF+/PvT09FCqVCk0adJE+sV4wIABMDMzw/v377PM17p1a1SpUiXnHQhg69atcHFxga6uLszMzPDtt9/i8ePH0vSmTZtiwIABAIB69epBJpMp/CVl+vTpGDduHADAzs5OOi4+7Saza9cuVK9eHdra2qhWrZq0Xz72+PFjfPfdd7C0tJTq/fHHH7luDwAEBASgefPmsLCwgLa2NqpWrYoVK1ZkqXfx4kV4enrCzMwMurq6sLOzw3fffSdXZ8uWLXBxcYGhoSGMjIzg7Oyc5bi5f/8+evbsCVNTU+jp6aFhw4bYt29flvW9ffsW06dPR+XKlaGjowNra2t069YN9+7dk+p8eo548OABhg0bhipVqkBXVxelS5dGz5495fZpbp/H7Po6P336FIMGDYKlpSV0dHRQs2ZNBAYGytX5+Hy6evVq6Xxar149XLhwQeH+/1RKSgqGDh2K0qVLw8jICP3798fLly+z1Dtw4IB0jjI0NET79u1x8+ZNaXpO59c6deqgW7ducstzdnaGTCbDtWvXpLK//voLMplM7tyn7LGWmpqKadOmoVKlStDW1oaNjQ3Gjx+P1NRUuXqZn31ljvOPBQcHo169egCAgQMHStv36XdWeHg4mjVrBj09PZQtWxbz5s3Ld6zZybxX4tq1a/Dw8ICenh4qVaoknaNOnDiBBg0aSOfII0eOZFmGMt9rAHDz5k00b94curq6KFeuHGbNmqXw3J/b8aGs3K4x8uLUqVPo2bMnypcvL+3nMWPG4M2bN9nWz+1aA/jw49LixYtRrVo16OjowNLSEkOHDs32M/OpZcuWoVq1atL3Rd26dbFp06Yc53n37h2mTp0KFxcXGBsbQ19fH+7u7jh+/LhcvbyeDzKPfx0dHVSvXh07d+7MNX7gwzXhzZs3ceLECekz8On5KzU1Valroc85ZjKP/4+PzYCAALnvNGVizY0y5/iPFdT5VJHDhw+jcePGMDExgYGBAapUqYL//ve/edqmPLUwXL9+Ha1bt4a5uTmmT5+OtLQ0TJs2DZaWlrnOO336dPj7+2Pw4MGoX78+kpKScPHiRVy6dAmtWrXC0KFD8e+//+Lw4cPYsGFDtssICAjA27dv4ePjA21tbZiamio8CaWnp6NNmzZo2LAh5s2bh6CgIEybNg1paWmYMWNGXjZbqdg+dvPmTbi7u8PIyAjjx49HyZIlsWrVKjRt2lQ6KX9s5MiRKFWqFKZNm4bo6GgsXrwYI0aMwF9//ZXjeo4cOYK2bduiYsWKmD59Ot68eYNly5bBzc0Nly5dgq2tLYYOHYqyZcti9uzZGDVqFOrVq6fw/WrSpAlGjRqFpUuX4r///S+cnJwAQPoXAO7evYsePXpg0KBBGDBgAP744w94e3vDxcUF1apVA/DhwPfw8MDjx48xdOhQlC9fHmfOnMGkSZMQGxuLxYsX57hd69atg4GBAcaOHQsDAwMcO3YMU6dORVJSEubPny/VO3z4MDp06ABra2uMHj0aVlZWiIiIwN69ezF69GgAH04O7u7uKFmyJHx8fGBra4t79+7hn3/+wc8//5xjHIocO3YMf//9N0aMGAEzMzMp2ViyZAk6deoELy8vvHv3Dlu2bEHPnj2xd+9etG/fXprfz88P06dPR6NGjTBjxgxoaWnh3LlzOHbsGFq3bo1+/fph/fr1OHjwoN
xN5k+ePMGxY8cwbdq0XPffwIEDUa9ePfj7+yMuLg5LlixBSEgILl++DBMTE/z000+oUqUKVq9ejRkzZsDOzg729vbZLq9bt264ffs2Nm/ejEWLFsHMzAwAYG5uLtU5ffo0duzYgWHDhsHQ0BBLly5F9+7dERMTg9KlSwMA4uLi0LBhQ+nCy9zcHAcOHMCgQYOQlJQEX1/fHLdrxYoVqFatGjp16oQSJUrgn3/+wbBhw5CRkYHhw4cD+HDBnHmOmjhxIkxMTBAdHY0dO3ZIyzl8+DD69u2LFi1aYO7cuQCAiIgIhISESMdNXFwcGjVqhJSUFIwaNQqlS5dGYGAgOnXqhG3btqFr164APpxnOnTogKNHj6JPnz4YPXo0Xr16hcOHD+PGjRsK9+mFCxdw5swZ9OnTB+XKlUN0dDRWrFiBpk2bIjw8HHp6ekp9Hj/25s0bNG3aFHfv3sWIESNgZ2eHrVu3wtvbGwkJCdK2Zdq0aRNevXqFoUOHQiaTYd68eejWrRvu37+vVBfPESNGwMTEBNOnT0dkZCRWrFiBBw8eSD/EAMCGDRswYMAAeHp6Yu7cuUhJScGKFSvQuHFjXL58WTpHKTq/uru7Y/PmzdLr+Ph43Lx5ExoaGjh16hRq1KgB4MMFnrm5ubRvlD3WMjIy0KlTJ5w+fRo+Pj5wcnLC9evXsWjRIty+fRu7du2Si0eZ4/xTTk5OmDFjBqZOnQofHx+4u7sDABo1aiTVefnyJdq0aYNu3bqhV69e2LZtGyZMmABnZ2e0bds2X7Fm5+XLl+jQoQP69OmDnj17YsWKFejTpw82btwIX19f/Oc//8E333yD+fPno0ePHnj48CEMDQ0BKP+99uTJEzRr1gxpaWmYOHEi9PX1sXr1aujq6maJR5njQ1m5XWPkxdatW5GSkoLvv/8epUuXxvnz57Fs2TI8evQIW7dulaur7LXG0KFDpXPzqFGjEBUVhV9//RWXL19GSEiIws/cmjVrMGrUKPTo0QOjR4/G27dvce3aNZw7dw7ffPONwm1ISkrC2rVr0bdvXwwZMgSvXr3C77//Dk9PT5w/fz5L1zhlzgeHDh1C9+7dUbVqVfj7++PFixcYOHAgypUrl+s+Xbx4MUaOHAkDAwPpB7tPr0WUuRb6nGPm8ePH0o8vkyZNgr6+PtauXZulR4wyseZGmXP8xwrqfJqdmzdvokOHDqhRowZmzJgBbW1t3L17FyEhIXnaJog86NKli9DR0REPHjyQysLDw4Wmpqb4dFEVKlQQAwYMkF7XrFlTtG/fPsflDx8+PMtyhBAiKipKABBGRkbi6dOn2U4LCAiQygYMGCAAiJEjR0plGRkZon379kJLS0s8e/ZMCCHE8ePHBQBx/PjxXJepKDYhhAAgpk2bJr3u0qWL0NLSEvfu3ZPK/v33X2FoaCiaNGkilQUEBAgAomXLliIjI0MqHzNmjNDU1BQJCQnZri9TrVq1hIWFhXjx4oVUdvXqVaGhoSH69+8vlWVu59atW3NcnhBCbN26Ndt9IsSH9xSAOHnypFT29OlToa2tLX744QepbObMmUJfX1/cvn1bbv6JEycKTU1NERMTk2MMKSkpWcqGDh0q9PT0xNu3b4UQQqSlpQk7OztRoUIF8fLlS7m6H+/LJk2aCENDQ7lj9tM6AwYMEBUqVMiyzmnTpmV5zwEIDQ0NcfPmzVzjfvfunahevbpo3ry5VHbnzh2hoaEhunbtKtLT07ONKT09XZQrV0707t1bbvrChQuFTCYT9+/fz7Luj9dpYWEhqlevLt68eSOV7927VwAQU6dOlcoyj78LFy4oXF6m+fPnCwAiKioqyzQAQktLS9y9e1cqu3r1qgAgli1bJpUNGjRIWFtbi+fPn8vN36dPH2FsbJzt+/6x7KZ7enqKihUrSq937tyZ6zaNHj1aGBkZibS0NIV1fH19BQBx6tQpqezVq1fCzs5O2NraSu/dH3/8IQCIhQsXZlnGx8fYp+eI7L
YlNDRUABDr16+XynL6PHp4eAgPDw/p9eLFiwUA8eeff0pl7969E66ursLAwEAkJSUJIf53fitdurSIj4+X6u7evVsAEP/884/C/SLE/44bFxcX8e7dO6l83rx5AoDYvXu3EOLD/jIxMRFDhgyRm//JkyfC2NhYrlzR+TVz+8PDw4UQQuzZs0doa2uLTp06yX0+atSoIbp27Sq9VvZY27Bhg9DQ0JB7n4UQYuXKlQKACAkJkcqUPc6zc+HChSzfKZk8PDyyvO+pqanCyspKdO/eXSrLS6zZyVzPpk2bpLJbt25J57SzZ89K5QcPHswSr7Lfa5mfnXPnzkllT58+FcbGxnLnkLwcH9mdiz+lzDVGdrK7Dsju8+nv7y9kMpncd4my1xqnTp0SAMTGjRvllhkUFJSl/NPPdefOnUW1atXyvF1paWkiNTVVruzly5fC0tJSfPfdd1JZXs4HtWrVEtbW1nLXJocOHRIAsv0O/VS1atXkti2TstdCeTlmsjNy5Eghk8nE5cuXpbIXL14IU1PTLN9vimJVJL/neFWcTz/9vCxatEgAkI7H/FK6S1J6ejoOHjyILl26oHz58lK5k5MTPD09c53fxMQEN2/exJ07d5RdZRbdu3eX+1UzNx8P9Zj5S9O7d++ybWotKOnp6Th06BC6dOmCihUrSuXW1tb45ptvcPr0aSQlJcnN4+PjI9f1xd3dHenp6Xjw4IHC9cTGxuLKlSvw9vaGqampVF6jRg20atUK+/fvL8Ct+p+qVatKv5ABH35lrlKlCu7fvy+Vbd26Fe7u7ihVqhSeP38u/bVs2RLp6ek4efJkjuv4+JeoV69e4fnz53B3d0dKSgpu3boF4EPTeFRUFHx9fbP0wc/cl8+ePcPJkyfx3XffyR2zH9fJDw8PD1StWjXHuF++fInExES4u7vj0qVLUvmuXbuQkZGBqVOnZhkUIDMmDQ0NeHl5Yc+ePXj16pU0fePGjWjUqBHs7OwUxnbx4kU8ffoUw4YNk+s33759ezg6OmbbpaYgtGzZUu7X9Bo1asDIyEg6LoQQ2L59Ozp27AghhNxx4enpicTERLn9lJ2P929iYiKeP38ODw8P3L9/H4mJiQD+dz/G3r17s+3SlVnn9evXOXZV2L9/P+rXr4/GjRtLZQYGBvDx8UF0dDTCw8MBANu3b4eZmRlGjhyZZRk5HWMfb8v79+/x4sULVKpUCSYmJrnuh5xitrKyQt++faWykiVLYtSoUUhOTsaJEyfk6vfu3RulSpWSXmd+rj/+LOfEx8dH7lfR77//HiVKlJDOPYcPH0ZCQgL69u0r935ramqiQYMGWbpGZCczpsxzxqlTp1CvXj20atUKp06dAvCh++ONGzekunk51rZu3QonJyc4OjrK1WvevDkAZIkxt+M8vwwMDPDtt99Kr7W0tFC/fv0s59W8xKpoPX369JFeV6lSBSYmJnBycpJr+c78f+b68/K9tn//fjRs2BD169eX6pmbm8PLy0suloI4Pj5WENcYmT7+fL5+/RrPnz9Ho0aNIITA5cuXs9TP7Vpj69atMDY2RqtWreS21cXFBQYGBjluq4mJCR49epSn7oIAoKmpKd3nlJGRgfj4eKSlpaFu3brZnmNyOx9kXnMMGDAAxsbGUr1WrVpl+32YH7ldC33uMRMUFARXV1e51hVTU9Msx2ZByOs5XpXn08zvxd27dyvVLVwRpROGZ8+e4c2bN3BwcMgyTZk+1TNmzEBCQgIqV64MZ2dnjBs3Tq4PqjJyulD6lIaGhtyJDQAqV64MACodovDZs2dISUnJdp84OTkhIyMDDx8+lCv/9GI280ObU9/GzA+QovU8f/4cr1+/znP8ufk0VuBDvB/HeufOHQQFBcHc3FzuL3MknsybbRW5efMmunbtCmNjYxgZGcHc3Fz6Ms28MMzsH57T+OWZJ7qCHuNc0XG4d+9eNGzYEDo6OjA1NYW5uTlWrFghxQx8iFtDQyPXE2z//v3x5s
0bqX9oZGQkwsLC0K9fvxzny+m4cHR0zDEJ/Ry5HRfPnj1DQkICVq9eneW4GDhwIIDcj4uQkBC0bNlSul/H3Nxc6oOZuY89PDzQvXt3+Pn5wczMDJ07d0ZAQIBcH+9hw4ahcuXKaNu2LcqVK4fvvvsuSz/0Bw8eKPxsZU4HPryfVapUyfMISG/evMHUqVOle3zMzMxgbm6OhIQEueMlLx48eAAHB4csieinMWfKz3nnY59+FxgYGMDa2lo6v2ZeuDVv3jzLe37o0KFc32/gQzcABwcHKTk4deoU3N3d0aRJE/z777+4f/8+QkJCkJGRIV3g5OVYu3PnDm7evJmlXuZ3xacxKnP+y49y5cplSTCzO6/mJVZl12NsbAwbG5ssZQDkPr/Kfq9lHoef+nTegjg+PlYQ1xiZYmJipB/jDAwMYG5uLt0L+OnnU5lrjTt37iAxMREWFhZZtjU5OTnHbZ0wYQIMDAxQv359ODg4YPjw4Up3JQkMDESNGjWkezrMzc2xb9++bM8xuZ0PMs8f+b0GVEZuMXzuMfPgwQNUqlQpS3l2ZZ8rr+d4VZ5Pe/fuDTc3NwwePBiWlpbo06cP/v777zwnD4U2FmiTJk1w79497N69G4cOHcLatWuxaNEirFy5MtehPjNl1wfycyj6BTC7m2tVSVNTM9ty8clNU0WBMrFmZGSgVatWGD9+fLZ1M0+m2UlISICHhweMjIwwY8YM2NvbQ0dHB5cuXcKECRM+KztWJK/HQXbH4alTp9CpUyc0adIEv/32G6ytrVGyZEkEBATkenNadqpWrQoXFxf8+eef6N+/P/78809oaWmhV69eeV5WYcjtuMh837799lvpZutPZfZHz869e/fQokULODo6YuHChbCxsYGWlhb279+PRYsWScuXyWTYtm0bzp49i3/++QcHDx7Ed999hwULFuDs2bMwMDCAhYUFrly5goMHD+LAgQM4cOAAAgIC0L9//yw3CKvKyJEjERAQAF9fX7i6usLY2BgymQx9+vRRyTGeHVWfdzK3Y8OGDbCyssoyXdkkq3Hjxjh69CjevHmDsLAwTJ06FdWrV4eJiQlOnTqFiIgIGBgYoHbt2nLrVeZYy8jIgLOzMxYuXJhtvU8vpFW1z5Q9r+Yl1rysRx3fQQV1fGQqiGsM4MN5v1WrVoiPj8eECRPg6OgIfX19PH78GN7e3vn6fGZkZMDCwgIbN27MdnpOPSecnJwQGRmJvXv3IigoCNu3b8dvv/2GqVOnws/PT+F8f/75J7y9vdGlSxeMGzcOFhYW0NTUhL+/v9yADJmKwnWIst8jBXXMqFJBn+M/Z9t1dXVx8uRJHD9+HPv27UNQUBD++usvNG/eHIcOHVK437OsQ9lgM0eYya65LzIyUqllmJqaYuDAgRg4cCCSk5PRpEkTTJ8+XfowF+ST6TIyMnD//n25i9Pbt28DgHRjSGb2+unDqLL7FVbZ2MzNzaGnp5ftPrl16xY0NDSUOrHnJvPBa4rWY2ZmpvAhXDkpiPfA3t4eycnJuY7tn53g4GC8ePECO3bsQJMmTaTyqKioLOsAgBs3bihcT+avPjdu3MhxnaVKlcr2gWR5+TV++/bt0NHRwcGDB+VuoAoICMgSd0ZGBsLDw3Mdj71///4YO3YsYmNjsWnTJrRv316uyTg7Hx8XmV0VMkVGRip8YF9uPve4MDc3h6GhIdLT0/N1XPzzzz9ITU3Fnj175H6FUtQM27BhQzRs2BA///wzNm3aBC8vL2zZskU612hpaaFjx47o2LEjMjIyMGzYMKxatQpTpkxBpUqVUKFCBYWfLeB/+9ne3h7nzp3D+/fv8/QsmG3btmHAgAFYsGCBVPb27dssx2Fe9nuFChVw7do1ZGRkyLUyfBpzQblz5w6aNWsmvU5OTkZsbCzatWsH4H+fUQsLi1zf85y2093dHQEBAdiyZQvS09PRqFEjaGhooHHjxlLC0KhRI+lLLy/Hmr29Pa5evYoWLVqo7E
nCQMGdVwsj1uzk5XutQoUKSl0n5OX4UFZu1xjKuH79Om7fvo3AwED0799fKlfUhVGZaw17e3scOXIEbm5u+frhU19fH71790bv3r3x7t07dOvWDT///DMmTZqkcMjmbdu2oWLFitixY4fc8ZLboBmKZJ4/Puca8HOP2889ZipUqIC7d+9mKc+u7HNjVfYcn6kgz6fZ0dDQQIsWLdCiRQssXLgQs2fPxk8//YTjx48rvTyluyRpamrC09MTu3btQkxMjFQeERGBgwcP5jr/ixcv5F4bGBigUqVKcl0FMi9wC+ppsr/++qv0fyEEfv31V5QsWRItWrQA8OHg0dTUzNKn/rfffsuyLGVj09TUROvWrbF79265rk9xcXHYtGkTGjduDCMjo3xu0f9YW1ujVq1aCAwMlIvpxo0bOHTokHSQ5VVBvAe9evVCaGhotsdFQkIC0tLSFM6b+aX/8a8a7969y/Ke1KlTB3Z2dli8eHGWWDPnNTc3R5MmTfDHH3/IHbOfLt/e3h6JiYlyzdexsbFKDxeXGbdMJpNrlYiOjs4yckmXLl2goaGBGTNmZPmV4dNfcvr27QuZTIbRo0fj/v37cn2cFalbty4sLCywcuVKuc/WgQMHEBERITdaU1587nGhqamJ7t27Y/v27dkmcLkNI5zdcZGYmJglIXv58mWW/ZiZmGXuj0/PRRoaGtIvzpl12rVrh/PnzyM0NFSq9/r1a6xevRq2trZSl7Lu3bvj+fPncueaTDn9MqepqZll+rJly7K0auVlv7dr1w5PnjyRG1EkLS0Ny5Ytg4GBgdSloqCsXr1a7j6RFStWIC0tTRrVx9PTE0ZGRpg9e3a295N8/J7ntJ2ZXY3mzp2LGjVqSN1l3N3dcfToUVy8eFHuvqq8HGu9evXC48ePsWbNmiz13rx5U2DdOgvqvFoYsWYnL99r7dq1w9mzZ3H+/Hmp3rNnz7L8up6X40MZylxjKCO7c40QIsuwyx/L7VqjV69eSE9Px8yZM7PMm5aWluNx8el2aWlpoWrVqhBCKLxPS9F2nDt3Tu6clhcfX3N83KXm8OHD0j1dudHX1/+sz8DnHjOenp4IDQ2Ve+J6fHx8ti0/nxursuf4TAV5Pv1UfHx8lrJPvxeVkaf2Gz8/PwQFBcHd3R3Dhg2TvoyqVauWa1/BqlWromnTpnBxcYGpqSkuXryIbdu2yd0s5OLiAgAYNWoUPD09oampKXeDVl7o6OggKCgIAwYMQIMGDXDgwAHs27cP//3vf6XmP2NjY/Ts2RPLli2DTCaDvb099u7dm21fsLzENmvWLGnM22HDhqFEiRJYtWoVUlNTsx1bO7/mz5+Ptm3bwtXVFYMGDZKGVTU2Ns73E1Nr1aoFTU1NzJ07F4mJidDW1pbGvlfWuHHjsGfPHnTo0EEacvX169e4fv06tm3bhujoaGlozk81atQIpUqVwoABAzBq1CjIZDJs2LAhywdPQ0MDK1asQMeOHVGrVi0MHDgQ1tbWuHXrFm7evCklK0uXLkXjxo1Rp04d+Pj4wM7ODtHR0di3b5900ujTpw8mTJiArl27YtSoUdJQZZUrV1b6BtT27dtj4cKFaNOmDb755hs8ffoUy5cvR6VKleQ+G5UqVcJPP/2EmTNnwt3dHd26dYO2tjYuXLiAMmXKwN/fX6prbm6ONm3aYOvWrTAxMVHqYr9kyZKYO3cuBg4cCA8PD/Tt21caVtXW1hZjxoxRans+lXn8//TTT+jTpw9KliyJjh075qkVa86cOTh+/DgaNGiAIUOGoGrVqoiPj8elS5dw5MiRbE9qmVq3bi21CgwdOhTJyclYs2YNLCwsEBsbK9ULDAzEb7/9hq5du8Le3h6vXr3CmjVrYGRkJCXRgwcPRnx8PJo3b45y5crhwYMHWLZsGWrVqiX19584cSI2b96Mtm3bYtSoUTA1NUVgYCCioqKwfft26Rf8/v37Y/369Rg7dizOnz8Pd3d3vH79GkeOHMGwYc
PQuXPnbLenQ4cO2LBhA4yNjVG1alWEhobiyJEjWYbmzMvn0cfHB6tWrYK3tzfCwsJga2uLbdu2ISQkBIsXL5aGxywo7969Q4sWLdCrVy9ERkbit99+Q+PGjdGpUycAgJGREVasWIF+/fqhTp066NOnD8zNzRETE4N9+/bBzc1NutDK6fxaqVIlWFlZITIyUu7m8iZNmmDChAkAIJcwAMofa/369cPff/+N//znPzh+/Djc3NyQnp6OW7du4e+//8bBgwdRt27dz95X9vb2MDExwcqVK2FoaAh9fX00aNAgT/flFVasiij7vTZ+/Hhs2LABbdq0wejRo6VhVTNbwDLl5fhQhjLXGMpwdHSEvb09fvzxRzx+/BhGRkbYvn27wvtUlLnW8PDwwNChQ+Hv748rV66gdevWKFmyJO7cuYOtW7diyZIl6NGjR7bLb926NaysrODm5gZLS0tERETg119/Rfv27XP8THfo0AE7duxA165d0b59e0RFRWHlypWoWrUqkpOT87RPMvn7+6N9+/Zo3LgxvvvuO8THx0vXgMos08XFBStWrMCsWbNQqVIlWFhYZGkJz8nnHjPjx4/Hn3/+iVatWmHkyJHSsKrly5dHfHy8XKvC58aq7Dk+U0GeTz81Y8YMnDx5Eu3bt0eFChXw9OlT/PbbbyhXrpzcwB65yuuwSidOnBAuLi5CS0tLVKxYUaxcuTLbIc8+HVZ11qxZon79+sLExETo6uoKR0dH8fPPP8sNI5WWliZGjhwpzM3NhUwmk5aZOezX/Pnzs8SjaFhVfX19ce/ePdG6dWuhp6cnLC0txbRp07IMZfns2TPRvXt3oaenJ0qVKiWGDh0qbty4kWWZimITIutwWkIIcenSJeHp6SkMDAyEnp6eaNasmThz5oxcHUXDWioa7jU7R44cEW5ubkJXV1cYGRmJjh07SkMQfro8ZYZVFUKINWvWiIoVK0rD5WbGUaFChWyHrft0GDghPgwBNmnSJFGpUiWhpaUlzMzMRKNGjcQvv/wi955nJyQkRDRs2FDo6uqKMmXKiPHjx0vD/H26T06fPi1atWolDA0Nhb6+vqhRo0aWIQ5v3LghunbtKkxMTISOjo6oUqWKmDJlilydQ4cOierVqwstLS1RpUoV8eeffyocVnX48OHZxv37778LBwcHoa2tLRwdHUVAQIDC4QD/+OMPUbt2baGtrS1KlSolPDw8xOHDh7PU+/vvvwUA4ePjk+M++9Rff/0lLd/U1FR4eXmJR48eydXJy7CqQnwYLrds2bJCQ0NDbgg6Rfvk03OAEELExcWJ4cOHCxsbG1GyZElhZWUlWrRoIVavXp3r+vfs2SNq1KghdHR0hK2trZg7d640rGlmLJcuXRJ9+/YV5cuXF9ra2sLCwkJ06NBBXLx4UVrOtm3bROvWrYWFhYXQ0tIS5cuXF0OHDhWxsbFy67t3757o0aOHdNzUr19f7N27N0tcKSkp4qeffhJ2dnbSNvXo0UNu+MlPzxEvX74UAwcOFGZmZsLAwEB4enqKW7duZbvPFH0es/vcxcXFScvV0tISzs7OWYbyzOl8mt257FOZx82JEyeEj4+PKFWqlDAwMBBeXl5yQzxnOn78uPD09BTGxsZCR0dH2NvbC29vb7n3JKfzqxBC9OzZUwAQf/31l1T27t07oaenJ7S0tOSGEP54XyhzrL17907MnTtXVKtWTfo8uri4CD8/P5GYmCi3b5Q9zrOze/duUbVqVVGiRAm57xcPD49sh83MbrhnZWPNjqL1KDqvZ7e9ynyvCSHEtWvXhIeHh9DR0RFly5YVM2fOFL///nuWoSuFUO74UGZYVWWuMbKT3fdteHi4aNmypTAwMBBmZmZiyJAh0hC6+b3WEEKI1atXCxcXF6GrqysMDQ2Fs7OzGD9+vPj333+lOp9+rletWiWaNGkiSpcuLbS1tYW9vb0YN25cru93RkaGmD17tqhQoYLQ1tYWtWvXFnv37s1yXOX1fL
B9+3bh5OQktLW1RdWqVcWOHTsUDk3+qSdPnoj27dsLQ0NDAUDazrxeCylzzChy+fJl4e7uLrS1tUW5cuWEv7+/WLp0qQAgnjx5kmusiuT3HK+K8+mnn5ejR4+Kzp07izJlyggtLS1RpkwZ0bdv3yxD3+dG9v8bSkRF0O7du9GlSxecPHkyy6+oRERE9Hl8fX2xatUqJCcnK30D8NeICQNREdahQwdERETg7t27hX6jIxER0ZfkzZs3cjeev3jxApUrV0adOnVyfDYPFeKwqkSkvC1btuDatWvYt28flixZwmSBiIjoM7m6uqJp06ZwcnJCXFwcfv/9dyQlJWHKlCnqDq3IYwsDUREkk8lgYGCA3r17Y+XKlUVqfGkiIqLi6L///S+2bduGR48eQSaToU6dOpg2bVqBDe37JWPCQERERERECin9HAYiIiIiIvr6MGEgIiIiIiKF2DGa8iwjIwP//vsvDA0NeTMuERFRMSGEwKtXr1CmTBnpAZREymDCQHn277//wsbGRt1hEBERUT48fPgQ5cqVU3cYVIwwYaA8y3wc/cOHD2FkZKTmaIiIiEgZSUlJsLGxkb7HiZTFhIHyLLMbkpGRERMGIiKiYobdiSmv2IGNiIiIiIgUYsJAREREREQKMWEgIiIiIiKFeA8DEX2x0tPT8f79e3WHQURUKEqWLAlNTU11h0FfICYMRPTFEULgyZMnSEhIUHcoRESFysTEBFZWVryxmQoUEwYi+uJkJgsWFhbQ09PjFycRffGEEEhJScHTp08BANbW1mqOiL4kTBiI6IuSnp4uJQulS5dWdzhERIVGV1cXAPD06VNYWFiwexIVGN70TERflMx7FvT09NQcCRFR4cs89/H+LSpITBiI6IvEbkhE9DXiuY9UgQkDEREREREpxISBiKiIaNq0KXx9fdUdRpG2bt06mJiYFJnl5CYlJQXdu3eHkZERZDJZsRu5a/r06ahVq5a6w1AoP++jTCbDrl27CmT9ef3MBgcHF8hxYGtri8WLF3/WMojygjc9E9FXw3bivkJdX/Sc9oW6vtwEBwejWbNmePnyZaFcLKtC79690a5duzzNY2trC19fX7kLu/wsJz8CAwNx6tQpnDlzBmZmZjA2Nlb5OilvZDIZoqKiYGtrq+5QiIosJgxERFRs6OrqSiPBFIXl5ObevXtwcnJC9erV872M9PR0yGQyaGiwUwARqQfPPkRERUhaWhpGjBgBY2NjmJmZYcqUKRBCSNNTU1Px448/omzZstDX10eDBg0QHBwsTX/w4AE6duyIUqVKQV9fH9WqVcP+/fsRHR2NZs2aAQBKlSoFmUwGb2/vbGN48eIF+vbti7Jly0JPTw/Ozs7YvHmzXJ1t27bB2dkZurq6KF26NFq2bInXr18D+NCSUb9+fejr68PExARubm548OCBNO+KFStgb28PLS0tVKlSBRs2bJBbdkJCAoYOHQpLS0vo6OigevXq2Lt3L4CsXVDu3buHzp07w9LSEgYGBqhXrx6OHDkiTW/atCkePHiAMWPGQCaTSTeEZteVJbe4ZDIZ1q5di65du0JPTw8ODg7Ys2dPtvswc90LFizAyZMnIZPJ0LRpUwDAy5cv0b9/f5QqVQp6enpo27Yt7ty5I82XGduePXtQtWpVaGtrIyYmJtt13LhxA23btoWBgQEsLS3Rr18/PH/+XJoeFBSExo0bw8TEBKVLl0aHDh1w7949uWU8evQIffv2hampKfT19VG3bl2cO3dOrs6GDRtga2sLY2Nj9OnTB69evVK43Znx7927F1WqVIGenh569OiBlJQUBAYGwtbWFqVKlcKoUaOQnp4uzZfbfslcdvny5aGnp4euXbvixYsXWda/e/du1KlTBzo6OqhYsSL8/PyQlpamMN6PvXz5El5eXjA3N4euri4cHBwQEBCg1LzAh/1Ut25dGBoawsrKCt988430XISPhYSEoEaNGtDR0UHDhg1x48YNuemnT5+Gu7s7dH
V1YWNjg1GjRkmfLyJ1YMJARFSEBAYGokSJEjh//jyWLFmChQsXYu3atdL0ESNGIDQ0FFu2bMG1a9fQs2dPtGnTRrqwGj58OFJTU3Hy5Elcv34dc+fOhYGBAWxsbLB9+3YAQGRkJGJjY7FkyZJsY3j79i1cXFywb98+3LhxAz4+PujXrx/Onz8PAIiNjUXfvn3x3XffISIiAsHBwejWrRuEEEhLS0OXLl3g4eGBa9euITQ0FD4+PtKF+s6dOzF69Gj88MMPuHHjBoYOHYqBAwfi+PHjAICMjAy0bdsWISEh+PPPPxEeHo45c+YoHE8+OTkZ7dq1w9GjR3H58mW0adMGHTt2lC6wd+zYgXLlymHGjBmIjY1FbGxstsvJLa5Mfn5+6NWrF65du4Z27drBy8sL8fHx2S5zx44dGDJkCFxdXREbG4sdO3YAALy9vXHx4kXs2bMHoaGhEEKgXbt2csNgpqSkYO7cuVi7di1u3rwJCwuLLMtPSEhA8+bNUbt2bVy8eBFBQUGIi4tDr169pDqvX7/G2LFjcfHiRRw9ehQaGhro2rUrMjIypP3n4eGBx48fY8+ePbh69SrGjx8vTQc+JGW7du3C3r17sXfvXpw4cQJz5szJdps/jn/p0qXYsmULgoKCEBwcjK5du2L//v3Yv38/NmzYgFWrVmHbtm3SPLntl3PnzmHQoEEYMWIErly5gmbNmmHWrFly6z116hT69++P0aNHIzw8HKtWrcK6devw888/5xhvpilTpiA8PBwHDhxAREQEVqxYATMzM6XmBT4MZTpz5kxcvXoVu3btQnR0dLaJ+bhx47BgwQJcuHAB5ubm6Nixo7Sd9+7dQ5s2bdC9e3dcu3YNf/31F06fPo0RI0YoHQdRgRNEeZSYmCgAiMTERHWHQpTFmzdvRHh4uHjz5k2WaRUm7C3Uv7zy8PAQTk5OIiMjQyqbMGGCcHJyEkII8eDBA6GpqSkeP34sN1+LFi3EpEmThBBCODs7i+nTp2e7/OPHjwsA4uXLl3mOrX379uKHH34QQggRFhYmAIjo6Ogs9V68eCEAiODg4GyX06hRIzFkyBC5sp49e4p27doJIYQ4ePCg0NDQEJGRkdnOHxAQIIyNjXOMtVq1amLZsmXS6woVKohFixbluJzc4hJCCABi8uTJ0uvk5GQBQBw4cEBhLKNHjxYeHh7S69u3bwsAIiQkRCp7/vy50NXVFX///bcUGwBx5cqVHLdz5syZonXr1nJlDx8+FAAU7r9nz54JAOL69etCCCFWrVolDA0NxYsXL7KtP23aNKGnpyeSkpKksnHjxokGDRoojCsz/rt370plQ4cOFXp6euLVq1dSmaenpxg6dKgQQrn90rdvX7n3QwghevfuLfc+tmjRQsyePVuuzoYNG4S1tbX0GoDYuXNntrF37NhRDBw4UOG2fcrDw0OMHj1a4fQLFy4IANJ2Z34G18+fL1KuXxcp16+LR6dPC10dHbHh/8sGdOsmvuvRQ245p06dEhoaGtJ5LbtjOlNO50B+f1N+sYWBiKgIadiwodw46q6urrhz5w7S09Nx/fp1pKeno3LlyjAwMJD+Tpw4IXUzGTVqFGbNmgU3NzdMmzYN165dy3MM6enpmDlzJpydnWFqagoDAwMcPHhQ+tW+Zs2aaNGiBZydndGzZ0+sWbMGL1++BACYmprC29sbnp6e6NixI5YsWSL3q35ERATc3Nzk1ufm5oaIiAgAwJUrV1CuXDlUrlxZqViTk5Px448/wsnJCSYmJjAwMEBERITCLjyK5BZXpho1akj/19fXh5GRUbZdTnJaT4kSJdCgQQOprHTp0qhSpYrcurS0tOTWlZ2rV6/i+PHjcseCo6MjAEjHw507d9C3b19UrFgRRkZG0o29mfvnypUrqF27NkxNTRWux9bWFoaGhtJra2vrXLdZT08P9vb20mtLS0vY2trCwMBArixzOcrsl4iICLnpwIfPx6f7ZMaMGXL7ZMiQIYiNjU
VKSkqOMQPA999/jy1btqBWrVoYP348zpw5k+s8HwsLC0PHjh1Rvnx5GBoawsPDAwCyHI8NataU/m9qbAwHW1vciooCAFyPjMSfu3fLbYOnpycyMjIQ9f91iAobb3omIiomkpOToampibCwsCxddDIvxAYPHgxPT0/s27cPhw4dgr+/PxYsWICRI0cqvZ758+djyZIlWLx4MZydnaGvrw9fX1+8e/cOAKCpqYnDhw/jzJkzOHToEJYtW4affvoJ586dg52dHQICAjBq1CgEBQXhr7/+wuTJk3H48GE0bNgw13Xn9UbkH3/8EYcPH8Yvv/yCSpUqQVdXFz169JBiLWglS5aUey2TyeS67xQUXV3dXB/AlZycjI4dO2Lu3LlZpllbWwMAOnbsiAoVKmDNmjUoU6YMMjIyUL16dWn/KLO/87PN2c1TGPsuOTkZfn5+6NatW5ZpOjo6uc7ftm1bPHjwAPv378fhw4fRokULDB8+HL/88kuu875+/Rqenp7w9PTExo0bYW5ujpiYGHh6eubpeHydkoJBPXtirJ9flmnly5dXejlEBYktDERERcinN5uePXsWDg4O0NTURO3atZGeno6nT5+iUqVKcn9WVlbSPDY2NvjPf/6DHTt24IcffsCaNWsAfPjVGoDcjabZCQkJQefOnfHtt9+iZs2aqFixIm7fvi1XRyaTwc3NDX5+frh8+TK0tLSwc+dOaXrt2rUxadIknDlzBtWrV8emTZsAAE5OTggJCcmyvqpVqwL48Av+o0ePsqwvp1i9vb3RtWtXODs7w8rKCtHR0XJ1tLS0ct3m3OIqKE5OTkhLS5N7n1+8eIHIyMg8r6tOnTq4efMmbG1tsxwP+vr60nInT56MFi1awMnJSWoJylSjRg1cuXJF4X0YhUWZ/eLk5JTt5+NjderUQWRkZJb9UalSJaVHmTI3N8eAAQPw559/YvHixVi9erVS8926dQsvXrzAnDlz4O7uDkdHR4UtMeevXpX+/zIxEXcfPICjnR0AoJaTE27du5ftNmR+hokKGxMGIqIiJCYmBmPHjkVkZCQ2b96MZcuWYfTo0QCAypUrw8vLC/3798eOHTsQFRWF8+fPw9/fH/v2fXjGhK+vLw4ePIioqChcunQJx48fh5OTEwCgQoUKkMlk2Lt3L549e4bk5ORsY3BwcJBaECIiIjB06FDExcVJ08+dO4fZs2fj4sWLiImJwY4dO/Ds2TM4OTkhKioKkyZNQmhoKB48eIBDhw7hzp07Ugzjxo3DunXrsGLFCty5cwcLFy7Ejh078OOPPwIAPDw80KRJE3Tv3h2HDx9GVFQUDhw4gKCgIIWx7tixA1euXMHVq1fxzTffZPnV2tbWFidPnsTjx4/lRhD6WG5xFRQHBwd07twZQ4YMwenTp3H16lV8++23KFu2LDp37pynZQ0fPhzx8fHo27cvLly4gHv37uHgwYMYOHAg0tPTUapUKZQuXRqrV6/G3bt3cezYMYwdO1ZuGX379oWVlRW6dOmCkJAQ3L9/H9u3b0doaGhBbnaulNkvma1Wv/zyC+7cuYNff/01y3ExdepUrF+/Hn5+frh58yYiIiKwZcsWTJ48Wak4pk6dit27d+Pu3bu4efMm9u7dKx27uSlfvjy0tLSwbNky3L9/H3v27MHMmTOzreu/ahWOnz2Lm3fuwGfyZJQ2MUHHFi0AAGO/+w5nr16Vbu6+c+cOdu/ezZueSa2YMBARFSH9+/fHmzdvUL9+fQwfPhyjR4+Gj4+PND0gIAD9+/fHDz/8gCpVqqBLly64cOGC1FUhPT0dw4cPh5OTE9q0aYPKlSvjt99+AwCULVsWfn5+mDhxIiwtLRVegEyePBl16tSBp6cnmjZtKl1QZjIyMsLJkyfRrl07VK5cGZMnT8aCBQvQtm1b6Onp4datW+jevTsqV64MHx8fDB8+HEOHDgUAdOnSBUuWLMEvv/yCatWqYdWqVQgICJCGHAWA7du3o169eujbty
+qVq2K8ePHK2whWLhwIUqVKoVGjRqhY8eO8PT0RJ06deTqzJgxA9HR0bC3t4e5uXm2y1EmroISEBAAFxcXdOjQAa6urhBCYP/+/Vm67OSmTJkyCAkJQXp6Olq3bg1nZ2f4+vrCxMQEGhoa0NDQwJYtWxAWFobq1atjzJgxmD9/vtwytLS0cOjQIVhYWKBdu3ZwdnbOcVQqVcptvzRs2BBr1qzBkiVLULNmTRw6dChLIuDp6Ym9e/fi0KFDqFevHho2bIhFixahQoUKSsWgpaWFSZMmoUaNGmjSpAk0NTWxZcsWpeY1NzfHunXrsHXrVlStWhVz5sxR2JVphq8vxs2dC7fevRH34gW2LVsGrf/fTucqVXAwIAC3b9+Gu7s7ateujalTp6JMmTJKxUGkCjIhPhrgm0gJSUlJMDY2RmJiIoyMjNQdDpGct2/fIioqCnZ2dkr1WSYiKmxvPnnuQnZ08/mwv5zOgfz+pvxiCwMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUJMGIiIiIiISCEmDEREREREpBATBiIiIiIiUogJAxERERERKcSEgYioiBBCwMfHB6amppDJZLhy5Uqu80RHRytdt6hq2rQpfH19c6yzbt06mJiYFEo8REQkr4S6AyAiKjTTjQt5fYl5qh4UFIR169YhODgYFStWhJmZmYoCK1p27NiBkiVLSq9tbW3h6+srl0T07t0b7dq1U0N0RETEhIGIqIi4d+8erK2t0ahRI3WHUqhMTU1zraOrqwtdXd1CiIaIiD7FLklEREWAt7c3Ro4ciZiYGMhkMtja2gL40OrQuHFjmJiYoHTp0ujQoQPu3buncDkvX76El5cXzM3NoaurCwcHBwQEBEjTHz58iF69esHExASmpqbo3LkzoqOjFS4vODgYMpkM+/btQ40aNaCjo4OGDRvixo0bcvW2b9+OatWqQVtbG7a2tliwYIHc9N9++w0ODg7Q0dGBpaUlevToIU37uEtS06ZN8eDBA4wZMwYymQwymQyAfJek27dvQyaT4datW3LrWLRoEezt7aXXN27cQNu2bWFgYABLS0v069cPz58/V7itRESUPSYMRERFwJIlSzBjxgyUK1cOsbGxuHDhAgDg9evXGDt2LC5evIijR49CQ0MDXbt2RUZGRrbLmTJlCsLDw3HgwAFERERgxYoVUtem9+/fw9PTE4aGhjh16hRCQkJgYGCANm3a4N27dznGN27cOCxYsAAXLlyAubk5OnbsiPfv3wMAwsLC0KtXL/Tp0wfXr1/H9OnTMWXKFKxbtw4AcPHiRYwaNQozZsxAZGQkgoKC0KRJk2zXs2PHDpQrVw4zZsxAbGwsYmNjs9SpXLky6tati40bN8qVb9y4Ed988w0AICEhAc2bN0ft2rVx8eJFBAUFIS4uDr169cpxO4mIKCt2SSIiKgKMjY1haGgITU1NWFlZSeXdu3eXq/fHH3/A3Nwc4eHhqF69epblxMTEoHbt2qhbty4ASC0VAPDXX38hIyMDa9eulX65DwgIgImJCYKDg9G6dWuF8U2bNg2tWrUCAAQGBqJcuXLYuXMnevXqhYULF6JFixaYMmUKgA8X9OHh4Zg/fz68vb0RExMDfX19dOjQAYaGhqhQoQJq166d7XpMTU2hqakJQ0NDuf3wKS8vL/z666+YOXMmgA+tDmFhYfjzzz8BAL/++itq166N2bNny+07Gxsb3L59G5UrV1a4bCIikscWBiKiIuzOnTvo27cvKlasCCMjIykBiImJybb+999/jy1btqBWrVoYP348zpw5I027evUq7t69C0NDQxgYGMDAwACmpqZ4+/Ztjt2cAMDV1VX6v6mpKapUqYKIiAgAQEREBNzc3OTqu7m54c6dO0hPT0erVq1QoUIFVKxYEf369cPGjRuRkpKSn90h6dOnD6Kjo3H27FkAH1oX6tSpA0dHR2lbjx8/Lm2ngYGBNC23bSUiIn
lMGIqZkydPomPHjihTpgxkMhl27dolTXv//j0mTJgAZ2dn6Ovro0yZMujfvz/+/fdfuWXEx8fDy8sLRkZGMDExwaBBg5CcnFzIW0JEyujYsSPi4+OxZs0anDt3DufOnQMAhV2I2rZtK90D8O+//6JFixb48ccfAQDJyclwcXHBlStX5P5u374tdeVRBUNDQ1y6dAmbN2+GtbU1pk6dipo1ayIhISHfy7SyskLz5s2xadMmAMCmTZvg5eUlTU9OTkbHjh2zbOudO3cUdociIqLsMWEoZl6/fo2aNWti+fLlWaalpKTg0qVLmDJlCi5duoQdO3YgMjISnTp1kqvn5eWFmzdv4vDhw9i7dy9OnjwJHx+fwtoEIlLSixcvEBkZicmTJ6NFixZwcnLCy5cvc53P3NwcAwYMwJ9//onFixdj9erVAIA6dergzp07sLCwQKVKleT+jI1zHnI285d84MON1bdv34aTkxMAwMnJCSEhIXL1Q0JCULlyZWhqagIASpQogZYtW2LevHm4du0aoqOjcezYsWzXpaWlhfT09Fy308vLC3/99RdCQ0Nx//599OnTR5pWp04d3Lx5E7a2tlm2VV9fP9dlExHR/zBhKGbatm2LWbNmoWvXrlmmGRsb4/Dhw+jVqxeqVKmChg0b4tdff0VYWJjUfSEiIgJBQUFYu3YtGjRogMaNG2PZsmXYsmVLlpYIIlKvUqVKoXTp0li9ejXu3r2LY8eOYezYsTnOM3XqVOzevRt3797FzZs3sXfvXunC3svLC2ZmZujcuTNOnTqFqKgoBAcHY9SoUXj06FGOy50xYwaOHj2KGzduwNvbG2ZmZujSpQsA4IcffsDRo0cxc+ZM3L59G4GBgfj111+llo29e/di6dKluHLlCh48eID169cjIyMDVapUyXZdtra2OHnyJB4/fpzjqEbdunXDq1ev8P3336NZs2YoU6aMNG348OGIj49H3759ceHCBdy7dw8HDx7EwIEDlUpGiIjof5gwfOESExMhk8mk4QhDQ0NhYmIi3RAJAC1btoSGhobU1eFTqampSEpKkvsjItXT0NDAli1bEBYWhurVq2PMmDGYP39+jvNoaWlh0qRJqFGjBpo0aQJNTU1s2bIFAKCnp4eTJ0+ifPny6NatG5ycnDBo0CC8ffsWRkZGOS53zpw5GD16NFxcXPDkyRP8888/0NLSAvDh1/y///4bW7ZsQfXq1TF16lTMmDED3t7eAAATExPs2LEDzZs3h5OTE1auXInNmzejWrVq2a5rxowZiI6Ohr29PczNzRXGZGhoiI4dO+Lq1aty3ZEAoEyZMggJCUF6ejpat24NZ2dn+Pr6wsTEBBoa/OojIsoLmRBCqDsIyh+ZTIadO3dKv/J96u3bt3Bzc4Ojo6M0/ODs2bMRGBiIyMhIuboWFhbw8/PD999/n2U506dPh5+fX5byxMTEXC8yiArb27dvERUVBTs7O+jo6Kg7nGIvODgYzZo1w8uXL6UfHojo87z55Dkm2dHNZhQ0ZeR0DkxKSoKxsTG/vynP+DPLF+r9+/fo1asXhBBYsWLFZy1r0qRJSExMlP4ePnxYQFESERERUVHH5zB8gTKThQcPHuDYsWNyvyJYWVnh6dOncvXT0tIQHx+vcMxzbW1taGtrqzRmIiIiIiqa2MLwhclMFu7cuYMjR46gdOnSctNdXV2RkJCAsLAwqezYsWPIyMhAgwYNCjtcIirimjZtCiEEuyMREX3F2MJQzCQnJ+Pu3bvS66ioKFy5cgWmpqawtrZGjx49cOnSJezduxfp6el48uQJgA8PWtLS0oKTkxPatGmDIUOGYOXKlXj//j1GjBiBPn36yI0wQkREREQEMGEodi5evIhmzZpJrzOHWBwwYACmT5+OPXv2AABq1aolN9/x48fRtGlTAB+eiDpixAi0aNECGhoa6N69O5YuXVoo8RMRERFR8cKEoZjJ7B6giDKDXpmamkpPRyUiIiIiygnvYSAiIi
IiIoWYMBARERERkUJMGIiIiIiISCEmDEREVGjWrVun1BCtMpkMu3btUnk8RESUO970TERfDedA50Jd3/UB1/NUv2nTpqhVqxYWL16smoCKgN69e6Ndu3bS6+nTp2PXrl24cuWKXL3Y2FiUKlWqkKMjIqLsMGEgIipGhBBIT09HiRLF8/Stq6sLXV3dXOspevI8EREVPnZJIiIqAry9vXHixAksWbIEMpkMMpkM0dHRCA4Ohkwmw4EDB+Di4gJtbW2cPn0a3t7e6NKli9wyfH19peetAEBGRgb8/f1hZ2cHXV1d1KxZE9u2bcsxDltbW8ycORN9+/aFvr4+ypYti+XLl8vViYmJQefOnWFgYAAjIyP06tULcXFx0vSrV6+iWbNmMDQ0hJGREVxcXHDx4kUA8l2S1q1bBz8/P1y9elXa5nXr1gGQ75LUqFEjTJgwQS6GZ8+eoWTJkjh58iQAIDU1FT/++CPKli0LfX19NGjQAMHBwUrseSIiyg0TBiKiImDJkiVwdXXFkCFDEBsbi9jYWNjY2EjTJ06ciDlz5iAiIgI1atRQapn+/v5Yv349Vq5ciZs3b2LMmDH49ttvceLEiRznmz9/PmrWrInLly9j4sSJGD16NA4fPgzgQxLSuXNnxMfH48SJEzh8+DDu37+P3r17S/N7eXmhXLlyuHDhAsLCwjBx4kSULFkyy3p69+6NH374AdWqVZO2+ePlfLy8LVu2yD1n5q+//kKZMmXg7u4OABgxYgRCQ0OxZcsWXLt2DT179kSbNm1w584dpfYVEREpVjzbtImIvjDGxsbQ0tKCnp5ett1xZsyYgVatWim9vNTUVMyePRtHjhyBq6srAKBixYo4ffo0Vq1aBQ8PD4Xzurm5YeLEiQCAypUrIyQkBIsWLUKrVq1w9OhRXL9+HVFRUVJCs379elSrVg0XLlxAvXr1EBMTg3HjxsHR0REA4ODgkO16dHV1YWBggBIlSuTYBalXr17w9fXF6dOnpQRh06ZN6Nu3L2QyGWJiYhAQEICYmBiUKVMGAPDjjz8iKCgIAQEBmD17ttL7jYiIsmILAxFRMVC3bt081b979y5SUlLQqlUrGBgYSH/r16/HvXv3cpw3M8H4+HVERAQAICIiAjY2NnKtH1WrVoWJiYlUZ+zYsRg8eDBatmyJOXPm5Lq+3Jibm6N169bYuHEjACAqKgqhoaHw8vICAFy/fh3p6emoXLmy3LaeOHHis9dNRERsYSAiKhb09fXlXmtoaMh10QGA9+/fS/9PTk4GAOzbtw9ly5aVq6etra2iKD+YPn06vvnmG+zbtw8HDhzAtGnTsGXLFnTt2jXfy/Ty8sKoUaOwbNkybNq0Cc7OznB2/jDqVXJyMjQ1NREWFgZNTU25+QwMDD5rW4iIiAkDEVGRoaWlhfT0dKXqmpub48aNG3JlV65cke4VqFq1KrS1tRETE5Nj96PsnD17NstrJycnAICTkxMePnyIhw8fSq0M4eHhSEhIQNWqVaV5KleujMqVK2PMmDHo27cvAgICsk0YlN3mzp07w8fHB0FBQdi0aRP69+8vTatduzbS09Px9OlTqcsSEREVHCYMRERFhK2tLc6dO4fo6GgYGBjA1NRUYd3mzZtj/vz5WL9+PVxdXfHnn3/ixo0bqF27NgDA0NAQP/74I8aMGYOMjAw0btwYiYmJCAkJgZGREQYMGKBw2SEhIZg3bx66dOmCw4cPY+vWrdi3bx8AoGXLlnB2doaXlxcWL16MtLQ0DBs2DB4eHqhbty7evHmDcePGoUePHrCzs8OjR49w4cIFdO/eXeE2R0VF4cqVKyhXrhwMDQ2zbQHR19dHly5dMGXKFERERKBv377StMqVK8PLywv9+/fHggULULt2bTx79gxHjx5FjRo10L59e6X2P1FhuW8ly7VOtUKIg0hZvIeBiKiI+PHHH6GpqYmqVavC3NwcMTExCut6enpiypQpGD9+POrVq4dXr17J/eoOADNnzs
SUKVPg7+8PJycntGnTBvv27YOdnV2Ocfzwww+4ePEiateujVmzZmHhwoXw9PQE8GG40927d6NUqVJo0qQJWrZsiYoVK+Kvv/4CAGhqauLFixfo378/KleujF69eqFt27bw8/PLdl3du3dHmzZt0KxZM5ibm2Pz5s0K4/Ly8sLVq1fh7u6O8uXLy00LCAhA//798cMPP6BKlSro0qULLly4kKUeERHlnUx82gmWKBdJSUkwNjZGYmIijIyM1B0OkZy3b98iKioKdnZ20NHRUXc4xY6trS18fX3h6+ur7lCIvlg3n9/MtU41s/y1MeR0DuT3N+UXWxiIiIiIiEghJgxERERERKQQb3omIiJJdHS0ukMgIqIihi0MRERERESkEBMGIiIiIiJSiAkDEREREREpxISBiIiIiIgUYsJAREREREQKMWEgIiIiIiKFmDAQEZFSpk+fjlq1aildPzo6GjKZDFeuXFFYx9bWFosXL1Z6mevWrYOJiYnS9RWRyWTYtWuXwulCCPj4+MDU1DTXbShKlNmfeX0fAaBp06ZF5unfwcHBkMlkSEhIUHqegoi/oI49ouKIz2Egoq9GhKNToa7P6VZEnuqfPHkS8+fPR1hYGGJjY7Fz50506dJFNcFRjoKCgrBu3ToEBwejYsWKMDMzU3dI+SKTybIcRz/++CNGjhypvqAUiI6Ohp2dHS5fvpznhIaIVIstDERERcTr169Rs2ZNLF++XN2hfPXu3bsHa2trNGrUCFZWVihRIu+/rwkhkJaWpoLoPo+BgQFKly6t7jCIqBhhwkBEVES0bdsWs2bNQteuXZWeJ7N7yR9//IHy5cvDwMAAw4YNQ3p6OubNmwcrKytYWFjg559/lpsvJiYGnTt3hoGBAYyMjNCrVy/ExcXJ1ZkzZw4sLS1haGiIQYMG4e3bt1nWv3btWjg5OUFHRweOjo747bff8rfx/2/hwoVwdnaGvr4+bGxsMGzYMCQnJ2ept2vXLjg4OEBHRweenp54+PCh3PTdu3ejTp060NHRQcWKFeHn56f0xbu3tzdGjhyJmJgYyGQy2NraAgBSU1MxatQoWFhYQEdHB40bN8aFCxek+TK7yhw4cAAuLi7Q1tbG6dOnsyw/s6vW33//DXd3d+jq6qJevXq4ffs2Lly4gLp168LAwABt27bFs2fPpPmy61bTpUsXeHt7Z7sdmXF37dpVbjs+7ZLk7e2NLl26wM/PD+bm5jAyMsJ//vMfvHv3TuE+Sk1NxY8//oiyZctCX18fDRo0QHBwsML6AHDr1i00btwYOjo6qFq1Ko4cOSLXNczOzg4AULt2bchkMjRt2jTH5WV68eIF+vbti7Jly0JPTw/Ozs7YvHlzlnppaWkYMWIEjI2NYWZmhilTpkAIke9tunr1Kpo1awZDQ0MYGRnBxcUFFy9eVCpmouKGCQMRUTF37949HDhwAEFBQdi8eTN+//13tG/fHo8ePcKJEycwd+5cTJ48GefOnQMAZGRkoHPnzoiPj8eJEydw+PBh3L9/H71795aW+ffff2P69OmYPXs2Ll68CGtr6yzJwMaNGzF16lT8/PPPiIiIwOzZszFlyhQEBgbme1s0NDSwdOlS3Lx5E4GBgTh27BjGjx8vVyclJQU///wz1q9fj5CQECQkJKBPnz7S9FOnTqF///4YPXo0wsPDsWrVKqxbty5L0qTIkiVLMGPGDJQrVw6xsbFSUjB+/Hhs374dgYGBuHTpEipVqgRPT0/Ex8fLzT9x4kTMmTMHERERqFGjhsL1TJs2DZMnT8alS5dQokQJfPPNNxg/fjyWLFmCU6dO4e7du5g6daqyuy6LzLgDAgLktiM7R48eRUREBIKDg7F582bs2LEDfn5+CuuPGDECoaGh2LJlC65du4aePXuiTZs2uHPnTrb109PT0aVLF+jp6eHcuXNYvXo1fvrpJ7k658+fBwAcOXIEsbGx2LFjh1Lb+fbtW7i4uGDfvn24ceMGfHx80K9fP2
l5mQIDA1GiRAmcP38eS5YswcKFC7F27dp8b5OXlxfKlSuHCxcuICwsDBMnTkTJkiWVipmo2BFEeZSYmCgAiMTERHWHQpTFmzdvRHh4uHjz5k2WaeFVHAv173MAEDt37sy13rRp04Senp5ISkqSyjw9PYWtra1IT0+XyqpUqSL8/f2FEEIcOnRIaGpqipiYGGn6zZs3BQBx/vx5IYQQrq6uYtiwYXLratCggahZs6b02t7eXmzatEmuzsyZM4Wrq6sQQoioqCgBQFy+fFlh/BUqVBCLFi1SOH3r1q2idOnS0uuAgAABQJw9e1Yqi4iIEADEuXPnhBBCtGjRQsyePVtuORs2bBDW1tbS69z276JFi0SFChWk18nJyaJkyZJi48aNUtm7d+9EmTJlxLx584QQQhw/flwAELt27VK4XCH+t1/Wrl0rlW3evFkAEEePHpXK/P39RZUqVaTXHh4eYvTo0XLL6ty5sxgwYID0+tP9md12Tps2Te59HDBggDA1NRWvX7+WylasWCEMDAykY+jjdT948EBoamqKx48fyy23RYsWYtKkSdlu84EDB0SJEiVEbGysVHb48GG5+JQ5XoT4335++fKlwjrt27cXP/zwg/Taw8NDODk5iYyMDKlswoQJwsnJSeltCggIEMbGxtI0Q0NDsW7duhxjVeTGsxu5/uVXTudAfn9TfvGmZyKiYs7W1haGhobSa0tLS2hqakJDQ0Ou7OnTpwCAiIgI2NjYwMbGRppetWpVmJiYICIiAvXq1UNERAT+85//yK3H1dUVx48fB/Dhfot79+5h0KBBGDJkiFQnLS0NxsbG+d6WI0eOwN/fH7du3UJSUhLS0tLw9u1bpKSkQE9PDwBQokQJ1KtXT5rH0dFRir1+/fq4evUqQkJC5FoU0tPTsywnL+7du4f379/Dzc1NKitZsiTq16+PiAj5m9vr1q2r1DI/bn2wtLQEADg7O8uVZb5nqlazZk25/eLq6ork5GQ8fPgQFSpUkKt7/fp1pKeno3LlynLlqampCu+NiIyMhI2NDaysrKSy+vXrF0js6enpmD17Nv7++288fvwY7969Q2pqapb3uWHDhpDJZNJrV1dXLFiwAOnp6fnaprFjx2Lw4MHYsGEDWrZsiZ49e8Le3r5AtomoqGHCQERUzH3aDUImk2VblpGRUWDrzLyvYM2aNWjQoIHcNE1NzXwtMzo6Gh06dMD333+Pn3/+Gaampjh9+jQGDRqEd+/eKX2hn5ycDD8/P3Tr1i3LNB0dnXzFlhf6+vpK1fv4Pcq8kP207OP3TENDQ67PPQC8f//+c0LNl+TkZGhqaiIsLCzLe21gYFDo8cyfPx9LlizB4sWLpftffH19c7wH41P52abp06fjm2++wb59+3DgwAFMmzYNW7ZsydM9SETFBRMGIqKvjJOTEx4+fIiHDx9KrQzh4eFISEhA1apVpTrnzp1D//79pfnOnj0r/d/S0hJlypTB/fv34eXlVSBxhYWFISMjAwsWLJBaR/7+++8s9dLS0nDx4kXpF+rIyEgkJCTAyenDsLl16tRBZGQkKlWqVCBxAYC9vT20tLQQEhIi/eL+/v17XLhwodCeT2Bubo7Y2FjpdXp6Om7cuIFmzZopnKdkyZJIT0/PddlXr17FmzdvoKurC+DDe21gYCDXCpWpdu3aSE9Px9OnT+Hu7q5U7FWqVMHDhw8RFxcntaZ8ek+FlpaWtF15ERISgs6dO+Pbb78F8OEendu3b0vHcqbMe3gynT17Fg4ODtDU1MzXNgFA5cqVUblyZYwZMwZ9+/ZFQEAAEwb6IjFhICIqIpKTk3H37l3pdVRUFK5cuQJTU1OUL1++wNbTsmVLODs7w8vLC4sXL0ZaWhqGDRsGDw8PqTvN6NGj4e3tjbp168LNzQ0bN27EzZs3UbFiRWk5fn5+GDVqFIyNjdGmTRukpqbi4sWLePnyJcaOHZvnuCpVqoT3799j2bJl6NixI0JCQrBy5cos9U
qWLImRI0di6dKlKFGiBEaMGIGGDRtKCcTUqVPRoUMHlC9fHj169ICGhgauXr2KGzduYNasWfnaZ/r6+vj+++8xbtw46f2YN28eUlJSMGjQoHwtM6+aN2+OsWPHYt++fbC3t8fChQtzfXiZra0tjh49Cjc3N2hra6NUqVLZ1nv37h0GDRqEyZMnIzo6GtOmTcOIESPkurVlqly5Mry8vNC/f38sWLAAtWvXxrNnz3D06FHUqFED7du3zzJPq1atYG9vjwEDBmDevHl49eoVJk+eDOB/rSsWFhbQ1dVFUFAQypUrBx0dHaW6tzk4OGDbtm04c+YMSpUqhYULFyIuLi5LwhATE4OxY8di6NChuHTpEpYtW4YFCxbka5vevHmDcePGoUePHrCzs8OjR49w4cIFdO/ePdd4iYojJgxE9NXI64PUCtvFixflfi3OvOgeMGAA1q1bV2Drkclk2L17N0aOHIkmTZpAQ0MDbdq0wbJly6Q6vXv3xr179zB+/Hi8ffsW3bt3x/fff4+DBw9KdQYPHgw9PT3Mnz8f48aNg76+PpydnfP9i3vNmjWxcOFCzJ07F5MmTUKTJk3g7+8v18oBAHp6epgwYQK++eYbPH78GO7u7vj999+l6Z6enti7dy9mzJiBuXPnomTJknB0dMTgwYPzFVemOXPmICMjA/369cOrV69Qt25dHDx4UOFFeEH77rvvcPXqVfTv3x8lSpTAmDFjcmxdAIAFCxZg7NixWLNmDcqWLYvo6Ohs67Vo0QIODg5o0qQJUlNT0bdvX0yfPl3hcgMCAjBr1iz88MMPePz4MczMzNCwYUN06NAh2/qamprYtWsXBg8ejHr16qFixYqYP38+OnbsKHUTK1GiBJYuXYoZM2Zg6tSpcHd3z3WoVgCYPHky7t+/D09PT+jp6cHHxwddunRBYmKiXL3+/fvjzZs3qF+/PjQ1NTF69Gj4+Pjka5s0NTXx4sUL9O/fH3FxcTAzM0O3bt1yHFmKqDiTiU87RBLlIikpCcbGxkhMTISRkZG6wyGS8/btW0RFRcHOzq5Q+qsTFXfe3t5ISEiQnodQWEJCQtC4cWPcvXv3q7tZ+Obzm7nWqWZWLV/LzukcyO9vyi+2MBAREZHK7dy5EwYGBnBwcMDdu3cxevRouLm5fXXJAlFxxISBiIiIVO7Vq1eYMGECYmJiYGZmhpYtW0r3EBBR0caEgYiI6CtWkPfH5KR///5Z7kchouIh6/AHRERERERE/48JAxF9kTieAxF9jXjuI1VgwkBEX5TMJ+WmpKSoORIiosKXee779GnvRJ+D9zAQ0RdFU1MTJiYmePr0KYAPY/ZnPhiKiKgoyHifkWudt2/f5mmZQgikpKTg6dOnMDExgaamZn7DI8qCCUMxc/LkScyfPx9hYWGIjY3Fzp070aVLF2m6EALTpk3DmjVrkJCQADc3N6xYsQIODg5Snfj4eIwcORL//PMPNDQ00L17dyxZsgQGBgZq2CKigmdlZQUAUtJARFSUPE3O/dxUIiF/l2gmJibSOZCooDBhKGZev36NmjVr4rvvvkO3bt2yTJ83bx6WLl2KwMBA2NnZYcqUKfD09ER4eLj0ABcvLy/Exsbi8OHDeP/+PQYOHAgfHx9s2rSpsDeHSCVkMhmsra1hYWGB9+/fqzscIiI5o3eOzrXOnq578rzckiVLsmWBVIJPei7GZDKZXAuDEAJlypTBDz/8gB9//BEAkJiYCEtLS6xbtw59+vRBREQEqlatigsXLqBu3boAgKCgILRr1w6PHj1CmTJlcl0vnxRJRESUf86BzrnWuT7geoGvl9/flF+86bmQpaen48qVK3j58mWBLzsqKgpPnjxBy5YtpTJjY2M0aNAAoaGhAIDQ0FCYmJhIyQIAtGzZEhoaGjh37ly2y01NTUVSUpLcHxERERF9HZgwqJivry9+//13AB+SBQ8PD9SpUwc2NjYIDg4u0HU9efIEAGBpaSlXbmlpKU178uQJLC
ws5KaXKFECpqamUp1P+fv7w9jYWPqzsbEp0LiJiIiIqOhiwqBi27ZtQ82aNQEA//zzD6KionDr1i2MGTMGP/30k5qjU86kSZOQmJgo/T18+FDdIRERERFRIWHCoGLPnz+XRivYv38/evbsicqVK+O7777D9esF2z8xcz1xcXFy5XFxcdI0KyurLCPHpKWlIT4+XuGoCtra2jAyMpL7IyIiIqKvAxMGFbO0tER4eDjS09MRFBSEVq1aAfjwYJWCHsnAzs4OVlZWOHr0qFSWlJSEc+fOwdXVFQDg6uqKhIQEhIWFSXWOHTuGjIwMNGjQoEDjISIiIqLij8OqqtjAgQPRq1cvWFtbQyaTSTcknzt3Do6OjnleXnJyMu7evSu9joqKwpUrV2Bqaory5cvD19cXs2bNgoODgzSsapkyZaSRlJycnNCmTRsMGTIEK1euxPv37zFixAj06dNHqRGSiIiIiOjrwoRBxaZPn47q1avj4cOH6NmzJ7S1tQF8eBrtxIkT87y8ixcvolmzZtLrsWPHAgAGDBiAdevWYfz48Xj9+jV8fHyQkJCAxo0bIygoSHoGAwBs3LgRI0aMQIsWLaQHty1duvQzt5SIiIiIvkR8DgPlGcdxJiIiyj8+h4GKG7YwqEBefq0fNWqUCiMhIiIiIvo8TBhUYNGiRUrVk8lkTBiIiIiIqEhjwqACUVFR6g6BiIiIiKhAcFjVQvLu3TtERkYiLS1N3aEQERERESmNCYOKpaSkYNCgQdDT00O1atUQExMDABg5ciTmzJmj5uiIiIiIiHLGhEHFJk2ahKtXryI4OFhuaNOWLVvir7/+UmNkRERERES54z0MKrZr1y789ddfaNiwIWQymVRerVo13Lt3T42RERERERHlji0MKvbs2TNYWFhkKX/9+rVcAkFEREREVBQxYVCxunXrYt++fdLrzCRh7dq1cHV1VVdYRERERERKYZckFZs9ezbatm2L8PBwpKWlYcmSJQgPD8eZM2dw4sQJdYdHRERERJQjtjCoWOPGjXHlyhWkpaXB2dkZhw4dgoWFBUJDQ+Hi4qLu8IiIiIiIcsQWhkJgb2+PNWvWqDsMIiIiIqI8Y8JQCNLT07Fz505EREQAAKpWrYrOnTujRAnufiIiIiIq2njFqmI3b95Ep06d8OTJE1SpUgUAMHfuXJibm+Off/5B9erV1RwhEREREZFivIdBxQYPHoxq1arh0aNHuHTpEi5duoSHDx+iRo0a8PHxUXd4REREREQ5YguDil25cgUXL15EqVKlpLJSpUrh559/Rr169dQYGRERERFR7tjCoGKVK1dGXFxclvKnT5+iUqVKaoiIiIiIiEh5TBhUICkpSfrz9/fHqFGjsG3bNjx69AiPHj3Ctm3b4Ovri7lz56o7VCIiIiKiHLFLkgqYmJhIT3QGACEEevXqJZUJIQAAHTt2RHp6ulpiJCIiIiJSBhMGFTh+/Li6QyAiIiIiKhBMGFTAw8ND3SEQERERERUIJgyFJCUlBTExMXj37p1ceY0aNdQUERERERFR7pgwqNizZ88wcOBAHDhwINvpvIeBiIiIiIoyjpKkYr6+vkhISMC5c+egq6uLoKAgBAYGwsHBAXv27FF3eEREREREOWILg4odO3YMu3fvRt26daGhoYEKFSqgVatWMDIygr+/P9q3b6/uEImIiIiIFGILg4q9fv0aFhYWAD484fnZs2cAAGdnZ1y6dEmdoRERERER5YoJg4pVqVIFkZGRAICaNWti1apVePz4MVauXAlra2s1R0dERERElDN2SVKx0aNHIzY2FgAwbdo0tGnTBhs3boSWlhbWrVun3uCIiIiIiHLBhEHFvv32W+n/Li4uePDgAW7duoXy5cvDzMxMjZEREREREeWOCUMh09PTQ506ddQdBhERERGRUpgwqMDYsWOVrrtw4UIVRkJERERE9HmYMKjA5cuXlaonk8lUHAkRERER0edhwqACx48fV3cIREREREQFgs
OqEhERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiAkDEREREREpxIRBxQIDA7Fv3z7p9fjx42FiYoJGjRrhwYMHaoyMiIiIiCh3TBhUbPbs2dDV1QUAhIaGYvny5Zg3bx7MzMwwZswYNUdHRERERJQzPodBxR4+fIhKlSoBAHbt2oXu3bvDx8cHbm5uaNq0qXqDIyIiIiLKBVsYVMzAwAAvXrwAABw6dAitWrUCAOjo6ODNmzfqDI2IiIiIKFdsYVCxVq1aYfDgwahduzZu376Ndu3aAQBu3rwJW1tb9QZHRERERJQLtjCo2PLly+Hq6opnz55h+/btKF26NAAgLCwMffv2LfD1paenY8qUKbCzs4Ouri7s7e0xc+ZMCCGkOkIITJ06FdbW1tDV1UXLli1x586dAo+FiIiIiIo/tjComImJCX799dcs5X5+fipZ39y5c7FixQoEBgaiWrVquHjxIgYOHAhjY2OMGjUKADBv3jwsXboUgYGBsLOzw5QpU+Dp6Ynw8HDo6OioJC4iIiIiKp6YMKhITEyM3Ovy5csXynrPnDmDzp07o3379gAAW1tbbN68GefPnwfwoXVh8eLFmDx5Mjp37gwAWL9+PSwtLbFr1y706dOnUOIkIiIiouKBXZJUxNbWFnZ2dtK/haVRo0Y4evQobt++DQC4evUqTp8+jbZt2wIAoqKi8OTJE7Rs2VKax9jYGA0aNEBoaGi2y0xNTUVSUpLcHxERERF9HdjCoCIZGRlqWe/EiRORlJQER0dHaGpqIj09HT///DO8vLwAAE+ePAEAWFpays1naWkpTfuUv7+/yrpQEREREVHRxhaGL8zff/+NjRs3YtOmTbh06RICAwPxyy+/IDAwMN/LnDRpEhITE6W/hw8fFmDERERERFSUsYVBBfbs2aN03U6dOhXouseNG4eJEydK9yI4OzvjwYMH8Pf3x4ABA2BlZQUAiIuLg7W1tTRfXFwcatWqle0ytbW1oa2tXaBxEhEREVHxwIRBBbp06aJUPZlMhvT09AJdd0pKCjQ05BuONDU1pS5SdnZ2sLKywtGjR6UEISkpCefOncP3339foLEQERERUfHHhEEF1HX/AgB07NgRP//8M8qXL49q1arh8uXLWLhwIb777jsAH5IUX19fzJo1Cw4ODtKwqmXKlFE60SEiIiKirwcThkL09u1blT/nYNmyZZgyZQqGDRuGp0+fokyZMhg6dCimTp0q1Rk/fjxev34NHx8fJCQkoHHjxggKCuIzGIiIiIgoC5n4+BHAVODS09Mxe/ZsrFy5EnFxcbh9+zYqVqyIKVOmwNbWFoMGDVJ3iHmWlJQEY2NjJCYmwsjISN3hEBERFSvOgc651rk+4HqBr5ff35RfHCVJxX7++WesW7cO8+bNg5aWllRevXp1rF27Vo2RERERERHljgmDiq1fvx6rV6+Gl5cXNDU1pfKaNWvi1q1baoyMiIiIiCh3TBhU7PHjx6hUqVKW8oyMDLx//14NERERERERKY8Jg4pVrVoVp06dylK+bds21K5dWw0REREREREpj6MkqdjUqVMxYMAAPH78GBkZGdixYwciIyOxfv167N27V93hERERERHliC0MKta5c2f8888/OHLkCPT19TF16lRERETgn3/+QatWrdQdHhERERFRjtjCUAjc3d1x+PBhdYdBRERERJRnbGEgIiIiIiKF2MKgAqVKlYJMJlOqbnx8vIqjISIiIiLKPyYMKrB48WLp/y9evMCsWbPg6ekJV1dXAEBoaCgOHjyIKVOmqClCIiIiIiLlyIQQQt1BfMm6d++OZs2aYcSIEXLlv/76K44cOYJdu3apJ7DPwEfLExER5Z9zoHOuda4PuF7g6+X3N+UX72FQsYMHD6JNmzZZytu0aYMjR46oISIiIiIiIuUxYVCx0qVLY/fu3VnKd+/ejdKlS6shIiIiIiIi5fEeBhXz8/PD4MGDERwcjAYNGgAAzp07h6CgIK
xZs0bN0RERERER5YwJg4p5e3vDyckJS5cuxY4dOwAATk5OOH36tJRAEBEREREVVUwYCkGDBg2wceNGdYdBRERERJRnTBgKQXp6Onbt2oWIiAgAQLVq1dCpUydoamqqOTIiIiIiopwxYVCxu3fvon379nj06BGqVKkCAPD394eNjQ327dsHe3t7NUdIRERERKQYR0lSsVGjRqFixYp4+PAhLl26hEuXLiEmJgZ2dnYYNWqUusMjIiIiIsoRWxhU7MSJEzh79ixMTU2lstKlS2POnDlwc3NTY2RERERERLljC4OKaWtr49WrV1nKk5OToaWlpYaIiIiIiIiUx4RBxTp06AAfHx+cO3cOQggIIXD27Fn85z//QadOndQdHhERERFRjpgwqNjSpUthb28PV1dX6OjoQEdHB25ubqhUqRKWLFmi7vCIiIiIiHLEexhUzMTEBLt378adO3dw69YtAB8e3FapUiU1R0ZERERElDsmDIXEwcEBDg4O6g6DiIiIiChPmDComBAC27Ztw/Hjx/H06VNkZGTITd+xY4eaIiMiIiIiyh0TBhXz9fXFqlWr0KxZM1haWkImk6k7JCIiIiIipTFhULENGzZgx44daNeunbpDISIiIiLKM46SpGLGxsaoWLGiusMgIiIiIsoXJgwqNn36dPj5+eHNmzfqDoWIiIiIKM/YJUnFevXqhc2bN8PCwgK2trYoWbKk3PRLly6pKTIiIiIiotwxYVCxAQMGICwsDN9++y1veiYiIiKiYocJg4rt27cPBw8eROPGjdUdChERERFRnvEeBhWzsbGBkZGRusMgIiIiIsoXJgwqtmDBAowfPx7R0dHqDoWIiIiIKM/YJUnFvv32W6SkpMDe3h56enpZbnqOj49XU2RERERERLljwqBiixcvVncIRERERET5xoRBxQYMGKDuEIiIiIiI8o33MBARERERkUJMGIiIiIiISCEmDEREREREpBATBhW4du0aMjIy1B0GEREREdFnY8KgArVr18bz588BABUrVsSLFy/UHBERERERUf4wYVABExMTREVFAQCio6MLvbXh8ePH+Pbbb1G6dGno6urC2dkZFy9elKYLITB16lRYW1tDV1cXLVu2xJ07dwo1RiIiIiIqHjisqgp0794dHh4esLa2hkwmQ926daGpqZlt3fv37xfoul++fAk3Nzc0a9YMBw4cgLm5Oe7cuYNSpUpJdebNm4elS5ciMDAQdnZ2mDJlCjw9PREeHg4dHZ0CjYeIiIiIijcmDCqwevVqdOvWDXfv3sWoUaMwZMgQGBoaFsq6586dCxsbGwQEBEhldnZ20v+FEFi8eDEmT56Mzp07AwDWr18PS0tL7Nq1C3369CmUOImIiIioeGDCoCJt2rQBAISFhWH06NGFljDs2bMHnp6e6NmzJ06cOIGyZcti2LBhGDJkCAAgKioKT548QcuWLaV5jI2N0aBBA4SGhmabMKSmpiI1NVV6nZSUpPoNISIiIqIigfcwqFhAQICULDx69AiPHj1S6fru37+PFStWwMHBAQcPHsT333+PUaNGITAwEADw5MkTAIClpaXcfJaWltK0T/n7+8PY2Fj6s7GxUek2EBEREVHRwYRBxTIyMjBjxgwYGxujQoUKqFChAkxMTDBz5kyV3AydkZGBOnXqYPbs2ahduzZ8fHwwZMgQrFy5Mt/LnDRpEhITE6W/hw8fFmDERERERFSUsUuSiv3000/4/fffMWfOHLi5uQEATp8+jenTp+Pt27f4+eefC3R91tbWqFq1qlyZk5MTtm/fDgCwsrICAMTFxcHa2lqqExcXh1q1amW7TG1tbWhraxdonERERERUPDBhULHAwECsXbsWnTp1kspq1Kgh3VtQ0AmDm5sbIiMj5cpu376NChUqAPhwA7SVlRWOHj0qJQhJSUk4d+4cvv/++wKNhYiIiIiKPyYMKhYfHw9HR8cs5Y6OjoiPjy/w9Y0ZMwaNGjXC7Nmz0atXL5w/fx
6rV6/G6tWrAQAymQy+vr6YNWsWHBwcpGFVy5Qpgy5duhR4PERERERUvPEeBhWrWbMmfv311yzlv/76K2rWrFng66tXrx527tyJzZs3o3r16pg5cyYWL14MLy8vqc748eMxcuRI+Pj4oF69ekhOTkZQUBCfwUBEREREWciEEELdQXzJTpw4gfbt26N8+fJwdXUFAISGhuLhw4fYv38/3N3d1Rxh3iUlJcHY2BiJiYkwMjJSdzhERETFinOgc651rg+4XuDr5fc35RdbGFTMw8MDt2/fRteuXZGQkICEhAR069YNkZGRxTJZICIiIqKvC+9hKARlypQp8JubiYiIiIgKA1sYiIiIiIhIISYMRERERESkELskERERERWQCEen3CtN4uUXFS9sYVAhIQRiYmLw9u1bdYdCRERERJQvTBhUSAiBSpUq4eHDh+oOhYiIiIgoX5gwqJCGhgYcHBzw4sULdYdCRERERJQvTBhUbM6cORg3bhxu3Lih7lCIiIiIiPKMd92oWP/+/ZGSkoKaNWtCS0sLurq6ctPj4+PVFBkRERERUe6YMKjY4sWL1R0CEREREVG+MWFQsQEDBqg7BCIiIiKifGPCUAju3buHgIAA3Lt3D0uWLIGFhQUOHDiA8uXLo1q1auoOj4iIiApILz5jgb5AvOlZxU6cOAFnZ2ecO3cOO3bsQHJyMgDg6tWrmDZtmpqjIyIiIiLKGRMGFZs4cSJmzZqFw4cPQ0tLSypv3rw5zp49q8bIiIiIiIhyx4RBxa5fv46uXbtmKbewsMDz58/VEBERERERkfKYMKiYiYkJYmNjs5RfvnwZZcuWVUNERERERETKY8KgYn369MGECRPw5MkTyGQyZGRkICQkBD/++CP69++v7vCIiIiIiHLEhEHFZs+eDUdHR9jY2CA5ORlVq1ZFkyZN0KhRI0yePFnd4RERERER5Yhjf6mYlpYW1qxZgylTpuDGjRtITk5G7dq14eDgoO7QiIiIiIhyxYShkJQvXx42NjYAAJlMpuZoiIiIiIiUwy5JheD3339H9erVoaOjAx0dHVSvXh1r165Vd1hERERERLliC4OKTZ06FQsXLsTIkSPh6uoKAAgNDcWYMWMQExODGTNmqDlCIiIiIiLFmDCo2IoVK7BmzRr07dtXKuvUqRNq1KiBkSNHMmEgIiIioiKNXZJU7P3796hbt26WchcXF6SlpakhIiIiIiIi5TFhULF+/fphxYoVWcpXr14NLy8vNURERERERKQ8dklSgbFjx0r/l8lkWLt2LQ4dOoSGDRsCAM6dO4eYmBg+uI2IiIiIijwmDCpw+fJludcuLi4AgHv37gEAzMzMYGZmhps3bxZ6bEREREREecGEQQWOHz+u7hCIiIiIiAoE72EgIiIiIiKF2MKgYm/fvsWyZctw/PhxPH36FBkZGXLTL126pKbIiIiIiIhyx4RBxQYNGoRDhw6hR48eqF+/PmQymbpDIiIiIiJSGhMGFdu7dy/2798PNzc3dYdCRERERJRnvIdBxcqWLQtDQ0N1h0FERERElC9MGFRswYIFmDBhAh48eKDuUIiIiIiI8oxdklSsbt26ePv2LSpWrAg9PT2ULFlSbnp8fLyaIiMiIiIiyh0TBhXr27cvHj9+jNmzZ8PS0pI3PRMRERFRscKEQcXOnDmD0NBQ1KxZU92hEBERERHlGe9hUDFHR0e8efNG3WEQEREREeULEwYVmzNnDn744QcEBwfjxYsXSEpKkvsjIiIiIirK2CVJxdq0aQMAaNGihVy5EAIymQzp6enqCIuIiIiISClMGFTs+PHj6g6BiIiIiCjfmDComIeHh7pDICIiIiLKN97DoGInT57M8U/V5syZA5lMBl9fX6ns7du3GD58OEqXLg0DAwN0794dcXFxKo+FiIiIiIoftjCoWNOmTbOUffwsBlXew3DhwgWsWrUKNWrUkCsfM2YM9u3bh61bt8LY2BgjRoxAt27dEBISorJYiIiIiK
h4YguDir18+VLu7+nTpwgKCkK9evVw6NAhla03OTkZXl5eWLNmDUqVKiWVJyYm4vfff8fChQvRvHlzuLi4ICAgAGfOnMHZs2dVFg8RERERFU9MGFTM2NhY7s/MzAytWrXC3LlzMX78eJWtd/jw4Wjfvj1atmwpVx4WFob379/LlTs6OqJ8+fIIDQ3NdlmpqakcDpaIiIjoK8UuSWpiaWmJyMhIlSx7y5YtuHTpEi5cuJBl2pMnT6ClpQUTE5Ms8Tx58iTb5fn7+8PPz08VoRIRERFREceEQcWuXbsm91oIgdjYWMyZMwe1atUq8PU9fPgQo0ePxuHDh6Gjo1Mgy5w0aRLGjh0rvU5KSoKNjU2BLJuIiIiIijYmDCpWq1YtyGQyCCHkyhs2bIg//vijwNcXFhaGp0+fok6dOlJZeno6Tp48iV9//RUHDx7Eu3fvkJCQINfKEBcXBysrq2yXqa2tDW1t7QKPlYiIiIiKPiYMKhYVFSX3WkNDA+bm5gX26/+nWrRogevXr8uVDRw4EI6OjpgwYQJsbGxQsmRJHD16FN27dwcAREZGIiYmBq6uriqJiYiIiIiKLyYMKlahQoVCXZ+hoSGqV68uV6avr4/SpUtL5YMGDcLYsWNhamoKIyMjjBw5Eq6urmjYsGGhxkpERERERR8ThkJw9OhRHD16FE+fPkVGRobcNFV0S8rNokWLoKGhge7duyM1NRWenp747bffCj0OIiIiIir6mDComJ+fH2bMmIG6devC2tpa7qFthSU4OFjutY6ODpYvX47ly5cXeixEREREVLwwYVCxlStXYt26dejXr5+6QyEiIiIiyjM+uE3F3r17h0aNGqk7DCIiIiKifGHCoGKDBw/Gpk2b1B0GEREREVG+sEuSir19+xarV6/GkSNHUKNGDZQsWVJu+sKFC9UUGRERERFR7pgwqNi1a9ekJzrfuHFDbpo6boAmIiIiIsoLJgwqdvz4cXWHQERERESUb7yHgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEIdVJSIioi9WhKOTUvWcbkUUzLIm8dKKvjxsYSAiIiIiIoWYMBARERERkUJsNyMiIqKvnrJdl4i+RmxhICIiIiIihZgwEBERERGRQuySRERERF+sXkqOWvS3f5qKIyEqvtjCQERERERECrGFgYiIiL56yrREsBWCvlZsYSAiIiIiIoWYMBARERERkULskkRERESkBGVvoCb60rCFgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIISYMRERERESkEBMGIiIiIiJSiAkDEREREREpxISBiIiIiIgUYsJAREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiEghJgxERERERKQQEwYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAqVUHcARERERJ+KcHTKtY7TrYhCiISI2MLwhfH390e9evVgaGgICwsLdOnSBZGRkXJ13r59i+HDh6N06dIwMDBA9+7dERcXp6aIiYiIiKgoY8LwhTlx4gSGDx+Os2fP4vDhw3j//j1at26N169fS3XGjBmDf/75B1u3bsWJEyfw77//olu3bmqMmoiIiIiKKnZJ+sIEBQXJvV63bh0sLCwQFhaGJk2aIDExEb///js2bdqE5s2bAwACAgLg5OSEs2fPomHDhuoIm4iISE6vSblfolwvhDiIiC0MX7zExEQAgKmpKQAgLCwM79+/R8uWLaU6jo6OKF++PEJDQ9USIxEREREVXWxh+IJlZGTA19cXbm5uqF69OgDgyZMn0NLSgomJiVxdS0tLPHnyJNvlpKamIjU1VXqdlJSkspiJiIiIqGhhC8MXbPjw4bhx4wa2bNnyWcvx9/eHsbGx9GdjY1NAERIRERFRUceE4Qs1YsQI7N27F8ePH0e5cuWkcisrK7x79w
4JCQly9ePi4mBlZZXtsiZNmoTExETp7+HDh6oMnYiIiIiKECYMXxghBEaMGIGdO3fi2LFjsLOzk5vu4uKCkiVL4ujRo1JZZGQkYmJi4Orqmu0ytbW1YWRkJPdHRERERF8H3sPwhRk+fDg2bdqE3bt3w9DQULovwdjYGLq6ujA2NsagQYMwduxYmJqawsjICCNHjoSrqytHSCIiIiKiLJgwfGFWrFgBAGjatKlceUBAALy9vQEAixYtgoaGBrp3747U1FR4enrit99+K+RIiYiIiKg4YMLwhRFC5FpHR0cHy5cvx/LlywshIiIi+ppEODrlWsfpVkShrQtKPM+BiHLGexiIiIiIiEghJgxERERERKQQEwYiIiIiIlKICQMRERERESnEO4GIiIiowPRS4ibj64UQBxEVHLYwEBERERGRQkwYiIiIiIhIIXZJoqJnurESdRJVHwcRERVpynR/IqLPxxYGIiIiIiJSiAkDEREREREpxLY8IiIiKlQRjk65V2J3I6Iigy0MRERERESkEBMGIiIiIiJSiO19REREVKg4uhFR8cIWBiIiIiIiUogJAxERERERKcSEgYiIiIiIFGLCQERERERECjFhICIiIiIihZgwEBERERGRQkwYiIiIiIhIIQ6ETEREREqJcHTKvRKfsUD0xWELAxERERERKcSfAYiIiEgpfEIz0deJLQxERERERKQQEwYiIiIiIlKICQMRERERESnEhIGIiIiIiBRiwkBERERERAoxYSAiIiIiIoWYMBARERERkUIcUJmIiFRCmacCO92KKIRIiibuHyIqLtjCQERERERECjFhICIiIiIihdgliYiIij1luvcoo6C6ADkHOudeaVLuX8HXCyAWQMl4iIgUYAsDEREREREpxISBiIiIiIgUYpckokJiO3FfrnWi57QvhEiICkevQuxyo8y6lKJE152//dNyX05BxUNEVASwhYGIiIiIiBRiwkBERERERAqxzZToC8TuT/Ql+ZpH+FFm9KcC645FRKQAWxiIiIiIiEgh/ixBRMVGQbWcFMcWmMLc9gO7fsy1TkH9qq3U8xOK2C/ohfmLPlsPiKgoYAvDV2z58uWwtbWFjo4OGjRogPPnz6s7JCIiIiIqYpgwfKX++usvjB07FtOmTcOlS5dQs2ZNeHp64unTp+oOjYiIiIiKELZ1fqUWLlyIIUOGYODAgQCAlStXYt++ffjjjz8wceJENUdXMJTpelHUFGZXmcLcPwUVc7TON0rUSiyQdRUmpd73Qtx2drkhIqKPsYXhK/Tu3TuEhYWhZcuWUpmGhgZatmyJ0NBQNUZGREREREUNf9r5Cj1//hzp6emwtLSUK7e0tMStW7ey1E9NTUVqaqr0OjHxw6+YSUlJqgkwVeReR4l1Z6SmFEAwRY8y+72obXuBHSuFeGyo7PjOhlLxyApv29PfpOe+LiJSKVWcgzKXKYQS5xOijzBhoFz5+/vDz88vS7mNjY0aovl/c4zVt241M16s7gjyrlBjLqBjo6jtZ6W26iv+XBB9aYy/V93n+dWrVzA25vmClMeE4StkZmYGTU1NxMXFyZXHxcXBysoqS/1JkyZh7Nix0uuMjAzEx8ejdOnSkMlkBRpbUlISbGxs8PDhQxgZGRXosul/uJ8LB/dz4eB+Lhzcz4VHVftaCIFXr16hTJkyBbZM+jowYfgKaWlpwcXFBUePHkWXLl0AfEgCjh49ihEjRmSpr62tDW1tbbkyExMTlcZoZGTEL6RCwP1cOLifCwf3c+Hgfi48qtjXbFmg/GDC8JUaO3YsBgwYgLp166J+/fpYvHgxXr9+LY2aREREREQEMGH4avXu3RvPnj3D1KlT8eTJE9SqVQtBQUFZboQmIiIioq8bE4av2IgRI7LtgqRO2tramDZtWpYuUFSwuJ8LB/dz4eB+Lhzcz4WH+5qKGpng2FpERERERKQAH9xGREREREQKMWEgIiIiIiKFmDAQEREREZFCTBiIiIiIiE
ghJgxU6JYvXw5bW1vo6OigQYMGOH/+fI71t27dCkdHR+jo6MDZ2Rn79+8vpEiLt7zs5zVr1sDd3R2lSpVCqVKl0LJly1zfF/ogr8dzpi1btkAmk0kPT6Sc5XU/JyQkYPjw4bC2toa2tjYqV67Mc4cS8rqfFy9ejCpVqkBXVxc2NjYYM2YM3r59W0jRFk8nT55Ex44dUaZMGchkMuzatSvXeYKDg1GnTh1oa2ujUqVKWLduncrjJJIjiArRli1bhJaWlvjjjz/EzZs3xZAhQ4SJiYmIi4vLtn5ISIjQ1NQU8+bNE+Hh4WLy5MmiZMmS4vr164UcefGS1/38zTffiOXLl4vLly+LiIgI4e3tLYyNjcWjR48KOfLiJa/7OVNUVJQoW7ascHd3F507dy6cYIuxvO7n1NRUUbduXdGuXTtx+vRpERUVJYKDg8WVK1cKOfLiJa/7eePGjUJbW1ts3LhRREVFiYMHDwpra2sxZsyYQo68eNm/f7/46aefxI4dOwQAsXPnzhzr379/X+jp6YmxY8eK8PBwsWzZMqGpqSmCgoIKJ2AiIQQTBipU9evXF8OHD5dep6enizJlygh/f/9s6/fq1Uu0b99erqxBgwZi6NChKo2zuMvrfv5UWlqaMDQ0FIGBgaoK8YuQn/2clpYmGjVqJNauXSsGDBjAhEEJed3PK1asEBUrVhTv3r0rrBC/CHndz8OHDxfNmzeXKxs7dqxwc3NTaZxfEmUShvHjx4tq1arJlfXu3Vt4enqqMDIieeySRIXm3bt3CAsLQ8uWLaUyDQ0NtGzZEqGhodnOExoaKlcfADw9PRXWp/zt50+lpKTg/fv3MDU1VVWYxV5+9/OMGTNgYWGBQYMGFUaYxV5+9vOePXvg6uqK4cOHw9LSEtWrV8fs2bORnp5eWGEXO/nZz40aNUJYWJjUben+/fvYv38/2rVrVygxfy34PUhFAZ/0TIXm+fPnSE9Ph6WlpVy5paUlbt26le08T548ybb+kydPVBZncZef/fypCRMmoEyZMlm+pOh/8rOfT58+jd9//x1XrlwphAi/DPnZz/fv38exY8fg5eWF/fv34+7duxg2bBjev3+PadOmFUbYxU5+9vM333yD58+fo3HjxhBCIC0tDf/5z3/w3//+tzBC/moo+h5MSkrCmzdvoKurq6bI6GvCFgYikjNnzhxs2bIFO3fuhI6OjrrD+WK8evUK/fr1w5o1a2BmZqbucL5oGRkZsLCwwOrVq+Hi4oLevXvjp59+wsqVK9Ud2hclODgYs2fPxm+//YZLly5hx44d2LdvH2bOnKnu0IiogLGFgQqNmZkZNDU1ERcXJ1ceFxcHKyurbOexsrLKU33K337O9Msvv2DOnDk4cuQIatSoocowi7287ud79+4hOjoaHTt2lMoyMjIAACVKlEBkZCTs7e1VG3QxlJ/j2draGiVLloSmpqZU5uTkhCdPnuDdu3fQ0tJSaczFUX7285QpU9CvXz8MHjwYAODs7IzXr1/Dx8cHP/30EzQ0+JtkQVD0PWhkZMTWBSo0/DRTodHS0oKLiwuOHj0qlWVkZODo0aNwdXXNdh5XV1e5+gBw+PBhhfUpf/sZAObNm4eZM2ciKCgIdevWLYxQi7W87mdHR0dcv34dV65ckf46deqEZs2a4cqVK7CxsSnM8IuN/BzPbm5uuHv3rpSQAcDt27dhbW3NZEGB/OznlJSULElBZpImhFBdsF8Zfg9SkaDuu67p67Jlyxahra0t1q1bJ8LDw4WPj48wMTERT548EUII0a9fPzFx4kSpfkhIiChRooT45ZdfREREhJg2bRqHVVVCXvfznDlzhJaWlti2bZuIjY2V/l69eqWuTSgW8rqfP8VRkpST1/0cExMjDA0NxYgRI0RkZKTYu3evsLCwELNmzVLXJhQLed3P06ZNE4aGhmLz5s3i/v374tChQ8Le3l706tVLXZtQLLx69UpcvnxZXL58WQAQCxcuFJcvXxYPHj
wQQggxceJE0a9fP6l+5rCq48aNExEREWL58uUcVpUKHRMGKnT/1869hUT57WEcf8ZqHDXCNDsoao0ZSY6amUEUynghiJF2UUwkSkhFBJaJFWgGEVgWGHkhSRlh2oEoDKED1pVmaWkoSZklYkQH86IDHsp3X2z28HfnZFJbjf39gBevv7Xe9WMxDDysed9Tp04ZQUFBhtlsNmJjY42GhgZnLS4uzkhPTx8x/vLly8aSJUsMs9lsLFu2zKipqZngjv9O49nn4OBgQ9IPfwUFBRPf+F9mvJ/nfyIw/Lrx7nN9fb2xatUqw93d3bBarcaRI0eMb9++TXDXf5/x7PPQ0JBx6NAhIyQkxLBYLEZgYKCxc+dOo6+vb+Ib/4vcu3dv1O/b/+xtenq6ERcX98OcqKgow2w2G1ar1SgvL5/wvvH/zWQYnBsCAAAAGB3PMAAAAABwicAAAAAAwCUCAwAAAACXCAwAAAAAXCIwAAAAAHCJwAAAAADAJQIDAAAAAJcIDAAAAABcIjAAAAAAcInAAAD4ZUNDQ5PdAgBgghEYAGAKu3nzptasWSNvb2/5+voqOTlZnZ2dznpPT48cDod8fHzk5eWlmJgYPXjwwFm/ceOGVq5cKYvFojlz5ig1NdVZM5lMun79+oj1vL29de7cOUlSV1eXTCaTLl26pLi4OFksFl24cEG9vb1yOBwKCAiQp6enbDabqqqqRtxneHhYx44d0+LFi+Xu7q6goCAdOXJEkmS327Vr164R49+/fy+z2aza2to/sW0AgD+IwAAAU9iXL1+UnZ2tpqYm1dbWys3NTampqRoeHtbnz58VFxen169fq7q6Wk+ePFFubq6Gh4clSTU1NUpNTVVSUpKam5tVW1ur2NjYcfewf/9+ZWVlqb29XYmJierv79eKFStUU1OjtrY2bdu2TWlpaXr48KFzzoEDB1RYWKj8/Hw9ffpUlZWVmjdvniQpMzNTlZWVGhgYcI6vqKhQQECA7Hb7b+4YAOBPMxmGYUx2EwCAX/Phwwf5+fmptbVV9fX1ysnJUVdXl3x8fH4Yu3r1almtVlVUVIx6L5PJpGvXriklJcX5P29vbxUXFysjI0NdXV1atGiRiouLlZWV9dO+kpOTtXTpUh0/flyfPn2Sn5+fSkpKlJmZ+cPY/v5++fv7q7S0VBs3bpQkRUZGasOGDSooKBjHbgAAJgInDAAwhXV0dMjhcMhqtWrWrFlauHChJKm7u1stLS1avnz5qGFBklpaWpSQkPDbPcTExIy4/v79uw4fPiybzSYfHx/NnDlTt27dUnd3tySpvb1dAwMDLte2WCxKS0vT2bNnJUmPHz9WW1ubMjIyfrtXAMCfN32yGwAAuLZu3ToFBwerrKxM/v7+Gh4eVnh4uAYHB+Xh4fHTuWPVTSaT/vuQebSHmr28vEZcFxUV6eTJkyouLpbNZpOXl5d2796twcHBX1pX+vfPkqKiotTT06Py8nLZ7XYFBwePOQ8AMPE4YQCAKaq3t1fPnj1TXl6eEhISFBYWpr6+Pmc9IiJCLS0t+vjx46jzIyIifvoQsZ+fn968eeO87ujo0NevX8fsq66uTuvXr9eWLVsUGRkpq9Wq58+fO+uhoaHy8PD46do2m00xMTEqKytTZWWltm7dOua6AIDJQWAAgClq9uzZ8vX11enTp/XixQvdvXtX2dnZzrrD4dD8+fOVkpKiuro6vXz5UlevXtX9+/clSQUFBaqqqlJBQYHa29vV2tqqo0ePOufb7XaVlJSoublZTU1N2rFjh2bMmDFmX6Ghobpz547q6+vV3t6u7du36+3bt866xWLRvn37lJubq/Pnz6uzs1MNDQ06c+bMiPtkZmaqsLBQhmGMeHsTAGBqITAAwBTl5uamixcv6tGjRwoPD9eePXtUVFTkrJvNZt2+fVtz585VUlKSbDabCgsLNW3aNElSfHy8rly5ourqakVFRclut494k9GJEycUGBiotWvXavPmzcrJyZGnp+
eYfeXl5Sk6OlqJiYmKj493hpZ/ys/P1969e3Xw4EGFhYVp06ZNevfu3YgxDodD06dPl8PhkMVi+Y2dAgD8L/GWJADApOjq6lJISIgaGxsVHR092e0AAFwgMAAAJtTQ0JB6e3uVk5OjV69eqa6ubrJbAgD8BD9JAgBMqLq6Oi1YsECNjY0qLS2d7HYAAGPghAEAAACAS5wwAAAAAHCJwAAAAADAJQIDAAAAAJcIDAAAAABcIjAAAAAAcInAAAAAAMAlAgMAAAAAlwgMAAAAAFwiMAAAAABw6V/kSy9fRpQolgAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "path_model_label=Path.home() / \"Desktop/Code/CELLSEG_BENCHMARK/RESULTS/full data/instance/instance_threshold_pred_SegResNet_Generalized_latest.tif\"\n", + "res = evl.evaluate_model_performance(imread(path_true_labels), imread(path_model_label),visualize=False, return_graphical_summary=True,plot_according_to_gt_label=False)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.16" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 20cba1f83604c305c09408b5cd91b021640bbb79 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 25 Jul 2023 15:23:14 +0200 Subject: [PATCH 02/70] Change softmax arg --- napari_cellseg3d/code_models/models/wnet/model.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 0a833fa1..4746ebea 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -32,7 +32,7 @@ def __init__( self.encoder = UNet( in_channels=in_channels, out_channels=out_channels, - encoder=True, + softmax=False, ) def forward(self, x): @@ -55,10 +55,10 @@ def __init__( ): super(WNet, self).__init__() self.encoder = UNet( - in_channels, num_classes, encoder=True, dropout=dropout + in_channels, num_classes, softmax=True, dropout=dropout ) self.decoder = UNet( - num_classes, out_channels, encoder=False, dropout=dropout + num_classes, out_channels, softmax=False, 
dropout=dropout ) def forward(self, x): @@ -84,7 +84,7 @@ def __init__( in_channels: int, out_channels: int, channels: List[int] = None, - encoder: bool = True, + softmax: bool = True, dropout: float = 0.65, ): if channels is None: @@ -120,7 +120,7 @@ def __init__( ) self.sm = nn.Softmax(dim=1) - self.encoder = encoder + self.softmax = softmax def forward(self, x): """Forward pass of the U-Net model.""" @@ -165,7 +165,7 @@ def forward(self, x): dim=1, ) ) - if self.encoder: + if self.softmax: x = self.sm(x) return x From f85a6052ec08335b887f45b35ff205bd7e9b0487 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 25 Jul 2023 15:27:59 +0200 Subject: [PATCH 03/70] Num group 2 --- napari_cellseg3d/code_models/models/wnet/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 4746ebea..c0fe8900 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -16,7 +16,7 @@ "Xide Xia", "Brian Kulis", ] -NUM_GROUPS = 8 +NUM_GROUPS = 2 class WNet_encoder(nn.Module): From ea07ad4e60c8ca2a9150ab58b591a461576f688f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 25 Jul 2023 15:31:41 +0200 Subject: [PATCH 04/70] Update model.py --- napari_cellseg3d/code_models/models/wnet/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index c0fe8900..fc44a0a6 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -16,7 +16,7 @@ "Xide Xia", "Brian Kulis", ] -NUM_GROUPS = 2 +NUM_GROUPS = 4 class WNet_encoder(nn.Module): From 83d14e8eeeb1103435a6d8922f9a259dc56d7012 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 25 Jul 2023 15:33:06 +0200 Subject: [PATCH 05/70] Update model.py --- 
napari_cellseg3d/code_models/models/wnet/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index fc44a0a6..b0690ce0 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -16,7 +16,7 @@ "Xide Xia", "Brian Kulis", ] -NUM_GROUPS = 4 +NUM_GROUPS = 16 class WNet_encoder(nn.Module): From aaf174e7b3c58f22a51232499097e1a98e616b02 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 25 Jul 2023 18:46:03 +0200 Subject: [PATCH 06/70] Reduce depth of WNet --- .../code_models/models/wnet/model.py | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index b0690ce0..cd2bcb16 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -16,7 +16,7 @@ "Xide Xia", "Brian Kulis", ] -NUM_GROUPS = 16 +NUM_GROUPS = 4 class WNet_encoder(nn.Module): @@ -100,21 +100,22 @@ def __init__( self.in_b = InBlock(in_channels, self.channels[0], dropout=dropout) self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) - self.conv3 = Block(channels[2], self.channels[3], dropout=dropout) - self.bot = Block(channels[3], self.channels[4], dropout=dropout) - self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) - self.conv_trans1 = nn.ConvTranspose3d( - self.channels[4], self.channels[3], 2, stride=2 - ) + # self.conv3 = Block(channels[2], self.channels[3], dropout=dropout) + # self.bot = Block(channels[3], self.channels[4], dropout=dropout) + self.bot = Block(channels[2], self.channels[3], dropout=dropout) + # self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) self.deconv2 = Block(channels[3], self.channels[2], 
dropout=dropout) + self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) + self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) + # self.conv_trans1 = nn.ConvTranspose3d( + # self.channels[4], self.channels[3], 2, stride=2 + # ) self.conv_trans2 = nn.ConvTranspose3d( self.channels[3], self.channels[2], 2, stride=2 ) - self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) self.conv_trans3 = nn.ConvTranspose3d( self.channels[2], self.channels[1], 2, stride=2 ) - self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) self.conv_trans_out = nn.ConvTranspose3d( self.channels[1], self.channels[0], 2, stride=2 ) @@ -127,17 +128,18 @@ def forward(self, x): in_b = self.in_b(x) c1 = self.conv1(self.max_pool(in_b)) c2 = self.conv2(self.max_pool(c1)) - c3 = self.conv3(self.max_pool(c2)) - x = self.bot(self.max_pool(c3)) - x = self.deconv1( - torch.cat( - [ - c3, - self.conv_trans1(x), - ], - dim=1, - ) - ) + # c3 = self.conv3(self.max_pool(c2)) + # x = self.bot(self.max_pool(c3)) + x = self.bot(self.max_pool(c2)) + # x = self.deconv1( + # torch.cat( + # [ + # c3, + # self.conv_trans1(x), + # ], + # dim=1, + # ) + # ) x = self.deconv2( torch.cat( [ From 622e9b31ffdcbe20241d24890b308a221d70d28e Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 26 Jul 2023 16:49:41 +0200 Subject: [PATCH 07/70] Started WNet training UI --- napari_cellseg3d/_tests/test_training.py | 10 +- .../code_models/models/wnet/train_wnet.py | 45 +- .../code_models/worker_training.py | 794 +++++++++++++++++- .../code_plugins/plugin_model_training.py | 477 +++++++---- napari_cellseg3d/config.py | 88 +- napari_cellseg3d/interface.py | 99 ++- 6 files changed, 1246 insertions(+), 267 deletions(-) diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index 0c54d36a..ac5d32a7 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -14,7 +14,7 @@ def 
test_update_loss_plot(make_napari_viewer_proxy): view = make_napari_viewer_proxy() widget = Trainer(view) - widget.worker_config = config.TrainingWorkerConfig() + widget.worker_config = config.SupervisedTrainingWorkerConfig() widget.worker_config.validation_interval = 1 widget.worker_config.results_path_folder = "." @@ -55,8 +55,8 @@ def test_update_loss_plot(make_napari_viewer_proxy): def test_check_matching_losses(): plugin = Trainer(None) - config = plugin._set_worker_config() - worker = plugin._create_worker_from_config(config) + config = plugin._set_supervised_worker_config() + worker = plugin._create_supervised_worker_from_config(config) assert plugin.loss_list == list(worker.loss_dict.keys()) @@ -84,9 +84,9 @@ def test_training(make_napari_viewer_proxy, qtbot): MODEL_LIST["test"] = TestModel widget.model_choice.addItem("test") widget.model_choice.setCurrentText("test") - worker_config = widget._set_worker_config() + worker_config = widget._set_supervised_worker_config() assert worker_config.model_info.name == "test" - worker = widget._create_worker_from_config(worker_config) + worker = widget._create_supervised_worker_from_config(worker_config) worker.config.train_data_dict = [{"image": im_path, "label": im_path}] worker.config.val_data_dict = [{"image": im_path, "label": im_path}] worker.config.max_epochs = 1 diff --git a/napari_cellseg3d/code_models/models/wnet/train_wnet.py b/napari_cellseg3d/code_models/models/wnet/train_wnet.py index 3b2ad353..7207fe35 100644 --- a/napari_cellseg3d/code_models/models/wnet/train_wnet.py +++ b/napari_cellseg3d/code_models/models/wnet/train_wnet.py @@ -115,11 +115,11 @@ def create_dataset_dict_no_labs(volume_directory): ################################ -# Config & WANDB # +# WNet: Config & WANDB # ################################ -class Config: +class WNetTrainingWorkerConfig: def __init__(self): # WNet self.in_channels = 1 @@ -144,29 +144,20 @@ def __init__(self): self.num_epochs = 100 self.val_interval = 5 
self.batch_size = 2 - self.num_workers = 4 - - # CRF - self.sa = 50 # 10 - self.sb = 20 - self.sg = 1 - self.w1 = 50 # 10 - self.w2 = 20 - self.n_iter = 5 # Data - self.train_volume_directory = "./../dataset/VIP_full" - self.eval_volume_directory = "./../dataset/VIP_cropped/eval/" + # self.train_volume_directory = "./../dataset/VIP_full" + # self.eval_volume_directory = "./../dataset/VIP_cropped/eval/" self.normalize_input = True self.normalizing_function = remap_image # normalize_quantile - self.use_patch = False - self.patch_size = (64, 64, 64) - self.num_patches = 30 - self.eval_num_patches = 20 - self.do_augmentation = True - self.parallel = False - - self.save_model = True + # self.use_patch = False + # self.patch_size = (64, 64, 64) + # self.num_patches = 30 + # self.eval_num_patches = 20 + # self.do_augmentation = True + # self.parallel = False + + # self.save_model = True self.save_model_path = ( r"./../results/new_model/wnet_new_model_all_data_3class.pth" ) @@ -177,7 +168,7 @@ def __init__(self): self.weights_path = None -c = Config() +c = WNetTrainingWorkerConfig() ############### # Scheduler config ############### @@ -283,9 +274,9 @@ def __init__(self): def train(weights_path=None, train_config=None): if train_config is None: - config = Config() + config = WNetTrainingWorkerConfig() ############## - # disable metadata tracking + # disable metadata tracking in MONAI set_track_meta(False) ############## if WANDB_INSTALLED: @@ -698,7 +689,7 @@ def get_dataset(config): """Creates a Dataset from the original data using the tifffile library Args: - config (Config): The configuration object + config (WNetTrainingWorkerConfig): The configuration object Returns: (tuple): A tuple containing the shape of the data and the dataset @@ -776,7 +767,7 @@ def get_patch_dataset(config): """Creates a Dataset from the original data using the tifffile library Args: - config (Config): The configuration object + config (WNetTrainingWorkerConfig): The configuration object 
Returns: (tuple): A tuple containing the shape of the data and the dataset @@ -885,7 +876,7 @@ def get_dataset_monai(config): """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library Args: - config (Config): The configuration object + config (WNetTrainingWorkerConfig): The configuration object Returns: (tuple): A tuple containing the shape of the data and the dataset diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index d7a49fd9..a1850e91 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -1,10 +1,12 @@ import platform import time +from abc import abstractmethod from math import ceil from pathlib import Path import numpy as np import torch +import torch.nn as nn # MONAI from monai.data import ( @@ -14,6 +16,7 @@ decollate_batch, pad_list_data_collate, ) +from monai.data.meta_obj import set_track_meta from monai.inferers import sliding_window_inference from monai.losses import ( DiceCELoss, @@ -23,8 +26,9 @@ ) from monai.metrics import DiceMetric from monai.transforms import ( - # AsDiscrete, + AsDiscrete, Compose, + EnsureChannelFirst, EnsureChannelFirstd, EnsureType, EnsureTyped, @@ -37,7 +41,9 @@ RandRotate90d, RandShiftIntensityd, RandSpatialCropSamplesd, + ScaleIntensityRanged, SpatialPadd, + ToTensor, ) from monai.utils import set_determinism @@ -46,6 +52,8 @@ # local from napari_cellseg3d import config, utils +from napari_cellseg3d.code_models.models.wnet.model import WNet +from napari_cellseg3d.code_models.models.wnet.soft_Ncuts import SoftNCutsLoss from napari_cellseg3d.code_models.workers_utils import ( PRETRAINED_WEIGHTS_DIR, LogSignal, @@ -60,6 +68,17 @@ VERBOSE_SCHEDULER = True logger.debug(f"PRETRAINED WEIGHT DIR LOCATION : {PRETRAINED_WEIGHTS_DIR}") +try: + import wandb + + WANDB_INSTALLED = True +except ImportError: + logger.warning( + "wandb not installed, wandb config will not be 
taken into account", + stacklevel=1, + ) + WANDB_INSTALLED = False + """ Writing something to log messages from outside the main thread needs specific care, Following the instructions in the guides below to have a worker with custom signals, @@ -70,14 +89,742 @@ # https://www.pythoncentral.io/pysidepyqt-tutorial-creating-your-own-signals-and-slots/ # https://napari-staging-site.github.io/guides/stable/threading.html +# TODO list for WNet training : +# 1. Create a custom base worker for training to avoid code duplication +# 2. Create a custom worker for WNet training +# 3. Adapt UI for WNet training (Advanced tab + model choice on first tab) +# 4. Adapt plots and TrainingReport for WNet training + + +class TrainingWorkerBase(GeneratorWorker): + """A basic worker abstract class, to run training jobs in. + Contains the minimal common elements required for training models.""" + + def __init__(self): + super().__init__(self.train) + self._signals = LogSignal() + self.log_signal = self._signals.log_signal + self.warn_signal = self._signals.warn_signal + self.error_signal = self._signals.error_signal + self.downloader = WeightsDownloader() + self.train_files = [] + self.val_files = [] + self.config = None + + self._weight_error = False + ################################ + + def set_download_log(self, widget): + """Sets the log widget for the downloader to output to""" + self.downloader.log_widget = widget -class TrainingWorker(GeneratorWorker): - """A custom worker to run training jobs in. - Inherits from :py:class:`napari.qt.threading.GeneratorWorker`""" + def log(self, text): + """Sends a signal that ``text`` should be logged + Goes in a Log object, defined in :py:mod:`napari_cellseg3d.interface + Sends a signal to the main thread to log the text. 
+ Signal is defined in napari_cellseg3d.workers_utils.LogSignal + + Args: + text (str): text to logged + """ + self.log_signal.emit(text) + + def warn(self, warning): + """Sends a warning to main thread""" + self.warn_signal.emit(warning) + + def raise_error(self, exception, msg): + """Sends an error to main thread""" + logger.error(msg, exc_info=True) + logger.error(exception, exc_info=True) + self.error_signal.emit(exception, msg) + self.errored.emit(exception) + self.quit() + + @abstractmethod + def log_parameters(self): + """Logs the parameters of the training""" + raise NotImplementedError + + @abstractmethod + def train(self): + """Starts a training job""" + raise NotImplementedError + + +class WNetTrainingWorker(TrainingWorkerBase): + """A custom worker to run WNet (unsupervised) training jobs in. + Inherits from :py:class:`napari.qt.threading.GeneratorWorker` via :py:class:`TrainingWorkerBase` + """ def __init__( self, - worker_config: config.TrainingWorkerConfig, + worker_config: config.WNetTrainingWorkerConfig, + ): + super().__init__() + self.config = worker_config + + @staticmethod + def create_dataset_dict_no_labs(volume_directory): + """Creates unsupervised data dictionary for MONAI transforms and training.""" + images_filepaths = sorted( + Path.glob(str(Path(volume_directory) / "*.tif")) + ) + if len(images_filepaths) == 0: + raise ValueError(f"Data folder {volume_directory} is empty") + + logger.info("Images :") + for file in images_filepaths: + logger.info(Path(file).stem) + logger.info("*" * 10) + return [{"image": image_name} for image_name in images_filepaths] + + @staticmethod + def create_dataset_dict(volume_directory, label_directory): + """Creates data dictionary for MONAI transforms and training.""" + images_filepaths = sorted( + [str(file) for file in Path(volume_directory).glob("*.tif")] + ) + + labels_filepaths = sorted( + [str(file) for file in Path(label_directory).glob("*.tif")] + ) + if len(images_filepaths) == 0 or 
len(labels_filepaths) == 0: + raise ValueError( + f"Data folders are empty \n{volume_directory} \n{label_directory}" + ) + + logger.info("Images :") + for file in images_filepaths: + logger.info(Path(file).stem) + logger.info("*" * 10) + logger.info("Labels :") + for file in labels_filepaths: + logger.info(Path(file).stem) + try: + data_dicts = [ + {"image": image_name, "label": label_name} + for image_name, label_name in zip( + images_filepaths, labels_filepaths + ) + ] + except ValueError as e: + raise ValueError( + f"Number of images and labels does not match : \n{volume_directory} \n{label_directory}" + ) from e + # self.log(f"Loaded eval image: {data_dicts}") + return data_dicts + + def get_patch_dataset(self, volume_directory): + """Creates a Dataset from the original data using the tifffile library + + Args: + volume_directory (str): Path to the directory containing the data + + Returns: + (tuple): A tuple containing the shape of the data and the dataset + """ + + train_files = self.create_dataset_dict_no_labs( + volume_directory=volume_directory + ) + + patch_func = Compose( + [ + LoadImaged(keys=["image"], image_only=True), + EnsureChannelFirstd(keys=["image"], channel_dim="no_channel"), + RandSpatialCropSamplesd( + keys=["image"], + roi_size=( + self.config.sample_size + ), # multiply by axis_stretch_factor if anisotropy + # max_roi_size=(120, 120, 120), + random_size=False, + num_samples=self.config.num_samples, + ), + Orientationd(keys=["image"], axcodes="PLI"), + SpatialPadd( + keys=["image"], + spatial_size=( + utils.get_padding_dim(self.config.sample_size) + ), + ), + EnsureTyped(keys=["image"]), + ] + ) + + train_transforms = Compose( + [ + ScaleIntensityRanged( + keys=["image"], + a_min=0, + a_max=2000, + b_min=0.0, + b_max=1.0, + clip=True, + ), + RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), + 
RandRotate90d(keys=["image"], prob=0.1, max_k=3), + EnsureTyped(keys=["image"]), + ] + ) + + dataset = PatchDataset( + data=train_files, + samples_per_image=self.config.num_samples, + patch_func=patch_func, + transform=train_transforms, + ) + + return self.config.sample_size, dataset + + def get_patch_eval_dataset(self, volume_directory): + eval_files = self.create_dataset_dict( + volume_directory=volume_directory + "/vol", + label_directory=volume_directory + "/lab", + ) + + patch_func = Compose( + [ + LoadImaged(keys=["image", "label"], image_only=True), + EnsureChannelFirstd( + keys=["image", "label"], channel_dim="no_channel" + ), + # NormalizeIntensityd(keys=["image"]) if config.normalize_input else lambda x: x, + RandSpatialCropSamplesd( + keys=["image", "label"], + roi_size=( + self.config.sample_size + ), # multiply by axis_stretch_factor if anisotropy + # max_roi_size=(120, 120, 120), + random_size=False, + num_samples=self.config.eval_num_patches, + ), + Orientationd(keys=["image", "label"], axcodes="PLI"), + SpatialPadd( + keys=["image", "label"], + spatial_size=( + utils.get_padding_dim(self.config.sample_size) + ), + ), + EnsureTyped(keys=["image", "label"]), + ] + ) + + eval_transforms = Compose( + [ + EnsureTyped(keys=["image", "label"]), + ] + ) + + return PatchDataset( + data=eval_files, + samples_per_image=self.config.eval_num_patches, + patch_func=patch_func, + transform=eval_transforms, + ) + + def get_dataset_monai(self): + """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library + + Args: + config (WNetTrainingWorkerConfig): The configuration object + + Returns: + (tuple): A tuple containing the shape of the data and the dataset + """ + # train_files = self.create_dataset_dict_no_labs( + # volume_directory=self.config.train_volume_directory + # ) + # self.log(train_files) + # self.log(len(train_files)) + # self.log(train_files[0]) + train_files = self.config.train_data_dict + + first_volume = 
LoadImaged(keys=["image"])(train_files[0]) + first_volume_shape = first_volume["image"].shape + + # Transforms to be applied to each volume + load_single_images = Compose( + [ + LoadImaged(keys=["image"]), + EnsureChannelFirstd(keys=["image"]), + Orientationd(keys=["image"], axcodes="PLI"), + SpatialPadd( + keys=["image"], + spatial_size=(utils.get_padding_dim(first_volume_shape)), + ), + EnsureTyped(keys=["image"]), + ] + ) + + if self.config.do_augmentation: + train_transforms = Compose( + [ + ScaleIntensityRanged( + keys=["image"], + a_min=0, + a_max=2000, + b_min=0.0, + b_max=1.0, + clip=True, + ), + RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), + RandRotate90d(keys=["image"], prob=0.1, max_k=3), + EnsureTyped(keys=["image"]), + ] + ) + else: + train_transforms = EnsureTyped(keys=["image"]) + + # Create the dataset + dataset = CacheDataset( + data=train_files, + transform=Compose([load_single_images, train_transforms]), + ) + + return first_volume_shape, dataset + + # def get_scheduler(self, optimizer, verbose=False): + # scheduler_name = self.config.scheduler + # if scheduler_name == "None": + # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + # optimizer, + # T_max=100, + # eta_min=config.lr - 1e-6, + # verbose=verbose, + # ) + # + # elif scheduler_name == "ReduceLROnPlateau": + # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + # optimizer, + # mode="min", + # factor=schedulers["ReduceLROnPlateau"]["factor"], + # patience=schedulers["ReduceLROnPlateau"]["patience"], + # verbose=verbose, + # ) + # elif scheduler_name == "CosineAnnealingLR": + # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( + # optimizer, + # T_max=schedulers["CosineAnnealingLR"]["T_max"], + # eta_min=schedulers["CosineAnnealingLR"]["eta_min"], + # verbose=verbose, + # ) + # elif scheduler_name == "CosineAnnealingWarmRestarts": + # 
scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( + # optimizer, + # T_0=schedulers["CosineAnnealingWarmRestarts"]["T_0"], + # eta_min=schedulers["CosineAnnealingWarmRestarts"]["eta_min"], + # T_mult=schedulers["CosineAnnealingWarmRestarts"]["T_mult"], + # verbose=verbose, + # ) + # elif scheduler_name == "CyclicLR": + # scheduler = torch.optim.lr_scheduler.CyclicLR( + # optimizer, + # base_lr=schedulers["CyclicLR"]["base_lr"], + # max_lr=schedulers["CyclicLR"]["max_lr"], + # step_size_up=schedulers["CyclicLR"]["step_size_up"], + # mode=schedulers["CyclicLR"]["mode"], + # cycle_momentum=False, + # ) + # else: + # raise ValueError(f"Scheduler {scheduler_name} not provided") + # return scheduler + def train(self): + if self.config is None: + self.config = config.WNetTrainingWorkerConfig() + ############## + # disable metadata tracking in MONAI + set_track_meta(False) + ############## + # if WANDB_INSTALLED: + # wandb.init( + # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE + # ) + + set_determinism( + seed=self.config.deterministic_config.seed + ) # use default seed from NP_MAX + torch.use_deterministic_algorithms(True, warn_only=True) + + normalize_function = self.config.normalizing_function + CUDA = torch.cuda.is_available() + device = torch.device("cuda" if CUDA else "cpu") + + self.log(f"Using device: {device}") + + self.log("Config:") + [self.log(str(a)) for a in self.config.__dict__.items()] + + self.log("Initializing training...") + self.log("Getting the data") + + if self.config.sampling: + (data_shape, dataset) = self.get_patch_dataset(self.config) + else: + (data_shape, dataset) = self.get_dataset(self.config) + transform = Compose( + [ + ToTensor(), + EnsureChannelFirst(channel_dim=0), + ] + ) + dataset = [transform(im) for im in dataset] + for data in dataset: + self.log(f"Data shape: {data.shape}") + break + + dataloader = DataLoader( + dataset, + batch_size=self.config.batch_size, + shuffle=True, + 
num_workers=self.config.num_workers, + collate_fn=pad_list_data_collate, + ) + + if self.config.eval_volume_dict is not None: + eval_dataset = self.get_patch_eval_dataset( + self.config.eval_volume_dict + ) # FIXME + + eval_dataloader = DataLoader( + eval_dataset, + batch_size=self.config.batch_size, + shuffle=False, + num_workers=self.config.num_workers, + collate_fn=pad_list_data_collate, + ) + + dice_metric = DiceMetric( + include_background=False, reduction="mean", get_not_nans=False + ) + ################################################### + # Training the model # + ################################################### + self.log("Initializing the model:") + + self.log("- getting the model") + # Initialize the model + model = WNet( + in_channels=self.config.in_channels, + out_channels=self.config.out_channels, + num_classes=self.config.num_classes, + dropout=self.config.dropout, + ) + model = ( + nn.DataParallel(model).cuda() + if CUDA and self.config.parallel + else model + ) + model.to(device) + + if self.config.use_clipping: + for p in model.parameters(): + p.register_hook( + lambda grad: torch.clamp( + grad, + min=-self.config.clipping, + max=self.config.clipping, + ) + ) + + if WANDB_INSTALLED: + wandb.watch(model, log_freq=100) + + if self.config.weights_info.path is not None: + model.load_state_dict( + torch.load(self.config.weights_info.path, map_location=device) + ) + + self.log("- getting the optimizers") + # Initialize the optimizers + if self.config.weight_decay is not None: + decay = self.config.weight_decay + optimizer = torch.optim.Adam( + model.parameters(), lr=self.config.lr, weight_decay=decay + ) + else: + optimizer = torch.optim.Adam(model.parameters(), lr=self.config.lr) + + self.log("- getting the loss functions") + # Initialize the Ncuts loss function + criterionE = SoftNCutsLoss( + data_shape=data_shape, + device=device, + intensity_sigma=self.config.intensity_sigma, + spatial_sigma=self.config.spatial_sigma, + radius=self.config.radius, 
+ ) + + if self.config.reconstruction_loss == "MSE": + criterionW = nn.MSELoss() + elif self.config.reconstruction_loss == "BCE": + criterionW = nn.BCELoss() + else: + raise ValueError( + f"Unknown reconstruction loss : {self.config.reconstruction_loss} not supported" + ) + + self.log("- getting the learning rate schedulers") + # Initialize the learning rate schedulers + # scheduler = get_scheduler(self.config, optimizer) + # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + # optimizer, mode="min", factor=0.5, patience=10, verbose=True + # ) + model.train() + + self.log("Ready") + self.log("Training the model") + self.log("*" * 50) + + startTime = time.time() + ncuts_losses = [] + rec_losses = [] + total_losses = [] + best_dice = -1 + + # Train the model + for epoch in range(self.config.num_epochs): + self.log(f"Epoch {epoch + 1} of {self.config.num_epochs}") + + epoch_ncuts_loss = 0 + epoch_rec_loss = 0 + epoch_loss = 0 + + for _i, batch in enumerate(dataloader): + # raise NotImplementedError("testing") + if self.config.sampling: + image = batch["image"].to(device) + else: + image = batch.to(device) + if self.config.batch_size == 1: + image = image.unsqueeze(0) + else: + image = image.unsqueeze(0) + image = torch.swapaxes(image, 0, 1) + + # Forward pass + enc = model.forward_encoder(image) + # Compute the Ncuts loss + Ncuts = criterionE(enc, image) + epoch_ncuts_loss += Ncuts.item() + # if WANDB_INSTALLED: + # wandb.log({"Ncuts loss": Ncuts.item()}) + + # Forward pass + enc, dec = model(image) + + # Compute the reconstruction loss + if isinstance(criterionW, nn.MSELoss): + reconstruction_loss = criterionW(dec, image) + elif isinstance(criterionW, nn.BCELoss): + reconstruction_loss = criterionW( + torch.sigmoid(dec), + utils.remap_image(image, new_max=1), + ) + + epoch_rec_loss += reconstruction_loss.item() + if WANDB_INSTALLED: + wandb.log( + {"Reconstruction loss": reconstruction_loss.item()} + ) + + # Backward pass for the reconstruction loss + 
optimizer.zero_grad() + alpha = self.config.n_cuts_weight + beta = self.config.rec_loss_weight + + loss = alpha * Ncuts + beta * reconstruction_loss + epoch_loss += loss.item() + # if WANDB_INSTALLED: + # wandb.log({"Sum of losses": loss.item()}) + loss.backward(loss) + optimizer.step() + + # if self.config.scheduler == "CosineAnnealingWarmRestarts": + # scheduler.step(epoch + _i / len(dataloader)) + # if ( + # self.config.scheduler == "CosineAnnealingLR" + # or self.config.scheduler == "CyclicLR" + # ): + # scheduler.step() + + ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) + rec_losses.append(epoch_rec_loss / len(dataloader)) + total_losses.append(epoch_loss / len(dataloader)) + + # if WANDB_INSTALLED: + # wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) + # wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) + # wandb.log({"Sum of losses_epoch": total_losses[-1]}) + # wandb.log({"epoch": epoch}) + # wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) + # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) + # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) + + self.log("Ncuts loss: " + str(ncuts_losses[-1])) + if epoch > 0: + self.log( + "Ncuts loss difference: " + + str(ncuts_losses[-1] - ncuts_losses[-2]) + ) + self.log("Reconstruction loss: " + str(rec_losses[-1])) + if epoch > 0: + self.log( + "Reconstruction loss difference: " + + str(rec_losses[-1] - rec_losses[-2]) + ) + self.log("Sum of losses: " + str(total_losses[-1])) + if epoch > 0: + self.log( + "Sum of losses difference: " + + str(total_losses[-1] - total_losses[-2]), + ) + + # Update the learning rate + # if self.config.scheduler == "ReduceLROnPlateau": + # # schedulerE.step(epoch_ncuts_loss) + # # schedulerW.step(epoch_rec_loss) + # scheduler.step(epoch_rec_loss) + if ( + self.config.eval_volume_directory is not None + and (epoch + 1) % self.config.val_interval == 0 + ): + model.eval() + self.log("Validating...") + with 
torch.no_grad(): + for _k, val_data in enumerate(eval_dataloader): + val_inputs, val_labels = ( + val_data["image"].to(device), + val_data["label"].to(device), + ) + + # normalize val_inputs across channels + for i in range(val_inputs.shape[0]): + for j in range(val_inputs.shape[1]): + val_inputs[i][j] = normalize_function( + val_inputs[i][j] + ) + + val_outputs = model.forward_encoder(val_inputs) + val_outputs = AsDiscrete(threshold=0.5)(val_outputs) + + # compute metric for current iteration + for channel in range(val_outputs.shape[1]): + max_dice_channel = torch.argmax( + torch.Tensor( + [ + utils.dice_coeff( + y_pred=val_outputs[ + :, + channel : (channel + 1), + :, + :, + :, + ], + y_true=val_labels, + ) + ] + ) + ) + + dice_metric( + y_pred=val_outputs[ + :, + max_dice_channel : (max_dice_channel + 1), + :, + :, + :, + ], + y=val_labels, + ) + + # aggregate the final mean dice result + metric = dice_metric.aggregate().item() + self.log("Validation Dice score: ", metric) + if best_dice < metric < 2: + best_dice = metric + epoch + 1 + if self.config.save_model: + save_best_path = Path( + self.config.save_model_path + ).parents[0] + save_best_path.mkdir(parents=True, exist_ok=True) + save_best_name = Path( + self.config.save_model_path + ).stem + save_path = ( + str(save_best_path / save_best_name) + + "_best_metric.pth" + ) + self.log(f"Saving new best model to {save_path}") + torch.save(model.state_dict(), save_path) + + if WANDB_INSTALLED: + # log validation dice score for each validation round + wandb.log({"val/dice_metric": metric}) + + # reset the status for next validation round + dice_metric.reset() + + eta = ( + (time.time() - startTime) + * (self.config.num_epochs / (epoch + 1) - 1) + / 60 + ) + self.log( + f"ETA: {eta} minutes", + ) + self.log("-" * 20) + + # Save the model # FIXME + if self.config.save_model and epoch % self.config.save_every == 0: + torch.save(model.state_dict(), self.config.save_model_path) + # with 
open(self.config.save_losses_path, "wb") as f: + # pickle.dump((ncuts_losses, rec_losses), f) + + self.log("Training finished") + self.log(f"Best dice metric : {best_dice}") + # if WANDB_INSTALLED and self.config.eval_volume_directory is not None: + # wandb.log( + # { + # "best_dice_metric": best_dice, + # "best_metric_epoch": best_dice_epoch, + # } + # ) + self.log("*" * 50) + + # Save the model FIXME + if self.config.save_model: + print("Saving the model to: ", self.config.save_model_path) + torch.save(model.state_dict(), self.config.save_model_path) + # with open(self.config.save_losses_path, "wb") as f: + # pickle.dump((ncuts_losses, rec_losses), f) + # if WANDB_INSTALLED: + # model_artifact = wandb.Artifact( + # "WNet", + # type="model", + # description="WNet benchmark", + # metadata=dict(WANDB_CONFIG), + # ) + # model_artifact.add_file(self.config.save_model_path) + # wandb.log_artifact(model_artifact) + + return ncuts_losses, rec_losses, model + + +class TrainingWorker(TrainingWorkerBase): + """A custom worker to run supervised training jobs in. + Inherits from :py:class:`napari.qt.threading.GeneratorWorker` via :py:class:`TrainingWorkerBase` + """ + + def __init__( + self, + worker_config: config.SupervisedTrainingWorkerConfig, ): """Initializes a worker for inference with the arguments needed by the :py:func:`~train` function. 
Note: See :py:func:`~train` @@ -116,21 +863,9 @@ def __init__( """ - super().__init__(self.train) - self._signals = LogSignal() - self.log_signal = self._signals.log_signal - self.warn_signal = self._signals.warn_signal - self.error_signal = self._signals.error_signal - - self._weight_error = False - ############################################# + super().__init__() # worker function is self.train in parent class self.config = worker_config - - self.train_files = [] - self.val_files = [] ####################################### - self.downloader = WeightsDownloader() - self.loss_dict = { "Dice": DiceLoss(sigmoid=True), # "BCELoss": torch.nn.BCELoss(), # dev @@ -150,29 +885,6 @@ def set_loss_from_config(self): self.raise_error(e, "Loss function not found, aborting job") return self.loss_function - def set_download_log(self, widget): - self.downloader.log_widget = widget - - def log(self, text): - """Sends a signal that ``text`` should be logged - - Args: - text (str): text to logged - """ - self.log_signal.emit(text) - - def warn(self, warning): - """Sends a warning to main thread""" - self.warn_signal.emit(warning) - - def raise_error(self, exception, msg): - """Sends an error to main thread""" - logger.error(msg, exc_info=True) - logger.error(exception, exc_info=True) - self.error_signal.emit(exception, msg) - self.errored.emit(exception) - self.quit() - def log_parameters(self): self.log("-" * 20) self.log("Parameters summary :\n") diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 80767396..e71f82cc 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -27,7 +27,7 @@ ) from napari_cellseg3d.code_models.workers_utils import TrainingReport -NUMBER_TABS = 3 # how many tabs in the widget +NUMBER_TABS = 4 # how many tabs in the widget DEFAULT_PATCH_SIZE = 64 # default patch size for training logger = utils.LOGGER 
@@ -37,7 +37,7 @@ class Trainer(ModelFramework, metaclass=ui.QWidgetSingleton): Features parameter selection for training, dynamic loss plotting and automatic saving of the best weights during training through validation.""" - default_config = config.TrainingWorkerConfig() + default_config = config.SupervisedTrainingWorkerConfig() def __init__( self, @@ -159,8 +159,8 @@ def __init__( # self.model_choice.setCurrentIndex(0) ################### # TODO(cyril) : disable if we implement WNet training - wnet_index = self.model_choice.findText("WNet") - self.model_choice.removeItem(wnet_index) + # wnet_index = self.model_choice.findText("WNet") + # self.model_choice.removeItem(wnet_index) ################################ # interface @@ -275,7 +275,7 @@ def __init__( "Deterministic training", func=self._toggle_deterministic_param ) self.box_seed = ui.IntIncrementCounter( - upper=10000000, + upper=1000000000, default=self.default_config.deterministic_config.seed, ) self.lbl_seed = ui.make_label("Seed", self) @@ -286,68 +286,85 @@ def __init__( self.progress.setVisible(False) """Dock widget containing the progress bar""" - self.btn_start = ui.Button("Start training", self.start) - - # self.btn_model_path.setVisible(False) - # self.lbl_model_path.setVisible(False) - + self.start_button_supervised = None # button created later and only shown if supervised model is selected + self.loss_group = None # group box created later and only shown if supervised model is selected ############################ ############################ - def set_tooltips(): - # tooltips - self.zip_choice.setToolTip( - "Checking this will save a copy of the results as a zip folder" - ) - self.validation_percent_choice.tooltips = "Choose the proportion of images to retain for training.\nThe remaining images will be used for validation" - self.epoch_choice.tooltips = "The number of epochs to train for.\nThe more you train, the better the model will fit the training data" - self.loss_choice.setToolTip( - 
"The loss function to use for training.\nSee the list in the training guide for more info" - ) - self.sample_choice_slider.tooltips = ( - "The number of samples to extract per image" - ) - self.batch_choice.tooltips = ( - "The batch size to use for training.\n A larger value will feed more images per iteration to the model,\n" - " which is faster and possibly improves performance, but uses more memory" - ) - self.val_interval_choice.tooltips = ( - "The number of epochs to perform before validating data.\n " - "The lower the value, the more often the score of the model will be computed and the more often the weights will be saved." - ) - self.learning_rate_choice.setToolTip( - "The learning rate to use in the optimizer. \nUse a lower value if you're using pre-trained weights" - ) - self.scheduler_factor_choice.setToolTip( - "The factor by which to reduce the learning rate once the loss reaches a plateau" - ) - self.scheduler_patience_choice.setToolTip( - "The amount of epochs to wait for before reducing the learning rate" - ) - self.augment_choice.setToolTip( - "Check this to enable data augmentation, which will randomly deform, flip and shift the intensity in images" - " to provide a more general dataset. \nUse this if you're extracting more than 10 samples per image" - ) - [ - w.setToolTip("Size of the sample to extract") - for w in self.patch_size_widgets - ] - self.patch_choice.setToolTip( - "Check this to automatically crop your images in smaller, cubic images for training." - "\nShould be used if you have a small dataset (and large images)" - ) - self.use_deterministic_choice.setToolTip( - "Enable deterministic training for reproducibility." - "Using the same seed with all other parameters being similar should yield the exact same results between two runs." - ) - self.use_transfer_choice.setToolTip( - "Use this you want to initialize the model with pre-trained weights or use your own weights." 
- ) - self.box_seed.setToolTip("Seed to use for RNG") - + # WNet parameters + self.wnet_widgets = ( + None # widgets created later and only shown if WNet is selected + ) + self.advanced_next_button = ( + None # button created later and only shown if WNet is selected + ) + self.start_button_unsupervised = ( + None # button created later and only shown if WNet is selected + ) + ############################ + # self.btn_model_path.setVisible(False) + # self.lbl_model_path.setVisible(False) ############################ ############################ - set_tooltips() + self._set_tooltips() self._build() + self.model_choice.currentTextChanged.connect( + self._toggle_unsupervised_mode + ) + self._toggle_unsupervised_mode() + + def _set_tooltips(self): + # tooltips + self.zip_choice.setToolTip( + "Checking this will save a copy of the results as a zip folder" + ) + self.validation_percent_choice.tooltips = "Choose the proportion of images to retain for training.\nThe remaining images will be used for validation" + self.epoch_choice.tooltips = "The number of epochs to train for.\nThe more you train, the better the model will fit the training data" + self.loss_choice.setToolTip( + "The loss function to use for training.\nSee the list in the training guide for more info" + ) + self.sample_choice_slider.tooltips = ( + "The number of samples to extract per image" + ) + self.batch_choice.tooltips = ( + "The batch size to use for training.\n A larger value will feed more images per iteration to the model,\n" + " which is faster and possibly improves performance, but uses more memory" + ) + self.val_interval_choice.tooltips = ( + "The number of epochs to perform before validating data.\n " + "The lower the value, the more often the score of the model will be computed and the more often the weights will be saved." + ) + self.learning_rate_choice.setToolTip( + "The learning rate to use in the optimizer. 
\nUse a lower value if you're using pre-trained weights" + ) + self.scheduler_factor_choice.setToolTip( + "The factor by which to reduce the learning rate once the loss reaches a plateau" + ) + self.scheduler_patience_choice.setToolTip( + "The amount of epochs to wait for before reducing the learning rate" + ) + self.augment_choice.setToolTip( + "Check this to enable data augmentation, which will randomly deform, flip and shift the intensity in images" + " to provide a more general dataset. \nUse this if you're extracting more than 10 samples per image" + ) + [ + w.setToolTip("Size of the sample to extract") + for w in self.patch_size_widgets + ] + self.patch_choice.setToolTip( + "Check this to automatically crop your images in smaller, cubic images for training." + "\nShould be used if you have a small dataset (and large images)" + ) + self.use_deterministic_choice.setToolTip( + "Enable deterministic training for reproducibility." + "Using the same seed with all other parameters being similar should yield the exact same results between two runs." + ) + self.use_transfer_choice.setToolTip( + "Use this you want to initialize the model with pre-trained weights or use your own weights." 
+ ) + self.box_seed.setToolTip("Seed to use for RNG") + + def _make_start_button(self): + return ui.Button("Start training", self.start, parent=self) def _hide_unused(self): [ @@ -411,6 +428,33 @@ def check_ready(self): return False return True + def _toggle_unsupervised_mode(self): + """Change all the UI elements needed for unsupervised learning mode""" + if self.model_choice.currentText() == "WNet": + self.setTabVisible(3, True) + self.setTabEnabled(3, True) + self.start_button_unsupervised.setVisible(True) + self.start_button_supervised.setVisible(False) + self.advanced_next_button.setVisible(True) + self.start_btn = self.start_button_unsupervised + # loss + # self.loss_choice.setVisible(False) + self.loss_group.setVisible(False) + self.scheduler_factor_choice.setVisible(False) + self.scheduler_patience_choice.setVisible(False) + else: + self.setTabVisible(3, False) + self.setTabEnabled(3, False) + self.start_button_unsupervised.setVisible(False) + self.start_button_supervised.setVisible(True) + self.advanced_next_button.setVisible(False) + self.start_btn = self.start_button_supervised + # loss + # self.loss_choice.setVisible(True) + self.loss_group.setVisible(True) + self.scheduler_factor_choice.setVisible(True) + self.scheduler_patience_choice.setVisible(True) + def _build(self): """Builds the layout of the widget and creates the following tabs and prompts: @@ -453,48 +497,16 @@ def _build(self): ######## ################ ######################## - # first tab : model and dataset choices - data_tab = ui.ContainerWidget() - ################ - # first group : Data - data_group, data_layout = ui.make_group("Data") - - ui.add_widgets( - data_layout, - [ - # ui.combine_blocks( - # self.filetype_choice, self.filetype_choice.label - # ), # file extension - self.image_filewidget, - self.labels_filewidget, - self.results_filewidget, - # ui.combine_blocks(self.model_choice, self.model_choice.label), # model choice - # TODO : add custom model choice - self.zip_choice, # 
save as zip - ], - ) - - for w in [ - self.image_filewidget, - self.labels_filewidget, - self.results_filewidget, - ]: - w.check_ready() - - if self.data_path is not None: - self.image_filewidget.text_field.setText(self.data_path) - - if self.label_path is not None: - self.labels_filewidget.text_field.setText(self.label_path) - - if self.results_path is not None: - self.results_filewidget.text_field.setText(self.results_path) - - data_group.setLayout(data_layout) - data_tab.layout.addWidget(data_group, alignment=ui.LEFT_AL) - # end of first group : Data + # first tab : model, weights and device choices + model_tab = ui.ContainerWidget() ################ - ui.add_blank(widget=data_tab, layout=data_tab.layout) + ui.GroupedWidget.create_single_widget_group( + "Model", + self.model_choice, + model_tab.layout, + ) # model choice + self.model_choice.label.setVisible(False) + ui.add_blank(model_tab, model_tab.layout) ################ transfer_group_w, transfer_group_l = ui.make_group("Transfer learning") @@ -510,26 +522,21 @@ def _build(self): self.weights_filewidget.setVisible(False) transfer_group_w.setLayout(transfer_group_l) - data_tab.layout.addWidget(transfer_group_w, alignment=ui.LEFT_AL) + model_tab.layout.addWidget(transfer_group_w, alignment=ui.LEFT_AL) ################ - ui.add_blank(self, data_tab.layout) + ui.add_blank(self, model_tab.layout) ################ - ui.GroupedWidget.create_single_widget_group( - "Validation (%)", - self.validation_percent_choice.container, - data_tab.layout, - ) ui.GroupedWidget.create_single_widget_group( "Device", self.device_choice, - data_tab.layout, + model_tab.layout, ) ################ - ui.add_blank(self, data_tab.layout) + ui.add_blank(self, model_tab.layout) ################ # buttons ui.add_widgets( - data_tab.layout, + model_tab.layout, [ self._make_next_button(), # next ui.add_blank(self), @@ -539,13 +546,54 @@ def _build(self): ################## ############ ###### - # second tab : image sizes, data augmentation, 
patches size and behaviour + # Second tab : image sizes, data augmentation, patches size and behaviour ###### ############ ################## - augment_tab_w = ui.ContainerWidget() - augment_tab_l = augment_tab_w.layout + data_tab_w = ui.ContainerWidget() + data_tab_l = data_tab_w.layout ################## + ################ + # group : Data + data_group, data_layout = ui.make_group("Data") + + ui.add_widgets( + data_layout, + [ + # ui.combine_blocks( + # self.filetype_choice, self.filetype_choice.label + # ), # file extension + self.image_filewidget, + self.labels_filewidget, + self.results_filewidget, + # ui.combine_blocks(self.model_choice, self.model_choice.label), # model choice + # TODO : add custom model choice + self.zip_choice, # save as zip + ], + ) + + for w in [ + self.image_filewidget, + self.labels_filewidget, + self.results_filewidget, + ]: + w.check_ready() + + if self.data_path is not None: + self.image_filewidget.text_field.setText(self.data_path) + + if self.label_path is not None: + self.labels_filewidget.text_field.setText(self.label_path) + + if self.results_path is not None: + self.results_filewidget.text_field.setText(self.results_path) + + data_group.setLayout(data_layout) + data_tab_l.addWidget(data_group, alignment=ui.LEFT_AL) + # end of first group : Data + ################ + ui.add_blank(widget=data_tab_w, layout=data_tab_l) + ################ # extract patches or not patch_size_w = ui.ContainerWidget() @@ -579,27 +627,36 @@ def _build(self): horizontal=False, ) ui.GroupedWidget.create_single_widget_group( - "Sampling", sampling, augment_tab_l, b=0, t=11 + "Sampling", sampling, data_tab_l, b=0, t=11 ) ####################### ####################### - ui.add_blank(augment_tab_w, augment_tab_l) + ui.add_blank(data_tab_w, data_tab_l) ####################### ####################### ui.GroupedWidget.create_single_widget_group( "Augmentation", self.augment_choice, - augment_tab_l, + data_tab_l, ) # augment data toggle 
self.augment_choice.toggle() ####################### + ui.add_blank(data_tab_w, data_tab_l) ####################### - ui.add_blank(augment_tab_w, augment_tab_l) + ui.GroupedWidget.create_single_widget_group( + "Validation (%)", + self.validation_percent_choice.container, + data_tab_l, + ) + ####################### ####################### - augment_tab_l.addWidget( + ui.add_blank(self, data_tab_l) + ####################### + ####################### + data_tab_l.addWidget( ui.combine_blocks( left_or_above=self._make_prev_button(), right_or_below=self._make_next_button(), @@ -608,40 +665,29 @@ def _build(self): alignment=ui.LEFT_AL, ) - augment_tab_l.addWidget(self.close_buttons[1], alignment=ui.LEFT_AL) + data_tab_l.addWidget(self.close_buttons[1], alignment=ui.LEFT_AL) ################## ############ ###### - # third tab : training parameters + # Third tab : training parameters ###### ############ ################## train_tab = ui.ContainerWidget() ################## - # solo groups for loss and model ui.add_blank(train_tab, train_tab.layout) - - ui.GroupedWidget.create_single_widget_group( - "Model", - self.model_choice, - train_tab.layout, - ) # model choice - self.model_choice.label.setVisible(False) - - ui.add_blank(train_tab, train_tab.layout) - ui.GroupedWidget.create_single_widget_group( + ################## + self.loss_group = ui.GroupedWidget.create_single_widget_group( "Loss", self.loss_choice, train_tab.layout, ) # loss choice self.lbl_loss_choice.setVisible(False) - - # end of solo groups for loss and model + # end of solo groups for loss ################## ui.add_blank(train_tab, train_tab.layout) ################## # training params group - train_param_group_w, train_param_group_l = ui.make_group( "Training parameters", r=1, b=5, t=11 ) @@ -679,24 +725,29 @@ def _build(self): [self.use_deterministic_choice, self.container_seed], ui.LEFT_AL, ) - # self.container_seed.setVisible(False) self.use_deterministic_choice.setChecked(True) - 
seed_w.setLayout(seed_l) train_tab.layout.addWidget(seed_w) - # end of deterministic choice group ################## # buttons ui.add_blank(self, train_tab.layout) + self.advanced_next_button = self._make_next_button() + self.advanced_next_button.setVisible(False) + self.start_button_supervised = self._make_start_button() + ui.add_widgets( train_tab.layout, [ - self._make_prev_button(), # previous - self.btn_start, # start + ui.combine_blocks( + left_or_above=self._make_prev_button(), # previous + right_or_below=self.advanced_next_button, # next (only if unsupervised) + l=1, + ), + self.start_button_supervised, # start ui.add_blank(self), self.close_buttons[2], ], @@ -704,17 +755,105 @@ def _build(self): ################## ############ ###### - # end of tab layouts + # Fourth tab : advanced parameters (unsupervised only) + ###### + ############ + ################## + advanced_tab = ui.ContainerWidget(parent=self) + self.wnet_widgets = ui.WNetWidgets(parent=advanced_tab) + ui.add_blank(advanced_tab, advanced_tab.layout) + ################## + model_params_group_w, model_params_group_l = ui.make_group( + "WNet parameters", r=20, b=5, t=11 + ) + ui.add_widgets( + model_params_group_l, + [ + self.wnet_widgets.num_classes_choice.label, + self.wnet_widgets.num_classes_choice, + self.wnet_widgets.loss_choice.label, + self.wnet_widgets.loss_choice, + ], + ) + model_params_group_w.setLayout(model_params_group_l) + advanced_tab.layout.addWidget(model_params_group_w) + ################## + ui.add_blank(advanced_tab, advanced_tab.layout) + ################## + ncuts_loss_params_group_w, ncuts_loss_params_group_l = ui.make_group( + "NCuts loss parameters", r=35, b=5, t=11 + ) + ui.add_widgets( + ncuts_loss_params_group_l, + [ + self.wnet_widgets.intensity_sigma_choice.label, + self.wnet_widgets.intensity_sigma_choice, + self.wnet_widgets.spatial_sigma_choice.label, + self.wnet_widgets.spatial_sigma_choice, + self.wnet_widgets.radius_choice.label, + 
self.wnet_widgets.radius_choice, + ], + ) + ncuts_loss_params_group_w.setLayout(ncuts_loss_params_group_l) + advanced_tab.layout.addWidget(ncuts_loss_params_group_w) + ################## + ui.add_blank(advanced_tab, advanced_tab.layout) + ################## + losses_weights_group_w, losses_weights_group_l = ui.make_group( + "Losses weights", r=1, b=5, t=11 + ) + + # container for reconstruction weight and divide factor + reconstruction_weight_container = ui.ContainerWidget( + vertical=False, parent=losses_weights_group_w + ) + ui.add_widgets( + reconstruction_weight_container.layout, + [ + self.wnet_widgets.reconstruction_weight_choice, + ui.make_label(" / "), + self.wnet_widgets.reconstruction_weight_divide_factor_choice, + ], + ) + ui.add_widgets( + losses_weights_group_l, + [ + self.wnet_widgets.ncuts_weight_choice.label, + self.wnet_widgets.ncuts_weight_choice, + self.wnet_widgets.reconstruction_weight_choice.label, + reconstruction_weight_container, + ], + ) + losses_weights_group_w.setLayout(losses_weights_group_l) + advanced_tab.layout.addWidget(losses_weights_group_w) + ################## + ui.add_blank(advanced_tab, advanced_tab.layout) + ################## + # buttons + self.start_button_unsupervised = self._make_start_button() + ui.add_widgets( + advanced_tab.layout, + [ + self._make_prev_button(), # previous + self.start_button_unsupervised, # start + ui.add_blank(self), + self.close_buttons[3], + ], + ) + ################## + ############ + ###### + # end of tab layouts ui.ScrollArea.make_scrollable( - contained_layout=data_tab.layout, - parent=data_tab, + contained_layout=model_tab.layout, + parent=model_tab, min_wh=[200, 300], ) # , max_wh=[200,1000]) ui.ScrollArea.make_scrollable( - contained_layout=augment_tab_l, - parent=augment_tab_w, + contained_layout=data_tab_l, + parent=data_tab_w, min_wh=[200, 300], ) @@ -723,30 +862,28 @@ def _build(self): parent=train_tab, min_wh=[200, 300], ) - self.addTab(data_tab, "Data") - self.addTab(augment_tab_w, 
"Augmentation") + ui.ScrollArea.make_scrollable( + contained_layout=advanced_tab.layout, + parent=advanced_tab, + min_wh=[200, 300], + ) + + self.addTab(model_tab, "Model") + self.addTab(data_tab_w, "Data") self.addTab(train_tab, "Training") + self.addTab(advanced_tab, "Advanced") self.setMinimumSize(220, 100) self._hide_unused() default_results_path = ( - config.TrainingWorkerConfig().results_path_folder + config.SupervisedTrainingWorkerConfig().results_path_folder ) self.results_filewidget.text_field.setText(default_results_path) self.results_filewidget.check_ready() self._check_results_path(default_results_path) self.results_path = default_results_path - # def _show_dialog_lab(self): - # """Shows the dialog to load label files in a path, loads them (see :doc:model_framework) and changes the widget - # label :py:attr:`self.label_filewidget.text_field` accordingly""" - # folder = ui.open_folder_dialog(self, self._default_path) - # - # if folder: - # self.label_path = folder - # self.labels_filewidget.text_field.setText(self.label_path) - def send_log(self, text): """Sends a message via the Log attribute""" self.log.print_and_log(text) @@ -790,7 +927,7 @@ def start(self): pass else: self.worker.start() - self.btn_start.setText("Running... Click to stop") + self.start_btn.setText("Running... Click to stop") else: # starting a new job goes here self.log.print_and_log("Starting...") self.log.print_and_log("*" * 20) @@ -806,7 +943,7 @@ def start(self): self.config = config.TrainerConfig( save_as_zip=self.zip_choice.isChecked() ) - self._set_worker_config() + self._set_supervised_worker_config() self.worker = TrainingWorker(worker_config=self.worker_config) self.worker.set_download_log(self.log) @@ -829,15 +966,15 @@ def start(self): f"Stop requested at {utils.get_time()}. \nWaiting for next yielding step..." ) self.stop_requested = True - self.btn_start.setText("Stopping... Please wait") + self.start_btn.setText("Stopping... 
Please wait") self.log.print_and_log("*" * 20) self.worker.quit() else: self.worker.start() - self.btn_start.setText("Running... Click to stop") + self.start_btn.setText("Running... Click to stop") - def _create_worker_from_config( - self, worker_config: config.TrainingWorkerConfig + def _create_supervised_worker_from_config( + self, worker_config: config.SupervisedTrainingWorkerConfig ): if isinstance(config, config.TrainerConfig): raise TypeError( @@ -845,7 +982,9 @@ def _create_worker_from_config( ) return TrainingWorker(worker_config=worker_config) - def _set_worker_config(self) -> config.TrainingWorkerConfig: + def _set_supervised_worker_config( + self, + ) -> config.SupervisedTrainingWorkerConfig: model_config = config.ModelInfo(name=self.model_choice.currentText()) self.weights_config.path = self.weights_config.path @@ -875,7 +1014,7 @@ def _set_worker_config(self) -> config.TrainingWorkerConfig: patch_size = [w.value() for w in self.patch_size_widgets] logger.debug("Loading config...") - self.worker_config = config.TrainingWorkerConfig( + self.worker_config = config.SupervisedTrainingWorkerConfig( device=self.check_device_choice(), model_info=model_config, weights_info=self.weights_config, @@ -938,7 +1077,7 @@ def on_finish(self): except ValueError as e: logger.warning(f"Error while saving CSV report: {e}") - self.btn_start.setText("Start") + self.start_btn.setText("Start") [btn.setVisible(True) for btn in self.close_buttons] # del self.worker @@ -974,7 +1113,7 @@ def on_error(self): def on_stop(self): self._remove_result_layers() self.worker = None - self.btn_start.setText("Start") + self.start_btn.setText("Start") [btn.setVisible(True) for btn in self.close_buttons] def _remove_result_layers(self): diff --git a/napari_cellseg3d/config.py b/napari_cellseg3d/config.py index b05f7ac7..84f6468c 100644 --- a/napari_cellseg3d/config.py +++ b/napari_cellseg3d/config.py @@ -14,7 +14,7 @@ from napari_cellseg3d.code_models.models.model_TRAILMAP_MS import 
TRAILMAP_MS_ from napari_cellseg3d.code_models.models.model_VNet import VNet_ from napari_cellseg3d.code_models.models.model_WNet import WNet_ -from napari_cellseg3d.utils import LOGGER +from napari_cellseg3d.utils import LOGGER, remap_image logger = LOGGER @@ -37,7 +37,8 @@ ################ -# Review +# Review # +################ @dataclass @@ -61,13 +62,14 @@ class ReviewSession: time_taken: datetime.timedelta -################ -# Model & weights +################### +# Model & weights # +################### @dataclass class ModelInfo: - """Dataclass recording model info + """Dataclass recording supervised models info Args: name (str): name of the model model_input_size (Optional[List[int]]): input size of the model @@ -102,8 +104,9 @@ class WeightsInfo: use_pretrained: Optional[bool] = False -################ -# Post processing & instance segmentation +############################################# +# Post processing & instance segmentation # +############################################# @dataclass @@ -153,8 +156,9 @@ class CRFConfig: n_iters: int = 5 -################ -# Inference configs +##################### +# Inference configs # +##################### @dataclass @@ -219,8 +223,9 @@ class InferenceWorkerConfig: layer: napari.layers.Layer = None -################ -# Training configs +#################### +# Training configs # +#################### @dataclass @@ -228,7 +233,7 @@ class DeterministicConfig: """Class to record deterministic config""" enabled: bool = False - seed: int = 23498 + seed: int = 34936339 # default seed from NP_MAX @dataclass @@ -240,26 +245,65 @@ class TrainerConfig: @dataclass class TrainingWorkerConfig: - """Class to record config for Trainer plugin""" + """General class to record config for training""" + # model params device: str = "cpu" - model_info: ModelInfo = None - weights_info: WeightsInfo = None - train_data_dict: dict = None - validation_percent: float = 0.8 max_epochs: int = 50 - loss_function: callable = None 
learning_rate: np.float64 = 1e-3 - scheduler_patience: int = 10 - scheduler_factor: float = 0.5 validation_interval: int = 2 batch_size: int = 1 + deterministic_config: DeterministicConfig = DeterministicConfig() + scheduler_factor: float = 0.5 + scheduler_patience: int = 10 + weights_info: WeightsInfo = None + # data params results_path_folder: str = str(Path.home() / Path("cellseg3d/training")) sampling: bool = False num_samples: int = 2 sample_size: List[int] = None do_augmentation: bool = True - deterministic_config: DeterministicConfig = DeterministicConfig() + num_workers: int = 4 + train_data_dict: dict = None + + +@dataclass +class SupervisedTrainingWorkerConfig(TrainingWorkerConfig): + """Class to record config for Trainer plugin""" + + model_info: ModelInfo = None + loss_function: callable = None + validation_percent: float = 0.8 + + +@dataclass +class WNetTrainingWorkerConfig(TrainingWorkerConfig): + """Class to record config for WNet worker""" + + # model params + in_channels: int = 1 # encoder input channels + out_channels: int = 1 # decoder (reconstruction) output channels + num_classes: int = 2 # encoder output channels + dropout: float = 0.65 + use_clipping: bool = False # use gradient clipping + clipping: float = 1.0 # clipping value + # NCuts loss params + intensity_sigma: float = 1.0 + spatial_sigma: float = 4.0 + radius: int = 2 # pixel radius for loss computation; might be overriden depending on data shape + # reconstruction loss params + reconstruction_loss: str = "MSE" # or "BCE" + # summed losses weights + n_cuts_weight: float = 0.5 + rec_loss_weight: float = ( + 0.5 / 100 + ) # must be adjusted depending on images; compare to NCuts loss value + # normalization params + normalizing_function: callable = remap_image + # data params + train_data_dict: dict = None + eval_volume_dict: str = None + eval_num_patches: int = 10 ################ @@ -269,7 +313,7 @@ class TrainingWorkerConfig: @dataclass class WNetCRFConfig: - "Class to store 
parameters of WNet CRF post processing" + """Class to store parameters of WNet CRF post-processing""" # CRF sa = 10 # 50 diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index 22f6a4a3..e5f448f3 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -8,8 +8,6 @@ # Qt # from qtpy.QtCore import QtWarningMsg from qtpy import QtCore - -# from qtpy.QtCore import QtWarningMsg from qtpy.QtCore import QObject, Qt, QUrl from qtpy.QtGui import QCursor, QDesktopServices, QTextCursor from qtpy.QtWidgets import ( @@ -38,6 +36,7 @@ # Local from napari_cellseg3d import utils +from napari_cellseg3d.config import WNetTrainingWorkerConfig """ User interface functions and aliases""" @@ -1329,9 +1328,10 @@ def create_single_widget_group( alignment=LEFT_AL, ): group = cls(title, l, t, r, b) - group.layout.addWidget(widget) + group.layout.addWidget(widget, alignment=alignment) group.setLayout(group.layout) layout.addWidget(group, alignment=alignment) + return group def add_widgets(layout, widgets, alignment=LEFT_AL): @@ -1417,3 +1417,96 @@ def open_url(url): url (str): Url to be opened """ QDesktopServices.openUrl(QUrl(url, QUrl.TolerantMode)) + + +class WNetWidgets: + """A collection of widgets for the WNet training GUI""" + + default_config = WNetTrainingWorkerConfig() + + def __init__(self, parent): + self.num_classes_choice = DropdownMenu( + entries=["2", "3", "4"], + parent=parent, + text_label="Number of classes", + ) + self.intensity_sigma_choice = DoubleIncrementCounter( + lower=1.0, + upper=100.0, + default=self.default_config.intensity_sigma, + parent=parent, + text_label="Intensity sigma", + ) + self.intensity_sigma_choice.setMaximumWidth(20) + self.spatial_sigma_choice = DoubleIncrementCounter( + lower=1.0, + upper=100.0, + default=self.default_config.spatial_sigma, + parent=parent, + text_label="Spatial sigma", + ) + self.spatial_sigma_choice.setMaximumWidth(20) + self.radius_choice = IntIncrementCounter( + lower=1, + 
upper=5, + default=self.default_config.radius, + parent=parent, + text_label="Radius", + ) + self.radius_choice.setMaximumWidth(20) + self.loss_choice = DropdownMenu( + entries=["MSE", "BCE"], parent=parent, text_label="Loss function" + ) + self.ncuts_weight_choice = DoubleIncrementCounter( + lower=0.1, + upper=1.0, + default=self.default_config.n_cuts_weight, + parent=parent, + text_label="NCuts weight", + ) + self.reconstruction_weight_choice = DoubleIncrementCounter( + lower=0.1, + upper=1.0, + default=0.5, + parent=parent, + text_label="Reconstruction weight", + ) + self.reconstruction_weight_choice.setMaximumWidth(20) + self.reconstruction_weight_divide_factor_choice = IntIncrementCounter( + lower=1, + upper=10000, + default=100, + parent=parent, + text_label="Reconstruction weight divide factor", + ) + self.reconstruction_weight_divide_factor_choice.setMaximumWidth(20) + self.evaluation_patches_choice = Slider( + lower=1, + upper=100, + default=self.default_config.eval_num_patches, + parent=parent, + text_label="Number of patches for evaluation", + ) + + self._set_tooltips() + + def _set_tooltips(self): + self.num_classes_choice.setToolTip("Number of classes to segment") + self.intensity_sigma_choice.setToolTip( + "Intensity sigma for the NCuts loss" + ) + self.spatial_sigma_choice.setToolTip( + "Spatial sigma for the NCuts loss" + ) + self.radius_choice.setToolTip("Radius of NCuts loss region") + self.loss_choice.setToolTip("Loss function to use for reconstruction") + self.ncuts_weight_choice.setToolTip("Weight of the NCuts loss") + self.reconstruction_weight_choice.setToolTip( + "Weight of the reconstruction loss" + ) + self.reconstruction_weight_divide_factor_choice.setToolTip( + "Divide factor for the reconstruction loss.\nThis might have to be changed depending on your images.\nIf you notice that the reconstruction loss is too high, raise this factor until the\nreconstruction loss is in the same order of magnitude as the NCuts loss." 
+ ) + self.evaluation_patches_choice.setToolTip( + "Number of patches to use for evaluation" + ) From fdcf797fc9c3fcbf4131aa0ae004c4ff54bfc7e0 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 27 Jul 2023 17:23:16 +0200 Subject: [PATCH 08/70] Workable WNet training prototype --- napari_cellseg3d/_tests/test_inference.py | 23 +- .../_tests/test_model_framework.py | 3 +- napari_cellseg3d/_tests/test_training.py | 135 +- .../code_models/model_framework.py | 26 + .../code_models/models/wnet/model.py | 66 +- .../code_models/models/wnet/train_wnet.py | 1983 +++++++++-------- .../code_models/worker_training.py | 973 ++++---- napari_cellseg3d/code_models/workers_utils.py | 10 +- napari_cellseg3d/code_plugins/plugin_base.py | 37 +- .../code_plugins/plugin_model_training.py | 704 +++--- napari_cellseg3d/config.py | 12 +- napari_cellseg3d/interface.py | 111 +- 12 files changed, 2175 insertions(+), 1908 deletions(-) diff --git a/napari_cellseg3d/_tests/test_inference.py b/napari_cellseg3d/_tests/test_inference.py index 336630f5..f5a89b14 100644 --- a/napari_cellseg3d/_tests/test_inference.py +++ b/napari_cellseg3d/_tests/test_inference.py @@ -23,7 +23,7 @@ def test_onnx_inference(make_napari_viewer_proxy): path = str(Path(PRETRAINED_WEIGHTS_DIR).resolve() / "wnet.onnx") assert Path(path).is_file() dims = 64 - batch = 2 + batch = 1 x = torch.randn(size=(batch, 1, dims, dims, dims)) worker = ONNXModelWrapper(file_location=path) assert worker.eval() is None @@ -66,16 +66,27 @@ def test_inference_on_folder(): config.images_filepaths = [ str(Path(__file__).resolve().parent / "res/test.tif") ] + config.sliding_window_config.window_size = 8 - def mock_work(x): - return x + class mock_work: + @staticmethod + def eval(): + return True + + def __call__(self, x): + return torch.Tensor(x) worker = InferenceWorker(worker_config=config) - worker.aniso_transform = mock_work + worker.aniso_transform = mock_work() - image = torch.Tensor(rand_gen.random((1, 1, 64, 64, 64))) + image = 
torch.Tensor(rand_gen.random(size=(1, 1, 8, 8, 8))) + assert image.shape == (1, 1, 8, 8, 8) + assert image.dtype == torch.float32 res = worker.inference_on_folder( - {"image": image}, 0, model=mock_work, post_process_transforms=mock_work + {"image": image}, + 0, + model=mock_work(), + post_process_transforms=mock_work(), ) assert isinstance(res, InferenceResult) diff --git a/napari_cellseg3d/_tests/test_model_framework.py b/napari_cellseg3d/_tests/test_model_framework.py index 497d97e8..0a078273 100644 --- a/napari_cellseg3d/_tests/test_model_framework.py +++ b/napari_cellseg3d/_tests/test_model_framework.py @@ -17,7 +17,7 @@ def test_update_default(make_napari_viewer_proxy): widget._update_default_paths() - assert widget._default_path == [None, None, None] + assert widget._default_path == [None, None, None, None] widget.images_filepaths = [ pth("C:/test/test/images.tif"), @@ -36,6 +36,7 @@ def test_update_default(make_napari_viewer_proxy): pth("C:/test/test"), pth("C:/dataset/labels"), pth("D:/dataset/res"), + None, ] diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index ac5d32a7..c5737f11 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -9,61 +9,87 @@ ) from napari_cellseg3d.config import MODEL_LIST +im_path = Path(__file__).resolve().parent / "res/test.tif" +im_path_str = str(im_path) + + +def test_create_supervised_worker_from_config(make_napari_viewer_proxy): + widget = Trainer(make_napari_viewer_proxy()) + worker = widget._create_worker() + default_config = config.SupervisedTrainingWorkerConfig() + excluded = [ + "results_path_folder", + "loss_function", + "model_info", + "sample_size", + "weights_info", + ] + for attr in dir(default_config): + if not attr.startswith("__") and attr not in excluded: + assert getattr(default_config, attr) == getattr( + worker.config, attr + ) + + +def test_create_unspervised_worker_from_config(make_napari_viewer_proxy): + 
widget = Trainer(make_napari_viewer_proxy()) + widget.model_choice.setCurrentText("WNet") + widget._toggle_unsupervised_mode(enabled=True) + default_config = config.WNetTrainingWorkerConfig() + worker = widget._create_worker() + excluded = ["results_path_folder", "sample_size", "weights_info"] + for attr in dir(default_config): + if not attr.startswith("__") and attr not in excluded: + assert getattr(default_config, attr) == getattr( + worker.config, attr + ) + def test_update_loss_plot(make_napari_viewer_proxy): view = make_napari_viewer_proxy() widget = Trainer(view) widget.worker_config = config.SupervisedTrainingWorkerConfig() + assert widget._is_current_job_supervised() is True widget.worker_config.validation_interval = 1 widget.worker_config.results_path_folder = "." - epoch_loss_values = [1] + epoch_loss_values = {"loss": [1]} metric_values = [] - widget.update_loss_plot(epoch_loss_values, metric_values) - - assert widget.dice_metric_plot is None - assert widget.train_loss_plot is None + assert widget.plot_2 is None + assert widget.plot_1 is None widget.worker_config.validation_interval = 2 - epoch_loss_values = [0, 1] + epoch_loss_values = {"loss": [0, 1]} metric_values = [0.2] - widget.update_loss_plot(epoch_loss_values, metric_values) + assert widget.plot_2 is None + assert widget.plot_1 is None - assert widget.dice_metric_plot is None - assert widget.train_loss_plot is None - - epoch_loss_values = [0, 1, 0.5, 0.7] - metric_values = [0.2, 0.3] - + epoch_loss_values = {"loss": [0, 1, 0.5, 0.7]} + metric_values = [0.1, 0.2] widget.update_loss_plot(epoch_loss_values, metric_values) + assert widget.plot_2 is not None + assert widget.plot_1 is not None - assert widget.dice_metric_plot is not None - assert widget.train_loss_plot is not None - - epoch_loss_values = [0, 1, 0.5, 0.7, 0.5, 0.7] + epoch_loss_values = {"loss": [0, 1, 0.5, 0.7, 0.5, 0.7]} metric_values = [0.2, 0.3, 0.5, 0.7] - widget.update_loss_plot(epoch_loss_values, metric_values) - - assert 
widget.dice_metric_plot is not None - assert widget.train_loss_plot is not None + assert widget.plot_2 is not None + assert widget.plot_1 is not None def test_check_matching_losses(): plugin = Trainer(None) - config = plugin._set_supervised_worker_config() + config = plugin._set_worker_config() worker = plugin._create_supervised_worker_from_config(config) assert plugin.loss_list == list(worker.loss_dict.keys()) def test_training(make_napari_viewer_proxy, qtbot): - im_path = str(Path(__file__).resolve().parent / "res/test.tif") - viewer = make_napari_viewer_proxy() widget = Trainer(viewer) widget.log = LogFixture() @@ -74,8 +100,8 @@ def test_training(make_napari_viewer_proxy, qtbot): assert not widget.check_ready() - widget.images_filepaths = [im_path] - widget.labels_filepaths = [im_path] + widget.images_filepaths = [im_path_str] + widget.labels_filepaths = [im_path_str] widget.epoch_choice.setValue(1) widget.val_interval_choice.setValue(1) @@ -84,11 +110,16 @@ def test_training(make_napari_viewer_proxy, qtbot): MODEL_LIST["test"] = TestModel widget.model_choice.addItem("test") widget.model_choice.setCurrentText("test") - worker_config = widget._set_supervised_worker_config() + widget.unsupervised_mode = False + worker_config = widget._set_worker_config() assert worker_config.model_info.name == "test" worker = widget._create_supervised_worker_from_config(worker_config) - worker.config.train_data_dict = [{"image": im_path, "label": im_path}] - worker.config.val_data_dict = [{"image": im_path, "label": im_path}] + worker.config.train_data_dict = [ + {"image": im_path_str, "label": im_path_str} + ] + worker.config.val_data_dict = [ + {"image": im_path_str, "label": im_path_str} + ] worker.config.max_epochs = 1 worker.config.validation_interval = 2 worker.log_parameters() @@ -99,20 +130,34 @@ def test_training(make_napari_viewer_proxy, qtbot): widget.worker = worker res.show_plot = True - res.loss_values = [1, 1, 1, 1] - res.validation_metric = [1, 1, 1, 1] + 
res.loss_1_values = {"loss": [1, 1, 1, 1]} + res.loss_2_values = [1, 1, 1, 1] widget.on_yield(res) - assert widget.loss_values == [1, 1, 1, 1] - assert widget.validation_values == [1, 1, 1, 1] - - # def on_error(e): - # print(e) - # assert False - # - # with qtbot.waitSignal( - # signal=widget.worker.finished, timeout=10000, raising=True - # ) as blocker: - # blocker.connect(widget.worker.errored) - # widget.worker.error_signal.connect(on_error) - # widget.worker.train() - # assert widget.worker is not None + assert widget.loss_1_values["loss"] == [1, 1, 1, 1] + assert widget.loss_2_values == [1, 1, 1, 1] + + +def test_unsupervised_worker(make_napari_viewer_proxy): + viewer = make_napari_viewer_proxy() + widget = Trainer(viewer) + + widget.model_choice.setCurrentText("WNet") + widget._toggle_unsupervised_mode(enabled=True) + + widget.unsupervised_images_filewidget.text_field.setText( + str(im_path.parent) + ) + widget.data = widget.create_dataset_dict_no_labs() + worker = widget._create_worker() + dataloader, eval_dataloader, data_shape = worker._get_data() + assert eval_dataloader is None + assert data_shape == (6, 6, 6) + + widget.images_filepaths = [str(im_path.parent)] + widget.labels_filepaths = [str(im_path.parent)] + widget.unsupervised_eval_data = widget.create_train_dataset_dict() + assert widget.unsupervised_eval_data is not None + worker = widget._create_worker() + dataloader, eval_dataloader, data_shape = worker._get_data() + assert eval_dataloader is not None + assert data_shape == (6, 6, 6) diff --git a/napari_cellseg3d/code_models/model_framework.py b/napari_cellseg3d/code_models/model_framework.py index 585c53a0..9bcd67a6 100644 --- a/napari_cellseg3d/code_models/model_framework.py +++ b/napari_cellseg3d/code_models/model_framework.py @@ -245,6 +245,24 @@ def _toggle_weights_path(self): self.custom_weights_choice, self.weights_filewidget ) + def create_dataset_dict_no_labs(self): + """Creates unsupervised data dictionary for MONAI transforms and 
training.""" + volume_directory = Path( + self.unsupervised_images_filewidget.text_field.text() + ) + if not volume_directory.exists(): + raise ValueError(f"Data folder {volume_directory} does not exist") + images_filepaths = sorted(Path.glob(volume_directory, "*.tif")) + if len(images_filepaths) == 0: + raise ValueError(f"Data folder {volume_directory} is empty") + + logger.info("Images :") + for file in images_filepaths: + logger.info(Path(file).stem) + logger.info("*" * 10) + + return [{"image": str(image_name)} for image_name in images_filepaths] + def create_train_dataset_dict(self): """Creates data dictionary for MONAI transforms and training. @@ -255,9 +273,17 @@ def create_train_dataset_dict(self): * "label" : corresponding label """ + logger.debug(f"Images : {self.images_filepaths}") + logger.debug(f"Labels : {self.labels_filepaths}") + if len(self.images_filepaths) == 0 or len(self.labels_filepaths) == 0: raise ValueError("Data folders are empty") + if not Path(self.images_filepaths[0]).parent.exists(): + raise ValueError("Images folder does not exist") + if not Path(self.labels_filepaths[0]).parent.exists(): + raise ValueError("Labels folder does not exist") + logger.info("Images :\n") for file in self.images_filepaths: logger.info(Path(file).name) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index cd2bcb16..0f9822cd 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -98,24 +98,25 @@ def __init__( self.channels = channels self.max_pool = nn.MaxPool3d(2) self.in_b = InBlock(in_channels, self.channels[0], dropout=dropout) - self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) - self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) + # self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) + # self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) # self.conv3 = 
Block(channels[2], self.channels[3], dropout=dropout) # self.bot = Block(channels[3], self.channels[4], dropout=dropout) - self.bot = Block(channels[2], self.channels[3], dropout=dropout) + # self.bot = Block(channels[2], self.channels[3], dropout=dropout) + self.bot = Block(channels[0], self.channels[1], dropout=dropout) # self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) - self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) - self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) + # self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) + # self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) # self.conv_trans1 = nn.ConvTranspose3d( # self.channels[4], self.channels[3], 2, stride=2 # ) - self.conv_trans2 = nn.ConvTranspose3d( - self.channels[3], self.channels[2], 2, stride=2 - ) - self.conv_trans3 = nn.ConvTranspose3d( - self.channels[2], self.channels[1], 2, stride=2 - ) + # self.conv_trans2 = nn.ConvTranspose3d( + # self.channels[3], self.channels[2], 2, stride=2 + # ) + # self.conv_trans3 = nn.ConvTranspose3d( + # self.channels[2], self.channels[1], 2, stride=2 + # ) self.conv_trans_out = nn.ConvTranspose3d( self.channels[1], self.channels[0], 2, stride=2 ) @@ -126,11 +127,12 @@ def __init__( def forward(self, x): """Forward pass of the U-Net model.""" in_b = self.in_b(x) - c1 = self.conv1(self.max_pool(in_b)) - c2 = self.conv2(self.max_pool(c1)) + # c1 = self.conv1(self.max_pool(in_b)) + # c2 = self.conv2(self.max_pool(c1)) # c3 = self.conv3(self.max_pool(c2)) # x = self.bot(self.max_pool(c3)) - x = self.bot(self.max_pool(c2)) + # x = self.bot(self.max_pool(c2)) + x = self.bot(self.max_pool(in_b)) # x = self.deconv1( # torch.cat( # [ @@ -140,24 +142,24 @@ def forward(self, x): # dim=1, # ) # ) - x = self.deconv2( - torch.cat( - [ - c2, - self.conv_trans2(x), - ], - dim=1, - ) - ) - x = self.deconv3( - torch.cat( - 
[ - c1, - self.conv_trans3(x), - ], - dim=1, - ) - ) + # x = self.deconv2( + # torch.cat( + # [ + # c2, + # self.conv_trans2(x), + # ], + # dim=1, + # ) + # ) + # x = self.deconv3( + # torch.cat( + # [ + # c1, + # self.conv_trans3(x), + # ], + # dim=1, + # ) + # ) x = self.out_b( torch.cat( [ diff --git a/napari_cellseg3d/code_models/models/wnet/train_wnet.py b/napari_cellseg3d/code_models/models/wnet/train_wnet.py index 7207fe35..d999fc17 100644 --- a/napari_cellseg3d/code_models/models/wnet/train_wnet.py +++ b/napari_cellseg3d/code_models/models/wnet/train_wnet.py @@ -1,991 +1,992 @@ -""" -This file contains the code to train the WNet model. -""" -# import napari -import glob -import time -from pathlib import Path -from warnings import warn - -import numpy as np -import tifffile as tiff -import torch -import torch.nn as nn - -# MONAI -from monai.data import ( - CacheDataset, - DataLoader, - PatchDataset, - pad_list_data_collate, -) -from monai.data.meta_obj import set_track_meta -from monai.metrics import DiceMetric -from monai.transforms import ( - AsDiscrete, - Compose, - EnsureChannelFirst, - EnsureChannelFirstd, - EnsureTyped, - LoadImaged, - Orientationd, - RandFlipd, - RandRotate90d, - RandShiftIntensityd, - RandSpatialCropSamplesd, - ScaleIntensityRanged, - SpatialPadd, - ToTensor, -) -from monai.utils.misc import set_determinism - -# local -from napari_cellseg3d.code_models.models.wnet.model import WNet -from napari_cellseg3d.code_models.models.wnet.soft_Ncuts import SoftNCutsLoss -from napari_cellseg3d.utils import LOGGER as logger -from napari_cellseg3d.utils import dice_coeff, get_padding_dim, remap_image - -try: - import wandb - - WANDB_INSTALLED = True -except ImportError: - warn( - "wandb not installed, wandb config will not be taken into account", - stacklevel=1, - ) - WANDB_INSTALLED = False - -__author__ = "Yves Paychère, Colin Hofmann, Cyril Achard" - - -########################## -# Utils functions # -########################## - - -def 
create_dataset_dict(volume_directory, label_directory): - """Creates data dictionary for MONAI transforms and training.""" - images_filepaths = sorted( - [str(file) for file in Path(volume_directory).glob("*.tif")] - ) - - labels_filepaths = sorted( - [str(file) for file in Path(label_directory).glob("*.tif")] - ) - if len(images_filepaths) == 0 or len(labels_filepaths) == 0: - raise ValueError( - f"Data folders are empty \n{volume_directory} \n{label_directory}" - ) - - logger.info("Images :") - for file in images_filepaths: - logger.info(Path(file).stem) - logger.info("*" * 10) - logger.info("Labels :") - for file in labels_filepaths: - logger.info(Path(file).stem) - try: - data_dicts = [ - {"image": image_name, "label": label_name} - for image_name, label_name in zip( - images_filepaths, labels_filepaths - ) - ] - except ValueError as e: - raise ValueError( - f"Number of images and labels does not match : \n{volume_directory} \n{label_directory}" - ) from e - # print(f"Loaded eval image: {data_dicts}") - return data_dicts - - -def create_dataset_dict_no_labs(volume_directory): - """Creates unsupervised data dictionary for MONAI transforms and training.""" - images_filepaths = sorted(glob.glob(str(Path(volume_directory) / "*.tif"))) - if len(images_filepaths) == 0: - raise ValueError(f"Data folder {volume_directory} is empty") - - logger.info("Images :") - for file in images_filepaths: - logger.info(Path(file).stem) - logger.info("*" * 10) - - return [{"image": image_name} for image_name in images_filepaths] - - -################################ -# WNet: Config & WANDB # -################################ - - -class WNetTrainingWorkerConfig: - def __init__(self): - # WNet - self.in_channels = 1 - self.out_channels = 1 - self.num_classes = 2 - self.dropout = 0.65 - self.use_clipping = False - self.clipping = 1 - - self.lr = 1e-6 - self.scheduler = "None" # "CosineAnnealingLR" # "ReduceLROnPlateau" - self.weight_decay = 0.01 # None - - self.intensity_sigma = 1 - 
self.spatial_sigma = 4 - self.radius = 2 # yields to a radius depending on the data shape - - self.n_cuts_weight = 0.5 - self.reconstruction_loss = "MSE" # "BCE" - self.rec_loss_weight = 0.5 / 100 - - self.num_epochs = 100 - self.val_interval = 5 - self.batch_size = 2 - - # Data - # self.train_volume_directory = "./../dataset/VIP_full" - # self.eval_volume_directory = "./../dataset/VIP_cropped/eval/" - self.normalize_input = True - self.normalizing_function = remap_image # normalize_quantile - # self.use_patch = False - # self.patch_size = (64, 64, 64) - # self.num_patches = 30 - # self.eval_num_patches = 20 - # self.do_augmentation = True - # self.parallel = False - - # self.save_model = True - self.save_model_path = ( - r"./../results/new_model/wnet_new_model_all_data_3class.pth" - ) - # self.save_losses_path = ( - # r"./../results/new_model/wnet_new_model_all_data_3class.pkl" - # ) - self.save_every = 5 - self.weights_path = None - - -c = WNetTrainingWorkerConfig() -############### -# Scheduler config -############### -schedulers = { - "ReduceLROnPlateau": { - "factor": 0.5, - "patience": 50, - }, - "CosineAnnealingLR": { - "T_max": 25000, - "eta_min": 1e-8, - }, - "CosineAnnealingWarmRestarts": { - "T_0": 50000, - "eta_min": 1e-8, - "T_mult": 1, - }, - "CyclicLR": { - "base_lr": 2e-7, - "max_lr": 2e-4, - "step_size_up": 250, - "mode": "triangular", - }, -} - -############### -# WANDB_CONFIG -############### -WANDB_MODE = "disabled" -# WANDB_MODE = "online" - -WANDB_CONFIG = { - # data setting - "num_workers": c.num_workers, - "normalize": c.normalize_input, - "use_patch": c.use_patch, - "patch_size": c.patch_size, - "num_patches": c.num_patches, - "eval_num_patches": c.eval_num_patches, - "do_augmentation": c.do_augmentation, - "model_save_path": c.save_model_path, - # train setting - "batch_size": c.batch_size, - "learning_rate": c.lr, - "weight_decay": c.weight_decay, - "scheduler": { - "name": c.scheduler, - "ReduceLROnPlateau_config": { - "factor": 
schedulers["ReduceLROnPlateau"]["factor"], - "patience": schedulers["ReduceLROnPlateau"]["patience"], - }, - "CosineAnnealingLR_config": { - "T_max": schedulers["CosineAnnealingLR"]["T_max"], - "eta_min": schedulers["CosineAnnealingLR"]["eta_min"], - }, - "CosineAnnealingWarmRestarts_config": { - "T_0": schedulers["CosineAnnealingWarmRestarts"]["T_0"], - "eta_min": schedulers["CosineAnnealingWarmRestarts"]["eta_min"], - "T_mult": schedulers["CosineAnnealingWarmRestarts"]["T_mult"], - }, - "CyclicLR_config": { - "base_lr": schedulers["CyclicLR"]["base_lr"], - "max_lr": schedulers["CyclicLR"]["max_lr"], - "step_size_up": schedulers["CyclicLR"]["step_size_up"], - "mode": schedulers["CyclicLR"]["mode"], - }, - }, - "max_epochs": c.num_epochs, - "save_every": c.save_every, - "val_interval": c.val_interval, - # loss - "reconstruction_loss": c.reconstruction_loss, - "loss weights": { - "n_cuts_weight": c.n_cuts_weight, - "rec_loss_weight": c.rec_loss_weight, - }, - "loss_params": { - "intensity_sigma": c.intensity_sigma, - "spatial_sigma": c.spatial_sigma, - "radius": c.radius, - }, - # model - "model_type": "wnet", - "model_params": { - "in_channels": c.in_channels, - "out_channels": c.out_channels, - "num_classes": c.num_classes, - "dropout": c.dropout, - "use_clipping": c.use_clipping, - "clipping_value": c.clipping, - }, - # CRF - "crf_params": { - "sa": c.sa, - "sb": c.sb, - "sg": c.sg, - "w1": c.w1, - "w2": c.w2, - "n_iter": c.n_iter, - }, -} - - -def train(weights_path=None, train_config=None): - if train_config is None: - config = WNetTrainingWorkerConfig() - ############## - # disable metadata tracking in MONAI - set_track_meta(False) - ############## - if WANDB_INSTALLED: - wandb.init( - config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE - ) - - set_determinism(seed=34936339) # use default seed from NP_MAX - torch.use_deterministic_algorithms(True, warn_only=True) - - config = train_config - normalize_function = config.normalizing_function - CUDA = 
torch.cuda.is_available() - device = torch.device("cuda" if CUDA else "cpu") - - print(f"Using device: {device}") - - print("Config:") - [print(a) for a in config.__dict__.items()] - - print("Initializing training...") - print("Getting the data") - - if config.use_patch: - (data_shape, dataset) = get_patch_dataset(config) - else: - (data_shape, dataset) = get_dataset(config) - transform = Compose( - [ - ToTensor(), - EnsureChannelFirst(channel_dim=0), - ] - ) - dataset = [transform(im) for im in dataset] - for data in dataset: - print(f"data shape: {data.shape}") - break - - dataloader = DataLoader( - dataset, - batch_size=config.batch_size, - shuffle=True, - num_workers=config.num_workers, - collate_fn=pad_list_data_collate, - ) - - if config.eval_volume_directory is not None: - eval_dataset = get_patch_eval_dataset(config) - - eval_dataloader = DataLoader( - eval_dataset, - batch_size=config.batch_size, - shuffle=False, - num_workers=config.num_workers, - collate_fn=pad_list_data_collate, - ) - - dice_metric = DiceMetric( - include_background=False, reduction="mean", get_not_nans=False - ) - ################################################### - # Training the model # - ################################################### - print("Initializing the model:") - - print("- getting the model") - # Initialize the model - model = WNet( - in_channels=config.in_channels, - out_channels=config.out_channels, - num_classes=config.num_classes, - dropout=config.dropout, - ) - model = ( - nn.DataParallel(model).cuda() if CUDA and config.parallel else model - ) - model.to(device) - - if config.use_clipping: - for p in model.parameters(): - p.register_hook( - lambda grad: torch.clamp( - grad, min=-config.clipping, max=config.clipping - ) - ) - - if WANDB_INSTALLED: - wandb.watch(model, log_freq=100) - - if weights_path is not None: - model.load_state_dict(torch.load(weights_path, map_location=device)) - - print("- getting the optimizers") - # Initialize the optimizers - if 
config.weight_decay is not None: - decay = config.weight_decay - optimizer = torch.optim.Adam( - model.parameters(), lr=config.lr, weight_decay=decay - ) - else: - optimizer = torch.optim.Adam(model.parameters(), lr=config.lr) - - print("- getting the loss functions") - # Initialize the Ncuts loss function - criterionE = SoftNCutsLoss( - data_shape=data_shape, - device=device, - intensity_sigma=config.intensity_sigma, - spatial_sigma=config.spatial_sigma, - radius=config.radius, - ) - - if config.reconstruction_loss == "MSE": - criterionW = nn.MSELoss() - elif config.reconstruction_loss == "BCE": - criterionW = nn.BCELoss() - else: - raise ValueError( - f"Unknown reconstruction loss : {config.reconstruction_loss} not supported" - ) - - print("- getting the learning rate schedulers") - # Initialize the learning rate schedulers - scheduler = get_scheduler(config, optimizer) - # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - # optimizer, mode="min", factor=0.5, patience=10, verbose=True - # ) - model.train() - - print("Ready") - print("Training the model") - print("*" * 50) - - startTime = time.time() - ncuts_losses = [] - rec_losses = [] - total_losses = [] - best_dice = -1 - best_dice_epoch = -1 - - # Train the model - for epoch in range(config.num_epochs): - print(f"Epoch {epoch + 1} of {config.num_epochs}") - - epoch_ncuts_loss = 0 - epoch_rec_loss = 0 - epoch_loss = 0 - - for _i, batch in enumerate(dataloader): - # raise NotImplementedError("testing") - if config.use_patch: - image = batch["image"].to(device) - else: - image = batch.to(device) - if config.batch_size == 1: - image = image.unsqueeze(0) - else: - image = image.unsqueeze(0) - image = torch.swapaxes(image, 0, 1) - - # Forward pass - enc = model.forward_encoder(image) - # out = model.forward(image) - - # Compute the Ncuts loss - Ncuts = criterionE(enc, image) - epoch_ncuts_loss += Ncuts.item() - if WANDB_INSTALLED: - wandb.log({"Ncuts loss": Ncuts.item()}) - - # Forward pass - enc, dec = 
model(image) - - # Compute the reconstruction loss - if isinstance(criterionW, nn.MSELoss): - reconstruction_loss = criterionW(dec, image) - elif isinstance(criterionW, nn.BCELoss): - reconstruction_loss = criterionW( - torch.sigmoid(dec), - remap_image(image, new_max=1), - ) - - epoch_rec_loss += reconstruction_loss.item() - if WANDB_INSTALLED: - wandb.log({"Reconstruction loss": reconstruction_loss.item()}) - - # Backward pass for the reconstruction loss - optimizer.zero_grad() - alpha = config.n_cuts_weight - beta = config.rec_loss_weight - - loss = alpha * Ncuts + beta * reconstruction_loss - epoch_loss += loss.item() - if WANDB_INSTALLED: - wandb.log({"Sum of losses": loss.item()}) - loss.backward(loss) - optimizer.step() - - if config.scheduler == "CosineAnnealingWarmRestarts": - scheduler.step(epoch + _i / len(dataloader)) - if ( - config.scheduler == "CosineAnnealingLR" - or config.scheduler == "CyclicLR" - ): - scheduler.step() - - ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) - rec_losses.append(epoch_rec_loss / len(dataloader)) - total_losses.append(epoch_loss / len(dataloader)) - - if WANDB_INSTALLED: - wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) - wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) - wandb.log({"Sum of losses_epoch": total_losses[-1]}) - # wandb.log({"epoch": epoch}) - # wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) - # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) - wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) - - print("Ncuts loss: ", ncuts_losses[-1]) - if epoch > 0: - print( - "Ncuts loss difference: ", - ncuts_losses[-1] - ncuts_losses[-2], - ) - print("Reconstruction loss: ", rec_losses[-1]) - if epoch > 0: - print( - "Reconstruction loss difference: ", - rec_losses[-1] - rec_losses[-2], - ) - print("Sum of losses: ", total_losses[-1]) - if epoch > 0: - print( - "Sum of losses difference: ", - total_losses[-1] - total_losses[-2], - ) - - # 
Update the learning rate - if config.scheduler == "ReduceLROnPlateau": - # schedulerE.step(epoch_ncuts_loss) - # schedulerW.step(epoch_rec_loss) - scheduler.step(epoch_rec_loss) - if ( - config.eval_volume_directory is not None - and (epoch + 1) % config.val_interval == 0 - ): - model.eval() - print("Validating...") - with torch.no_grad(): - for _k, val_data in enumerate(eval_dataloader): - val_inputs, val_labels = ( - val_data["image"].to(device), - val_data["label"].to(device), - ) - - # normalize val_inputs across channels - if config.normalize_input: - for i in range(val_inputs.shape[0]): - for j in range(val_inputs.shape[1]): - val_inputs[i][j] = normalize_function( - val_inputs[i][j] - ) - - val_outputs = model.forward_encoder(val_inputs) - val_outputs = AsDiscrete(threshold=0.5)(val_outputs) - - # compute metric for current iteration - for channel in range(val_outputs.shape[1]): - max_dice_channel = torch.argmax( - torch.Tensor( - [ - dice_coeff( - y_pred=val_outputs[ - :, - channel : (channel + 1), - :, - :, - :, - ], - y_true=val_labels, - ) - ] - ) - ) - - dice_metric( - y_pred=val_outputs[ - :, - max_dice_channel : (max_dice_channel + 1), - :, - :, - :, - ], - y=val_labels, - ) - # if plot_val_input: # only once - # logged_image = val_inputs.detach().cpu().numpy() - # logged_image = np.swapaxes(logged_image, 2, 4) - # logged_image = logged_image[0, :, 32, :, :] - # images = wandb.Image( - # logged_image, caption="Validation input" - # ) - # - # wandb.log({"val/input": images}) - # plot_val_input = False - - # if k == 2 and (30 <= epoch <= 50 or epoch % 100 == 0): - # logged_image = val_outputs.detach().cpu().numpy() - # logged_image = np.swapaxes(logged_image, 2, 4) - # logged_image = logged_image[ - # 0, max_dice_channel, 32, :, : - # ] - # images = wandb.Image( - # logged_image, caption="Validation output" - # ) - # - # wandb.log({"val/output": images}) - # dice_metric(y_pred=val_outputs[:, 2:, :,:,:], y=val_labels) - # 
dice_metric(y_pred=val_outputs[:, 1:, :, :, :], y=val_labels) - - # import napari - # view = napari.Viewer() - # view.add_image(val_inputs.cpu().numpy(), name="input") - # view.add_image(val_labels.cpu().numpy(), name="label") - # vis_out = np.array( - # [i.detach().cpu().numpy() for i in val_outputs], - # dtype=np.float32, - # ) - # crf_out = np.array( - # [i.detach().cpu().numpy() for i in crf_outputs], - # dtype=np.float32, - # ) - # view.add_image(vis_out, name="output") - # view.add_image(crf_out, name="crf_output") - # napari.run() - - # aggregate the final mean dice result - metric = dice_metric.aggregate().item() - print("Validation Dice score: ", metric) - if best_dice < metric < 2: - best_dice = metric - best_dice_epoch = epoch + 1 - if config.save_model: - save_best_path = Path(config.save_model_path).parents[ - 0 - ] - save_best_path.mkdir(parents=True, exist_ok=True) - save_best_name = Path(config.save_model_path).stem - save_path = ( - str(save_best_path / save_best_name) - + "_best_metric.pth" - ) - print(f"Saving new best model to {save_path}") - torch.save(model.state_dict(), save_path) - - if WANDB_INSTALLED: - # log validation dice score for each validation round - wandb.log({"val/dice_metric": metric}) - - # reset the status for next validation round - dice_metric.reset() - - print( - "ETA: ", - (time.time() - startTime) - * (config.num_epochs / (epoch + 1) - 1) - / 60, - "minutes", - ) - print("-" * 20) - - # Save the model - if config.save_model and epoch % config.save_every == 0: - torch.save(model.state_dict(), config.save_model_path) - # with open(config.save_losses_path, "wb") as f: - # pickle.dump((ncuts_losses, rec_losses), f) - - print("Training finished") - print(f"Best dice metric : {best_dice}") - if WANDB_INSTALLED and config.eval_volume_directory is not None: - wandb.log( - { - "best_dice_metric": best_dice, - "best_metric_epoch": best_dice_epoch, - } - ) - print("*" * 50) - - # Save the model - if config.save_model: - 
print("Saving the model to: ", config.save_model_path) - torch.save(model.state_dict(), config.save_model_path) - # with open(config.save_losses_path, "wb") as f: - # pickle.dump((ncuts_losses, rec_losses), f) - if WANDB_INSTALLED: - model_artifact = wandb.Artifact( - "WNet", - type="model", - description="WNet benchmark", - metadata=dict(WANDB_CONFIG), - ) - model_artifact.add_file(config.save_model_path) - wandb.log_artifact(model_artifact) - - return ncuts_losses, rec_losses, model - - -def get_dataset(config): - """Creates a Dataset from the original data using the tifffile library - - Args: - config (WNetTrainingWorkerConfig): The configuration object - - Returns: - (tuple): A tuple containing the shape of the data and the dataset - """ - train_files = create_dataset_dict_no_labs( - volume_directory=config.train_volume_directory - ) - train_files = [d.get("image") for d in train_files] - # logger.debug(f"train_files: {train_files}") - volumes = tiff.imread(train_files).astype(np.float32) - volume_shape = volumes.shape - # logger.debug(f"volume_shape: {volume_shape}") - - if len(volume_shape) == 3: - volumes = np.expand_dims(volumes, axis=0) - - if config.normalize_input: - volumes = np.array( - [ - # mad_normalization(volume) - config.normalizing_function(volume) - for volume in volumes - ] - ) - # mean = volumes.mean(axis=0) - # std = volumes.std(axis=0) - # volumes = (volumes - mean) / std - # print("NORMALIZED VOLUMES") - # print(volumes.shape) - # [print("MIN MAX", volume.flatten().min(), volume.flatten().max()) for volume in volumes] - # print(volumes.mean(axis=0), volumes.std(axis=0)) - - dataset = CacheDataset(data=volumes) - - return (volume_shape, dataset) - - # train_files = create_dataset_dict_no_labs( - # volume_directory=config.train_volume_directory - # ) - # train_files = [d.get("image") for d in train_files] - # volumes = [] - # for file in train_files: - # image = tiff.imread(file).astype(np.float32) - # image = np.expand_dims(image, axis=0) # 
add channel dimension - # volumes.append(image) - # # volumes = tiff.imread(train_files).astype(np.float32) - # volume_shape = volumes[0].shape - # # print(volume_shape) - # - # if config.do_augmentation: - # augmentation = Compose( - # [ - # ScaleIntensityRange( - # a_min=0, - # a_max=2000, - # b_min=0.0, - # b_max=1.0, - # clip=True, - # ), - # RandShiftIntensity(offsets=0.1, prob=0.5), - # RandFlip(spatial_axis=[1], prob=0.5), - # RandFlip(spatial_axis=[2], prob=0.5), - # RandRotate90(prob=0.1, max_k=3), - # ] - # ) - # else: - # augmentation = None - # - # dataset = CacheDataset(data=np.array(volumes), transform=augmentation) - # - # return (volume_shape, dataset) - - -def get_patch_dataset(config): - """Creates a Dataset from the original data using the tifffile library - - Args: - config (WNetTrainingWorkerConfig): The configuration object - - Returns: - (tuple): A tuple containing the shape of the data and the dataset - """ - - train_files = create_dataset_dict_no_labs( - volume_directory=config.train_volume_directory - ) - - patch_func = Compose( - [ - LoadImaged(keys=["image"], image_only=True), - EnsureChannelFirstd(keys=["image"], channel_dim="no_channel"), - RandSpatialCropSamplesd( - keys=["image"], - roi_size=( - config.patch_size - ), # multiply by axis_stretch_factor if anisotropy - # max_roi_size=(120, 120, 120), - random_size=False, - num_samples=config.num_patches, - ), - Orientationd(keys=["image"], axcodes="PLI"), - SpatialPadd( - keys=["image"], - spatial_size=(get_padding_dim(config.patch_size)), - ), - EnsureTyped(keys=["image"]), - ] - ) - - train_transforms = Compose( - [ - ScaleIntensityRanged( - keys=["image"], - a_min=0, - a_max=2000, - b_min=0.0, - b_max=1.0, - clip=True, - ), - RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), - RandRotate90d(keys=["image"], prob=0.1, max_k=3), - EnsureTyped(keys=["image"]), - ] 
- ) - - dataset = PatchDataset( - data=train_files, - samples_per_image=config.num_patches, - patch_func=patch_func, - transform=train_transforms, - ) - - return config.patch_size, dataset - - -def get_patch_eval_dataset(config): - eval_files = create_dataset_dict( - volume_directory=config.eval_volume_directory + "/vol", - label_directory=config.eval_volume_directory + "/lab", - ) - - patch_func = Compose( - [ - LoadImaged(keys=["image", "label"], image_only=True), - EnsureChannelFirstd( - keys=["image", "label"], channel_dim="no_channel" - ), - # NormalizeIntensityd(keys=["image"]) if config.normalize_input else lambda x: x, - RandSpatialCropSamplesd( - keys=["image", "label"], - roi_size=( - config.patch_size - ), # multiply by axis_stretch_factor if anisotropy - # max_roi_size=(120, 120, 120), - random_size=False, - num_samples=config.eval_num_patches, - ), - Orientationd(keys=["image", "label"], axcodes="PLI"), - SpatialPadd( - keys=["image", "label"], - spatial_size=(get_padding_dim(config.patch_size)), - ), - EnsureTyped(keys=["image", "label"]), - ] - ) - - eval_transforms = Compose( - [ - EnsureTyped(keys=["image", "label"]), - ] - ) - - return PatchDataset( - data=eval_files, - samples_per_image=config.eval_num_patches, - patch_func=patch_func, - transform=eval_transforms, - ) - - -def get_dataset_monai(config): - """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library - - Args: - config (WNetTrainingWorkerConfig): The configuration object - - Returns: - (tuple): A tuple containing the shape of the data and the dataset - """ - train_files = create_dataset_dict_no_labs( - volume_directory=config.train_volume_directory - ) - # print(train_files) - # print(len(train_files)) - # print(train_files[0]) - first_volume = LoadImaged(keys=["image"])(train_files[0]) - first_volume_shape = first_volume["image"].shape - - # Transforms to be applied to each volume - load_single_images = Compose( - [ - LoadImaged(keys=["image"]), - 
EnsureChannelFirstd(keys=["image"]), - Orientationd(keys=["image"], axcodes="PLI"), - SpatialPadd( - keys=["image"], - spatial_size=(get_padding_dim(first_volume_shape)), - ), - EnsureTyped(keys=["image"]), - ] - ) - - if config.do_augmentation: - train_transforms = Compose( - [ - ScaleIntensityRanged( - keys=["image"], - a_min=0, - a_max=2000, - b_min=0.0, - b_max=1.0, - clip=True, - ), - RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), - RandRotate90d(keys=["image"], prob=0.1, max_k=3), - EnsureTyped(keys=["image"]), - ] - ) - else: - train_transforms = EnsureTyped(keys=["image"]) - - # Create the dataset - dataset = CacheDataset( - data=train_files, - transform=Compose(load_single_images, train_transforms), - ) - - return first_volume_shape, dataset - - -def get_scheduler(config, optimizer, verbose=False): - scheduler_name = config.scheduler - if scheduler_name == "None": - scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( - optimizer, - T_max=100, - eta_min=config.lr - 1e-6, - verbose=verbose, - ) - - elif scheduler_name == "ReduceLROnPlateau": - scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer, - mode="min", - factor=schedulers["ReduceLROnPlateau"]["factor"], - patience=schedulers["ReduceLROnPlateau"]["patience"], - verbose=verbose, - ) - elif scheduler_name == "CosineAnnealingLR": - scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( - optimizer, - T_max=schedulers["CosineAnnealingLR"]["T_max"], - eta_min=schedulers["CosineAnnealingLR"]["eta_min"], - verbose=verbose, - ) - elif scheduler_name == "CosineAnnealingWarmRestarts": - scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( - optimizer, - T_0=schedulers["CosineAnnealingWarmRestarts"]["T_0"], - eta_min=schedulers["CosineAnnealingWarmRestarts"]["eta_min"], - T_mult=schedulers["CosineAnnealingWarmRestarts"]["T_mult"], - verbose=verbose, - ) - 
elif scheduler_name == "CyclicLR": - scheduler = torch.optim.lr_scheduler.CyclicLR( - optimizer, - base_lr=schedulers["CyclicLR"]["base_lr"], - max_lr=schedulers["CyclicLR"]["max_lr"], - step_size_up=schedulers["CyclicLR"]["step_size_up"], - mode=schedulers["CyclicLR"]["mode"], - cycle_momentum=False, - ) - else: - raise ValueError(f"Scheduler {scheduler_name} not provided") - return scheduler - - -if __name__ == "__main__": - weights_location = str( - # Path(__file__).resolve().parent / "../weights/wnet.pth" - # "../wnet_SUM_MSE_DAPI_rad2_best_metric.pth" - ) - train( - # weights_location - ) +# """ +# This file contains the code to train the WNet model. +# """ +# # import napari +# import glob +# import time +# from pathlib import Path +# from warnings import warn +# +# import numpy as np +# import tifffile as tiff +# import torch +# import torch.nn as nn +# +# # MONAI +# from monai.data import ( +# CacheDataset, +# DataLoader, +# PatchDataset, +# pad_list_data_collate, +# ) +# from monai.data.meta_obj import set_track_meta +# from monai.metrics import DiceMetric +# from monai.transforms import ( +# AsDiscrete, +# Compose, +# EnsureChannelFirst, +# EnsureChannelFirstd, +# EnsureTyped, +# LoadImaged, +# Orientationd, +# RandFlipd, +# RandRotate90d, +# RandShiftIntensityd, +# RandSpatialCropSamplesd, +# ScaleIntensityRanged, +# SpatialPadd, +# ToTensor, +# ) +# from monai.utils.misc import set_determinism +# +# # local +# from napari_cellseg3d.code_models.models.wnet.model import WNet +# from napari_cellseg3d.code_models.models.wnet.soft_Ncuts import SoftNCutsLoss +# from napari_cellseg3d.utils import LOGGER as logger +# from napari_cellseg3d.utils import dice_coeff, get_padding_dim, remap_image +# +# try: +# import wandb +# +# WANDB_INSTALLED = True +# except ImportError: +# warn( +# "wandb not installed, wandb config will not be taken into account", +# stacklevel=1, +# ) +# WANDB_INSTALLED = False +# +# __author__ = "Yves Paychère, Colin Hofmann, Cyril Achard" +# 
+# +# ########################## +# # Utils functions # +# ########################## +# +# +# # def create_dataset_dict(volume_directory, label_directory): +# # """Creates data dictionary for MONAI transforms and training.""" +# # images_filepaths = sorted( +# # [str(file) for file in Path(volume_directory).glob("*.tif")] +# # ) +# # +# # labels_filepaths = sorted( +# # [str(file) for file in Path(label_directory).glob("*.tif")] +# # ) +# # if len(images_filepaths) == 0 or len(labels_filepaths) == 0: +# # raise ValueError( +# # f"Data folders are empty \n{volume_directory} \n{label_directory}" +# # ) +# # +# # logger.info("Images :") +# # for file in images_filepaths: +# # logger.info(Path(file).stem) +# # logger.info("*" * 10) +# # logger.info("Labels :") +# # for file in labels_filepaths: +# # logger.info(Path(file).stem) +# # try: +# # data_dicts = [ +# # {"image": image_name, "label": label_name} +# # for image_name, label_name in zip( +# # images_filepaths, labels_filepaths +# # ) +# # ] +# # except ValueError as e: +# # raise ValueError( +# # f"Number of images and labels does not match : \n{volume_directory} \n{label_directory}" +# # ) from e +# # # print(f"Loaded eval image: {data_dicts}") +# # return data_dicts +# +# +# def create_dataset_dict_no_labs(volume_directory): +# """Creates unsupervised data dictionary for MONAI transforms and training.""" +# images_filepaths = sorted(glob.glob(str(Path(volume_directory) / "*.tif"))) +# if len(images_filepaths) == 0: +# raise ValueError(f"Data folder {volume_directory} is empty") +# +# logger.info("Images :") +# for file in images_filepaths: +# logger.info(Path(file).stem) +# logger.info("*" * 10) +# +# return [{"image": image_name} for image_name in images_filepaths] +# +# +# ################################ +# # WNet: Config & WANDB # +# ################################ +# +# +# class WNetTrainingWorkerConfig: +# def __init__(self): +# # WNet +# self.in_channels = 1 +# self.out_channels = 1 +# self.num_classes 
= 2 +# self.dropout = 0.65 +# self.use_clipping = False +# self.clipping = 1 +# +# self.lr = 1e-6 +# self.scheduler = "None" # "CosineAnnealingLR" # "ReduceLROnPlateau" +# self.weight_decay = 0.01 # None +# +# self.intensity_sigma = 1 +# self.spatial_sigma = 4 +# self.radius = 2 # yields to a radius depending on the data shape +# +# self.n_cuts_weight = 0.5 +# self.reconstruction_loss = "MSE" # "BCE" +# self.rec_loss_weight = 0.5 / 100 +# +# self.num_epochs = 100 +# self.val_interval = 5 +# self.batch_size = 2 +# +# # Data +# # self.train_volume_directory = "./../dataset/VIP_full" +# # self.eval_volume_directory = "./../dataset/VIP_cropped/eval/" +# self.normalize_input = True +# self.normalizing_function = remap_image # normalize_quantile +# # self.use_patch = False +# # self.patch_size = (64, 64, 64) +# # self.num_patches = 30 +# # self.eval_num_patches = 20 +# # self.do_augmentation = True +# # self.parallel = False +# +# # self.save_model = True +# self.save_model_path = ( +# r"./../results/new_model/wnet_new_model_all_data_3class.pth" +# ) +# # self.save_losses_path = ( +# # r"./../results/new_model/wnet_new_model_all_data_3class.pkl" +# # ) +# self.save_every = 5 +# self.weights_path = None +# +# +# c = WNetTrainingWorkerConfig() +# ############### +# # Scheduler config +# ############### +# schedulers = { +# "ReduceLROnPlateau": { +# "factor": 0.5, +# "patience": 50, +# }, +# "CosineAnnealingLR": { +# "T_max": 25000, +# "eta_min": 1e-8, +# }, +# "CosineAnnealingWarmRestarts": { +# "T_0": 50000, +# "eta_min": 1e-8, +# "T_mult": 1, +# }, +# "CyclicLR": { +# "base_lr": 2e-7, +# "max_lr": 2e-4, +# "step_size_up": 250, +# "mode": "triangular", +# }, +# } +# +# ############### +# # WANDB_CONFIG +# ############### +# WANDB_MODE = "disabled" +# # WANDB_MODE = "online" +# +# WANDB_CONFIG = { +# # data setting +# "num_workers": c.num_workers, +# "normalize": c.normalize_input, +# "use_patch": c.use_patch, +# "patch_size": c.patch_size, +# "num_patches": c.num_patches, 
+# "eval_num_patches": c.eval_num_patches, +# "do_augmentation": c.do_augmentation, +# "model_save_path": c.save_model_path, +# # train setting +# "batch_size": c.batch_size, +# "learning_rate": c.lr, +# "weight_decay": c.weight_decay, +# "scheduler": { +# "name": c.scheduler, +# "ReduceLROnPlateau_config": { +# "factor": schedulers["ReduceLROnPlateau"]["factor"], +# "patience": schedulers["ReduceLROnPlateau"]["patience"], +# }, +# "CosineAnnealingLR_config": { +# "T_max": schedulers["CosineAnnealingLR"]["T_max"], +# "eta_min": schedulers["CosineAnnealingLR"]["eta_min"], +# }, +# "CosineAnnealingWarmRestarts_config": { +# "T_0": schedulers["CosineAnnealingWarmRestarts"]["T_0"], +# "eta_min": schedulers["CosineAnnealingWarmRestarts"]["eta_min"], +# "T_mult": schedulers["CosineAnnealingWarmRestarts"]["T_mult"], +# }, +# "CyclicLR_config": { +# "base_lr": schedulers["CyclicLR"]["base_lr"], +# "max_lr": schedulers["CyclicLR"]["max_lr"], +# "step_size_up": schedulers["CyclicLR"]["step_size_up"], +# "mode": schedulers["CyclicLR"]["mode"], +# }, +# }, +# "max_epochs": c.num_epochs, +# "save_every": c.save_every, +# "val_interval": c.val_interval, +# # loss +# "reconstruction_loss": c.reconstruction_loss, +# "loss weights": { +# "n_cuts_weight": c.n_cuts_weight, +# "rec_loss_weight": c.rec_loss_weight, +# }, +# "loss_params": { +# "intensity_sigma": c.intensity_sigma, +# "spatial_sigma": c.spatial_sigma, +# "radius": c.radius, +# }, +# # model +# "model_type": "wnet", +# "model_params": { +# "in_channels": c.in_channels, +# "out_channels": c.out_channels, +# "num_classes": c.num_classes, +# "dropout": c.dropout, +# "use_clipping": c.use_clipping, +# "clipping_value": c.clipping, +# }, +# # CRF +# "crf_params": { +# "sa": c.sa, +# "sb": c.sb, +# "sg": c.sg, +# "w1": c.w1, +# "w2": c.w2, +# "n_iter": c.n_iter, +# }, +# } +# +# +# def train(weights_path=None, train_config=None): +# if train_config is None: +# config = WNetTrainingWorkerConfig() +# ############## +# # disable 
metadata tracking in MONAI +# set_track_meta(False) +# ############## +# if WANDB_INSTALLED: +# wandb.init( +# config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE +# ) +# +# set_determinism(seed=34936339) # use default seed from NP_MAX +# torch.use_deterministic_algorithms(True, warn_only=True) +# +# config = train_config +# normalize_function = config.normalizing_function +# CUDA = torch.cuda.is_available() +# device = torch.device("cuda" if CUDA else "cpu") +# +# print(f"Using device: {device}") +# +# print("Config:") +# [print(a) for a in config.__dict__.items()] +# +# print("Initializing training...") +# print("Getting the data") +# +# if config.use_patch: +# (data_shape, dataset) = get_patch_dataset(config) +# else: +# (data_shape, dataset) = get_dataset(config) +# transform = Compose( +# [ +# ToTensor(), +# EnsureChannelFirst(channel_dim=0), +# ] +# ) +# dataset = [transform(im) for im in dataset] +# for data in dataset: +# print(f"data shape: {data.shape}") +# break +# +# dataloader = DataLoader( +# dataset, +# batch_size=config.batch_size, +# shuffle=True, +# num_workers=config.num_workers, +# collate_fn=pad_list_data_collate, +# ) +# +# if config.eval_volume_directory is not None: +# # eval_dataset = get_patch_eval_dataset(config) +# eval_dataset = None +# +# eval_dataloader = DataLoader( +# eval_dataset, +# batch_size=config.batch_size, +# shuffle=False, +# num_workers=config.num_workers, +# collate_fn=pad_list_data_collate, +# ) +# +# dice_metric = DiceMetric( +# include_background=False, reduction="mean", get_not_nans=False +# ) +# ################################################### +# # Training the model # +# ################################################### +# print("Initializing the model:") +# +# print("- getting the model") +# # Initialize the model +# model = WNet( +# in_channels=config.in_channels, +# out_channels=config.out_channels, +# num_classes=config.num_classes, +# dropout=config.dropout, +# ) +# model = ( +# 
nn.DataParallel(model).cuda() if CUDA and config.parallel else model +# ) +# model.to(device) +# +# if config.use_clipping: +# for p in model.parameters(): +# p.register_hook( +# lambda grad: torch.clamp( +# grad, min=-config.clipping, max=config.clipping +# ) +# ) +# +# if WANDB_INSTALLED: +# wandb.watch(model, log_freq=100) +# +# if weights_path is not None: +# model.load_state_dict(torch.load(weights_path, map_location=device)) +# +# print("- getting the optimizers") +# # Initialize the optimizers +# if config.weight_decay is not None: +# decay = config.weight_decay +# optimizer = torch.optim.Adam( +# model.parameters(), lr=config.lr, weight_decay=decay +# ) +# else: +# optimizer = torch.optim.Adam(model.parameters(), lr=config.lr) +# +# print("- getting the loss functions") +# # Initialize the Ncuts loss function +# criterionE = SoftNCutsLoss( +# data_shape=data_shape, +# device=device, +# intensity_sigma=config.intensity_sigma, +# spatial_sigma=config.spatial_sigma, +# radius=config.radius, +# ) +# +# if config.reconstruction_loss == "MSE": +# criterionW = nn.MSELoss() +# elif config.reconstruction_loss == "BCE": +# criterionW = nn.BCELoss() +# else: +# raise ValueError( +# f"Unknown reconstruction loss : {config.reconstruction_loss} not supported" +# ) +# +# print("- getting the learning rate schedulers") +# # Initialize the learning rate schedulers +# scheduler = get_scheduler(config, optimizer) +# # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( +# # optimizer, mode="min", factor=0.5, patience=10, verbose=True +# # ) +# model.train() +# +# print("Ready") +# print("Training the model") +# print("*" * 50) +# +# startTime = time.time() +# ncuts_losses = [] +# rec_losses = [] +# total_losses = [] +# best_dice = -1 +# best_dice_epoch = -1 +# +# # Train the model +# for epoch in range(config.num_epochs): +# print(f"Epoch {epoch + 1} of {config.num_epochs}") +# +# epoch_ncuts_loss = 0 +# epoch_rec_loss = 0 +# epoch_loss = 0 +# +# for _i, batch in 
enumerate(dataloader): +# # raise NotImplementedError("testing") +# if config.use_patch: +# image = batch["image"].to(device) +# else: +# image = batch.to(device) +# if config.batch_size == 1: +# image = image.unsqueeze(0) +# else: +# image = image.unsqueeze(0) +# image = torch.swapaxes(image, 0, 1) +# +# # Forward pass +# enc = model.forward_encoder(image) +# # out = model.forward(image) +# +# # Compute the Ncuts loss +# Ncuts = criterionE(enc, image) +# epoch_ncuts_loss += Ncuts.item() +# if WANDB_INSTALLED: +# wandb.log({"Ncuts loss": Ncuts.item()}) +# +# # Forward pass +# enc, dec = model(image) +# +# # Compute the reconstruction loss +# if isinstance(criterionW, nn.MSELoss): +# reconstruction_loss = criterionW(dec, image) +# elif isinstance(criterionW, nn.BCELoss): +# reconstruction_loss = criterionW( +# torch.sigmoid(dec), +# remap_image(image, new_max=1), +# ) +# +# epoch_rec_loss += reconstruction_loss.item() +# if WANDB_INSTALLED: +# wandb.log({"Reconstruction loss": reconstruction_loss.item()}) +# +# # Backward pass for the reconstruction loss +# optimizer.zero_grad() +# alpha = config.n_cuts_weight +# beta = config.rec_loss_weight +# +# loss = alpha * Ncuts + beta * reconstruction_loss +# epoch_loss += loss.item() +# if WANDB_INSTALLED: +# wandb.log({"Sum of losses": loss.item()}) +# loss.backward(loss) +# optimizer.step() +# +# if config.scheduler == "CosineAnnealingWarmRestarts": +# scheduler.step(epoch + _i / len(dataloader)) +# if ( +# config.scheduler == "CosineAnnealingLR" +# or config.scheduler == "CyclicLR" +# ): +# scheduler.step() +# +# ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) +# rec_losses.append(epoch_rec_loss / len(dataloader)) +# total_losses.append(epoch_loss / len(dataloader)) +# +# if WANDB_INSTALLED: +# wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) +# wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) +# wandb.log({"Sum of losses_epoch": total_losses[-1]}) +# # wandb.log({"epoch": epoch}) +# # 
wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) +# # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) +# wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) +# +# print("Ncuts loss: ", ncuts_losses[-1]) +# if epoch > 0: +# print( +# "Ncuts loss difference: ", +# ncuts_losses[-1] - ncuts_losses[-2], +# ) +# print("Reconstruction loss: ", rec_losses[-1]) +# if epoch > 0: +# print( +# "Reconstruction loss difference: ", +# rec_losses[-1] - rec_losses[-2], +# ) +# print("Sum of losses: ", total_losses[-1]) +# if epoch > 0: +# print( +# "Sum of losses difference: ", +# total_losses[-1] - total_losses[-2], +# ) +# +# # Update the learning rate +# if config.scheduler == "ReduceLROnPlateau": +# # schedulerE.step(epoch_ncuts_loss) +# # schedulerW.step(epoch_rec_loss) +# scheduler.step(epoch_rec_loss) +# if ( +# config.eval_volume_directory is not None +# and (epoch + 1) % config.val_interval == 0 +# ): +# model.eval() +# print("Validating...") +# with torch.no_grad(): +# for _k, val_data in enumerate(eval_dataloader): +# val_inputs, val_labels = ( +# val_data["image"].to(device), +# val_data["label"].to(device), +# ) +# +# # normalize val_inputs across channels +# if config.normalize_input: +# for i in range(val_inputs.shape[0]): +# for j in range(val_inputs.shape[1]): +# val_inputs[i][j] = normalize_function( +# val_inputs[i][j] +# ) +# +# val_outputs = model.forward_encoder(val_inputs) +# val_outputs = AsDiscrete(threshold=0.5)(val_outputs) +# +# # compute metric for current iteration +# for channel in range(val_outputs.shape[1]): +# max_dice_channel = torch.argmax( +# torch.Tensor( +# [ +# dice_coeff( +# y_pred=val_outputs[ +# :, +# channel : (channel + 1), +# :, +# :, +# :, +# ], +# y_true=val_labels, +# ) +# ] +# ) +# ) +# +# dice_metric( +# y_pred=val_outputs[ +# :, +# max_dice_channel : (max_dice_channel + 1), +# :, +# :, +# :, +# ], +# y=val_labels, +# ) +# # if plot_val_input: # only once +# # logged_image 
= val_inputs.detach().cpu().numpy() +# # logged_image = np.swapaxes(logged_image, 2, 4) +# # logged_image = logged_image[0, :, 32, :, :] +# # images = wandb.Image( +# # logged_image, caption="Validation input" +# # ) +# # +# # wandb.log({"val/input": images}) +# # plot_val_input = False +# +# # if k == 2 and (30 <= epoch <= 50 or epoch % 100 == 0): +# # logged_image = val_outputs.detach().cpu().numpy() +# # logged_image = np.swapaxes(logged_image, 2, 4) +# # logged_image = logged_image[ +# # 0, max_dice_channel, 32, :, : +# # ] +# # images = wandb.Image( +# # logged_image, caption="Validation output" +# # ) +# # +# # wandb.log({"val/output": images}) +# # dice_metric(y_pred=val_outputs[:, 2:, :,:,:], y=val_labels) +# # dice_metric(y_pred=val_outputs[:, 1:, :, :, :], y=val_labels) +# +# # import napari +# # view = napari.Viewer() +# # view.add_image(val_inputs.cpu().numpy(), name="input") +# # view.add_image(val_labels.cpu().numpy(), name="label") +# # vis_out = np.array( +# # [i.detach().cpu().numpy() for i in val_outputs], +# # dtype=np.float32, +# # ) +# # crf_out = np.array( +# # [i.detach().cpu().numpy() for i in crf_outputs], +# # dtype=np.float32, +# # ) +# # view.add_image(vis_out, name="output") +# # view.add_image(crf_out, name="crf_output") +# # napari.run() +# +# # aggregate the final mean dice result +# metric = dice_metric.aggregate().item() +# print("Validation Dice score: ", metric) +# if best_dice < metric < 2: +# best_dice = metric +# best_dice_epoch = epoch + 1 +# if config.save_model: +# save_best_path = Path(config.save_model_path).parents[ +# 0 +# ] +# save_best_path.mkdir(parents=True, exist_ok=True) +# save_best_name = Path(config.save_model_path).stem +# save_path = ( +# str(save_best_path / save_best_name) +# + "_best_metric.pth" +# ) +# print(f"Saving new best model to {save_path}") +# torch.save(model.state_dict(), save_path) +# +# if WANDB_INSTALLED: +# # log validation dice score for each validation round +# 
wandb.log({"val/dice_metric": metric}) +# +# # reset the status for next validation round +# dice_metric.reset() +# +# print( +# "ETA: ", +# (time.time() - startTime) +# * (config.num_epochs / (epoch + 1) - 1) +# / 60, +# "minutes", +# ) +# print("-" * 20) +# +# # Save the model +# if config.save_model and epoch % config.save_every == 0: +# torch.save(model.state_dict(), config.save_model_path) +# # with open(config.save_losses_path, "wb") as f: +# # pickle.dump((ncuts_losses, rec_losses), f) +# +# print("Training finished") +# print(f"Best dice metric : {best_dice}") +# if WANDB_INSTALLED and config.eval_volume_directory is not None: +# wandb.log( +# { +# "best_dice_metric": best_dice, +# "best_metric_epoch": best_dice_epoch, +# } +# ) +# print("*" * 50) +# +# # Save the model +# if config.save_model: +# print("Saving the model to: ", config.save_model_path) +# torch.save(model.state_dict(), config.save_model_path) +# # with open(config.save_losses_path, "wb") as f: +# # pickle.dump((ncuts_losses, rec_losses), f) +# if WANDB_INSTALLED: +# model_artifact = wandb.Artifact( +# "WNet", +# type="model", +# description="WNet benchmark", +# metadata=dict(WANDB_CONFIG), +# ) +# model_artifact.add_file(config.save_model_path) +# wandb.log_artifact(model_artifact) +# +# return ncuts_losses, rec_losses, model +# +# +# def get_dataset(config): +# """Creates a Dataset from the original data using the tifffile library +# +# Args: +# config (WNetTrainingWorkerConfig): The configuration object +# +# Returns: +# (tuple): A tuple containing the shape of the data and the dataset +# """ +# train_files = create_dataset_dict_no_labs( +# volume_directory=config.train_volume_directory +# ) +# train_files = [d.get("image") for d in train_files] +# # logger.debug(f"train_files: {train_files}") +# volumes = tiff.imread(train_files).astype(np.float32) +# volume_shape = volumes.shape +# # logger.debug(f"volume_shape: {volume_shape}") +# +# if len(volume_shape) == 3: +# volumes = 
np.expand_dims(volumes, axis=0) +# +# if config.normalize_input: +# volumes = np.array( +# [ +# # mad_normalization(volume) +# config.normalizing_function(volume) +# for volume in volumes +# ] +# ) +# # mean = volumes.mean(axis=0) +# # std = volumes.std(axis=0) +# # volumes = (volumes - mean) / std +# # print("NORMALIZED VOLUMES") +# # print(volumes.shape) +# # [print("MIN MAX", volume.flatten().min(), volume.flatten().max()) for volume in volumes] +# # print(volumes.mean(axis=0), volumes.std(axis=0)) +# +# dataset = CacheDataset(data=volumes) +# +# return (volume_shape, dataset) +# +# # train_files = create_dataset_dict_no_labs( +# # volume_directory=config.train_volume_directory +# # ) +# # train_files = [d.get("image") for d in train_files] +# # volumes = [] +# # for file in train_files: +# # image = tiff.imread(file).astype(np.float32) +# # image = np.expand_dims(image, axis=0) # add channel dimension +# # volumes.append(image) +# # # volumes = tiff.imread(train_files).astype(np.float32) +# # volume_shape = volumes[0].shape +# # # print(volume_shape) +# # +# # if config.do_augmentation: +# # augmentation = Compose( +# # [ +# # ScaleIntensityRange( +# # a_min=0, +# # a_max=2000, +# # b_min=0.0, +# # b_max=1.0, +# # clip=True, +# # ), +# # RandShiftIntensity(offsets=0.1, prob=0.5), +# # RandFlip(spatial_axis=[1], prob=0.5), +# # RandFlip(spatial_axis=[2], prob=0.5), +# # RandRotate90(prob=0.1, max_k=3), +# # ] +# # ) +# # else: +# # augmentation = None +# # +# # dataset = CacheDataset(data=np.array(volumes), transform=augmentation) +# # +# # return (volume_shape, dataset) +# +# +# def get_patch_dataset(config): +# """Creates a Dataset from the original data using the tifffile library +# +# Args: +# config (WNetTrainingWorkerConfig): The configuration object +# +# Returns: +# (tuple): A tuple containing the shape of the data and the dataset +# """ +# +# train_files = create_dataset_dict_no_labs( +# volume_directory=config.train_volume_directory +# ) +# +# 
patch_func = Compose( +# [ +# LoadImaged(keys=["image"], image_only=True), +# EnsureChannelFirstd(keys=["image"], channel_dim="no_channel"), +# RandSpatialCropSamplesd( +# keys=["image"], +# roi_size=( +# config.patch_size +# ), # multiply by axis_stretch_factor if anisotropy +# # max_roi_size=(120, 120, 120), +# random_size=False, +# num_samples=config.num_patches, +# ), +# Orientationd(keys=["image"], axcodes="PLI"), +# SpatialPadd( +# keys=["image"], +# spatial_size=(get_padding_dim(config.patch_size)), +# ), +# EnsureTyped(keys=["image"]), +# ] +# ) +# +# train_transforms = Compose( +# [ +# ScaleIntensityRanged( +# keys=["image"], +# a_min=0, +# a_max=2000, +# b_min=0.0, +# b_max=1.0, +# clip=True, +# ), +# RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), +# RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), +# RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), +# RandRotate90d(keys=["image"], prob=0.1, max_k=3), +# EnsureTyped(keys=["image"]), +# ] +# ) +# +# dataset = PatchDataset( +# data=train_files, +# samples_per_image=config.num_patches, +# patch_func=patch_func, +# transform=train_transforms, +# ) +# +# return config.patch_size, dataset +# +# +# # def get_patch_eval_dataset(config): +# # eval_files = create_dataset_dict( +# # volume_directory=config.eval_volume_directory + "/vol", +# # label_directory=config.eval_volume_directory + "/lab", +# # ) +# # +# # patch_func = Compose( +# # [ +# # LoadImaged(keys=["image", "label"], image_only=True), +# # EnsureChannelFirstd( +# # keys=["image", "label"], channel_dim="no_channel" +# # ), +# # # NormalizeIntensityd(keys=["image"]) if config.normalize_input else lambda x: x, +# # RandSpatialCropSamplesd( +# # keys=["image", "label"], +# # roi_size=( +# # config.patch_size +# # ), # multiply by axis_stretch_factor if anisotropy +# # # max_roi_size=(120, 120, 120), +# # random_size=False, +# # num_samples=config.eval_num_patches, +# # ), +# # Orientationd(keys=["image", "label"], axcodes="PLI"), 
+# # SpatialPadd( +# # keys=["image", "label"], +# # spatial_size=(get_padding_dim(config.patch_size)), +# # ), +# # EnsureTyped(keys=["image", "label"]), +# # ] +# # ) +# # +# # eval_transforms = Compose( +# # [ +# # EnsureTyped(keys=["image", "label"]), +# # ] +# # ) +# # +# # return PatchDataset( +# # data=eval_files, +# # samples_per_image=config.eval_num_patches, +# # patch_func=patch_func, +# # transform=eval_transforms, +# # ) +# +# +# def get_dataset_monai(config): +# """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library +# +# Args: +# config (WNetTrainingWorkerConfig): The configuration object +# +# Returns: +# (tuple): A tuple containing the shape of the data and the dataset +# """ +# train_files = create_dataset_dict_no_labs( +# volume_directory=config.train_volume_directory +# ) +# # print(train_files) +# # print(len(train_files)) +# # print(train_files[0]) +# first_volume = LoadImaged(keys=["image"])(train_files[0]) +# first_volume_shape = first_volume["image"].shape +# +# # Transforms to be applied to each volume +# load_single_images = Compose( +# [ +# LoadImaged(keys=["image"]), +# EnsureChannelFirstd(keys=["image"]), +# Orientationd(keys=["image"], axcodes="PLI"), +# SpatialPadd( +# keys=["image"], +# spatial_size=(get_padding_dim(first_volume_shape)), +# ), +# EnsureTyped(keys=["image"]), +# ] +# ) +# +# if config.do_augmentation: +# train_transforms = Compose( +# [ +# ScaleIntensityRanged( +# keys=["image"], +# a_min=0, +# a_max=2000, +# b_min=0.0, +# b_max=1.0, +# clip=True, +# ), +# RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), +# RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), +# RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), +# RandRotate90d(keys=["image"], prob=0.1, max_k=3), +# EnsureTyped(keys=["image"]), +# ] +# ) +# else: +# train_transforms = EnsureTyped(keys=["image"]) +# +# # Create the dataset +# dataset = CacheDataset( +# data=train_files, +# 
transform=Compose(load_single_images, train_transforms), +# ) +# +# return first_volume_shape, dataset +# +# +# def get_scheduler(config, optimizer, verbose=False): +# scheduler_name = config.scheduler +# if scheduler_name == "None": +# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( +# optimizer, +# T_max=100, +# eta_min=config.lr - 1e-6, +# verbose=verbose, +# ) +# +# elif scheduler_name == "ReduceLROnPlateau": +# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( +# optimizer, +# mode="min", +# factor=schedulers["ReduceLROnPlateau"]["factor"], +# patience=schedulers["ReduceLROnPlateau"]["patience"], +# verbose=verbose, +# ) +# elif scheduler_name == "CosineAnnealingLR": +# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( +# optimizer, +# T_max=schedulers["CosineAnnealingLR"]["T_max"], +# eta_min=schedulers["CosineAnnealingLR"]["eta_min"], +# verbose=verbose, +# ) +# elif scheduler_name == "CosineAnnealingWarmRestarts": +# scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( +# optimizer, +# T_0=schedulers["CosineAnnealingWarmRestarts"]["T_0"], +# eta_min=schedulers["CosineAnnealingWarmRestarts"]["eta_min"], +# T_mult=schedulers["CosineAnnealingWarmRestarts"]["T_mult"], +# verbose=verbose, +# ) +# elif scheduler_name == "CyclicLR": +# scheduler = torch.optim.lr_scheduler.CyclicLR( +# optimizer, +# base_lr=schedulers["CyclicLR"]["base_lr"], +# max_lr=schedulers["CyclicLR"]["max_lr"], +# step_size_up=schedulers["CyclicLR"]["step_size_up"], +# mode=schedulers["CyclicLR"]["mode"], +# cycle_momentum=False, +# ) +# else: +# raise ValueError(f"Scheduler {scheduler_name} not provided") +# return scheduler +# +# +# if __name__ == "__main__": +# weights_location = str( +# # Path(__file__).resolve().parent / "../weights/wnet.pth" +# # "../wnet_SUM_MSE_DAPI_rad2_best_metric.pth" +# ) +# train( +# # weights_location +# ) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 
a1850e91..125466f9 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -28,7 +28,6 @@ from monai.transforms import ( AsDiscrete, Compose, - EnsureChannelFirst, EnsureChannelFirstd, EnsureType, EnsureTyped, @@ -43,7 +42,6 @@ RandSpatialCropSamplesd, ScaleIntensityRanged, SpatialPadd, - ToTensor, ) from monai.utils import set_determinism @@ -164,71 +162,16 @@ def __init__( super().__init__() self.config = worker_config - @staticmethod - def create_dataset_dict_no_labs(volume_directory): - """Creates unsupervised data dictionary for MONAI transforms and training.""" - images_filepaths = sorted( - Path.glob(str(Path(volume_directory) / "*.tif")) - ) - if len(images_filepaths) == 0: - raise ValueError(f"Data folder {volume_directory} is empty") - - logger.info("Images :") - for file in images_filepaths: - logger.info(Path(file).stem) - logger.info("*" * 10) - return [{"image": image_name} for image_name in images_filepaths] - - @staticmethod - def create_dataset_dict(volume_directory, label_directory): - """Creates data dictionary for MONAI transforms and training.""" - images_filepaths = sorted( - [str(file) for file in Path(volume_directory).glob("*.tif")] - ) - - labels_filepaths = sorted( - [str(file) for file in Path(label_directory).glob("*.tif")] - ) - if len(images_filepaths) == 0 or len(labels_filepaths) == 0: - raise ValueError( - f"Data folders are empty \n{volume_directory} \n{label_directory}" - ) - - logger.info("Images :") - for file in images_filepaths: - logger.info(Path(file).stem) - logger.info("*" * 10) - logger.info("Labels :") - for file in labels_filepaths: - logger.info(Path(file).stem) - try: - data_dicts = [ - {"image": image_name, "label": label_name} - for image_name, label_name in zip( - images_filepaths, labels_filepaths - ) - ] - except ValueError as e: - raise ValueError( - f"Number of images and labels does not match : \n{volume_directory} \n{label_directory}" - ) from e - 
# self.log(f"Loaded eval image: {data_dicts}") - return data_dicts - - def get_patch_dataset(self, volume_directory): + def get_patch_dataset(self, train_transforms): """Creates a Dataset from the original data using the tifffile library Args: - volume_directory (str): Path to the directory containing the data + train_data_dict (dict): dict with the Paths to the directory containing the data Returns: (tuple): A tuple containing the shape of the data and the dataset """ - train_files = self.create_dataset_dict_no_labs( - volume_directory=volume_directory - ) - patch_func = Compose( [ LoadImaged(keys=["image"], image_only=True), @@ -252,27 +195,8 @@ def get_patch_dataset(self, volume_directory): EnsureTyped(keys=["image"]), ] ) - - train_transforms = Compose( - [ - ScaleIntensityRanged( - keys=["image"], - a_min=0, - a_max=2000, - b_min=0.0, - b_max=1.0, - clip=True, - ), - RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), - RandRotate90d(keys=["image"], prob=0.1, max_k=3), - EnsureTyped(keys=["image"]), - ] - ) - dataset = PatchDataset( - data=train_files, + data=self.config.train_data_dict, samples_per_image=self.config.num_samples, patch_func=patch_func, transform=train_transforms, @@ -280,53 +204,39 @@ def get_patch_dataset(self, volume_directory): return self.config.sample_size, dataset - def get_patch_eval_dataset(self, volume_directory): - eval_files = self.create_dataset_dict( - volume_directory=volume_directory + "/vol", - label_directory=volume_directory + "/lab", - ) - - patch_func = Compose( + def get_patch_dataset_eval(self, eval_dataset_dict): + eval_transforms = Compose( [ LoadImaged(keys=["image", "label"], image_only=True), EnsureChannelFirstd( keys=["image", "label"], channel_dim="no_channel" ), - # NormalizeIntensityd(keys=["image"]) if config.normalize_input else lambda x: x, - RandSpatialCropSamplesd( - keys=["image", 
"label"], - roi_size=( - self.config.sample_size - ), # multiply by axis_stretch_factor if anisotropy - # max_roi_size=(120, 120, 120), - random_size=False, - num_samples=self.config.eval_num_patches, - ), + # RandSpatialCropSamplesd( + # keys=["image", "label"], + # roi_size=( + # self.config.sample_size + # ), # multiply by axis_stretch_factor if anisotropy + # # max_roi_size=(120, 120, 120), + # random_size=False, + # num_samples=self.config.num_samples, + # ), Orientationd(keys=["image", "label"], axcodes="PLI"), - SpatialPadd( - keys=["image", "label"], - spatial_size=( - utils.get_padding_dim(self.config.sample_size) - ), - ), - EnsureTyped(keys=["image", "label"]), - ] - ) - - eval_transforms = Compose( - [ + # SpatialPadd( + # keys=["image", "label"], + # spatial_size=( + # utils.get_padding_dim(self.config.sample_size) + # ), + # ), EnsureTyped(keys=["image", "label"]), ] ) - return PatchDataset( - data=eval_files, - samples_per_image=self.config.eval_num_patches, - patch_func=patch_func, + return CacheDataset( + data=eval_dataset_dict, transform=eval_transforms, ) - def get_dataset_monai(self): + def get_dataset(self, train_transforms): """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library Args: @@ -360,27 +270,6 @@ def get_dataset_monai(self): ] ) - if self.config.do_augmentation: - train_transforms = Compose( - [ - ScaleIntensityRanged( - keys=["image"], - a_min=0, - a_max=2000, - b_min=0.0, - b_max=1.0, - clip=True, - ), - RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), - RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), - RandRotate90d(keys=["image"], prob=0.1, max_k=3), - EnsureTyped(keys=["image"]), - ] - ) - else: - train_transforms = EnsureTyped(keys=["image"]) - # Create the dataset dataset = CacheDataset( data=train_files, @@ -434,50 +323,46 @@ def get_dataset_monai(self): # else: # raise ValueError(f"Scheduler {scheduler_name} not 
provided") # return scheduler - def train(self): - if self.config is None: - self.config = config.WNetTrainingWorkerConfig() - ############## - # disable metadata tracking in MONAI - set_track_meta(False) - ############## - # if WANDB_INSTALLED: - # wandb.init( - # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE - # ) - - set_determinism( - seed=self.config.deterministic_config.seed - ) # use default seed from NP_MAX - torch.use_deterministic_algorithms(True, warn_only=True) - - normalize_function = self.config.normalizing_function - CUDA = torch.cuda.is_available() - device = torch.device("cuda" if CUDA else "cpu") - - self.log(f"Using device: {device}") - - self.log("Config:") - [self.log(str(a)) for a in self.config.__dict__.items()] - - self.log("Initializing training...") - self.log("Getting the data") - if self.config.sampling: - (data_shape, dataset) = self.get_patch_dataset(self.config) - else: - (data_shape, dataset) = self.get_dataset(self.config) - transform = Compose( + def _get_data(self): + if self.config.do_augmentation: + train_transforms = Compose( [ - ToTensor(), - EnsureChannelFirst(channel_dim=0), + ScaleIntensityRanged( + keys=["image"], + a_min=0, + a_max=2000, + b_min=0.0, + b_max=1.0, + clip=True, + ), + RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), + RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), + RandRotate90d(keys=["image"], prob=0.1, max_k=3), + EnsureTyped(keys=["image"]), ] ) - dataset = [transform(im) for im in dataset] - for data in dataset: - self.log(f"Data shape: {data.shape}") - break + else: + train_transforms = EnsureTyped(keys=["image"]) + if self.config.sampling: + self.log("Loading patch dataset") + (data_shape, dataset) = self.get_patch_dataset(train_transforms) + else: + self.log("Loading volume dataset") + (data_shape, dataset) = self.get_dataset(train_transforms) + # transform = Compose( + # [ + # ToTensor(), + # 
EnsureChannelFirst(channel_dim=0), + # ] + # ) + # dataset = [transform(im) for im in dataset] + # for data in dataset: + # self.log(f"Data shape: {data.shape}") + # break + logger.debug(f"Data shape : {data_shape}") dataloader = DataLoader( dataset, batch_size=self.config.batch_size, @@ -487,9 +372,7 @@ def train(self): ) if self.config.eval_volume_dict is not None: - eval_dataset = self.get_patch_eval_dataset( - self.config.eval_volume_dict - ) # FIXME + eval_dataset = self.get_dataset(train_transforms) eval_dataloader = DataLoader( eval_dataset, @@ -498,326 +381,469 @@ def train(self): num_workers=self.config.num_workers, collate_fn=pad_list_data_collate, ) + else: + eval_dataloader = None + return dataloader, eval_dataloader, data_shape - dice_metric = DiceMetric( - include_background=False, reduction="mean", get_not_nans=False - ) - ################################################### - # Training the model # - ################################################### - self.log("Initializing the model:") - - self.log("- getting the model") - # Initialize the model - model = WNet( - in_channels=self.config.in_channels, - out_channels=self.config.out_channels, - num_classes=self.config.num_classes, - dropout=self.config.dropout, - ) - model = ( - nn.DataParallel(model).cuda() - if CUDA and self.config.parallel - else model - ) - model.to(device) - - if self.config.use_clipping: - for p in model.parameters(): - p.register_hook( - lambda grad: torch.clamp( - grad, - min=-self.config.clipping, - max=self.config.clipping, - ) - ) + def train(self): + try: + if self.config is None: + self.config = config.WNetTrainingWorkerConfig() + ############## + # disable metadata tracking in MONAI + set_track_meta(False) + ############## + # if WANDB_INSTALLED: + # wandb.init( + # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE + # ) - if WANDB_INSTALLED: - wandb.watch(model, log_freq=100) + set_determinism( + seed=self.config.deterministic_config.seed + ) # use default 
seed from NP_MAX + torch.use_deterministic_algorithms(True, warn_only=True) - if self.config.weights_info.path is not None: - model.load_state_dict( - torch.load(self.config.weights_info.path, map_location=device) - ) + normalize_function = utils.remap_image + device = self.config.device - self.log("- getting the optimizers") - # Initialize the optimizers - if self.config.weight_decay is not None: - decay = self.config.weight_decay - optimizer = torch.optim.Adam( - model.parameters(), lr=self.config.lr, weight_decay=decay - ) - else: - optimizer = torch.optim.Adam(model.parameters(), lr=self.config.lr) - - self.log("- getting the loss functions") - # Initialize the Ncuts loss function - criterionE = SoftNCutsLoss( - data_shape=data_shape, - device=device, - intensity_sigma=self.config.intensity_sigma, - spatial_sigma=self.config.spatial_sigma, - radius=self.config.radius, - ) + self.log(f"Using device: {device}") - if self.config.reconstruction_loss == "MSE": - criterionW = nn.MSELoss() - elif self.config.reconstruction_loss == "BCE": - criterionW = nn.BCELoss() - else: - raise ValueError( - f"Unknown reconstruction loss : {self.config.reconstruction_loss} not supported" + self.log("Config:") + [self.log(str(a)) for a in self.config.__dict__.items()] + + self.log("Initializing training...") + self.log("Getting the data") + + dataloader, eval_dataloader, data_shape = self._get_data() + + dice_metric = DiceMetric( + include_background=False, reduction="mean", get_not_nans=False + ) + ################################################### + # Training the model # + ################################################### + self.log("Initializing the model:") + + self.log("- Getting the model") + # Initialize the model + model = WNet( + in_channels=self.config.in_channels, + out_channels=self.config.out_channels, + num_classes=self.config.num_classes, + dropout=self.config.dropout, ) + model.to(device) + + if self.config.use_clipping: + for p in model.parameters(): + 
p.register_hook( + lambda grad: torch.clamp( + grad, + min=-self.config.clipping, + max=self.config.clipping, + ) + ) - self.log("- getting the learning rate schedulers") - # Initialize the learning rate schedulers - # scheduler = get_scheduler(self.config, optimizer) - # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - # optimizer, mode="min", factor=0.5, patience=10, verbose=True - # ) - model.train() + if WANDB_INSTALLED: + wandb.watch(model, log_freq=100) - self.log("Ready") - self.log("Training the model") - self.log("*" * 50) + if self.config.weights_info.custom: + if self.config.weights_info.use_pretrained: + weights_file = "wnet.pth" + self.downloader.download_weights("WNet", weights_file) + weights = PRETRAINED_WEIGHTS_DIR / Path(weights_file) + self.config.weights_info.path = weights + else: + weights = str(Path(self.config.weights_info.path)) - startTime = time.time() - ncuts_losses = [] - rec_losses = [] - total_losses = [] - best_dice = -1 + try: + model.load_state_dict( + torch.load( + weights, + map_location=self.config.device, + ), + strict=True, + ) + except RuntimeError as e: + logger.error(f"Error when loading weights : {e}") + logger.exception(e) + warn = ( + "WARNING:\nIt'd seem that the weights were incompatible with the model,\n" + "the model will be trained from random weights" + ) + self.log(warn) + self.warn(warn) + self._weight_error = True + else: + self.log("Model will be trained from scratch") + self.log("- Getting the optimizer") + # Initialize the optimizers + if self.config.weight_decay is not None: + decay = self.config.weight_decay + optimizer = torch.optim.Adam( + model.parameters(), + lr=self.config.learning_rate, + weight_decay=decay, + ) + else: + optimizer = torch.optim.Adam( + model.parameters(), lr=self.config.learning_rate + ) - # Train the model - for epoch in range(self.config.num_epochs): - self.log(f"Epoch {epoch + 1} of {self.config.num_epochs}") + self.log("- Getting the loss functions") + # Initialize the 
Ncuts loss function + criterionE = SoftNCutsLoss( + data_shape=data_shape, + device=device, + intensity_sigma=self.config.intensity_sigma, + spatial_sigma=self.config.spatial_sigma, + radius=self.config.radius, + ) + + if self.config.reconstruction_loss == "MSE": + criterionW = nn.MSELoss() + elif self.config.reconstruction_loss == "BCE": + criterionW = nn.BCELoss() + else: + raise ValueError( + f"Unknown reconstruction loss : {self.config.reconstruction_loss} not supported" + ) - epoch_ncuts_loss = 0 - epoch_rec_loss = 0 - epoch_loss = 0 + # self.log("- getting the learning rate schedulers") + # Initialize the learning rate schedulers + # scheduler = get_scheduler(self.config, optimizer) + # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( + # optimizer, mode="min", factor=0.5, patience=10, verbose=True + # ) + model.train() + + self.log("Ready") + self.log("Training the model") + self.log("*" * 20) + + startTime = time.time() + ncuts_losses = [] + rec_losses = [] + total_losses = [] + best_dice = -1 + + # Train the model + for epoch in range(self.config.max_epochs): + self.log(f"Epoch {epoch + 1} of {self.config.max_epochs}") + + epoch_ncuts_loss = 0 + epoch_rec_loss = 0 + epoch_loss = 0 - for _i, batch in enumerate(dataloader): - # raise NotImplementedError("testing") - if self.config.sampling: + for _i, batch in enumerate(dataloader): + # raise NotImplementedError("testing") image = batch["image"].to(device) - else: - image = batch.to(device) - if self.config.batch_size == 1: - image = image.unsqueeze(0) - else: - image = image.unsqueeze(0) - image = torch.swapaxes(image, 0, 1) - - # Forward pass - enc = model.forward_encoder(image) - # Compute the Ncuts loss - Ncuts = criterionE(enc, image) - epoch_ncuts_loss += Ncuts.item() - # if WANDB_INSTALLED: - # wandb.log({"Ncuts loss": Ncuts.item()}) - - # Forward pass - enc, dec = model(image) - - # Compute the reconstruction loss - if isinstance(criterionW, nn.MSELoss): - reconstruction_loss = criterionW(dec, 
image) - elif isinstance(criterionW, nn.BCELoss): - reconstruction_loss = criterionW( - torch.sigmoid(dec), - utils.remap_image(image, new_max=1), - ) + # if self.config.batch_size == 1: + # image = image.unsqueeze(0) + # else: + # image = image.unsqueeze(0) + # image = torch.swapaxes(image, 0, 1) + + # Forward pass + enc = model.forward_encoder(image) + # Compute the Ncuts loss + Ncuts = criterionE(enc, image) + epoch_ncuts_loss += Ncuts.item() + # if WANDB_INSTALLED: + # wandb.log({"Ncuts loss": Ncuts.item()}) + + # Forward pass + enc, dec = model(image) + + # Compute the reconstruction loss + if isinstance(criterionW, nn.MSELoss): + reconstruction_loss = criterionW(dec, image) + elif isinstance(criterionW, nn.BCELoss): + reconstruction_loss = criterionW( + torch.sigmoid(dec), + utils.remap_image(image, new_max=1), + ) + + epoch_rec_loss += reconstruction_loss.item() + if WANDB_INSTALLED: + wandb.log( + {"Reconstruction loss": reconstruction_loss.item()} + ) + + # Backward pass for the reconstruction loss + optimizer.zero_grad() + alpha = self.config.n_cuts_weight + beta = self.config.rec_loss_weight + + loss = alpha * Ncuts + beta * reconstruction_loss + epoch_loss += loss.item() + # if WANDB_INSTALLED: + # wandb.log({"Sum of losses": loss.item()}) + loss.backward(loss) + optimizer.step() - epoch_rec_loss += reconstruction_loss.item() - if WANDB_INSTALLED: - wandb.log( - {"Reconstruction loss": reconstruction_loss.item()} + # if self.config.scheduler == "CosineAnnealingWarmRestarts": + # scheduler.step(epoch + _i / len(dataloader)) + # if ( + # self.config.scheduler == "CosineAnnealingLR" + # or self.config.scheduler == "CyclicLR" + # ): + # scheduler.step() + + yield TrainingReport( + show_plot=False, weights=model.state_dict() ) - # Backward pass for the reconstruction loss - optimizer.zero_grad() - alpha = self.config.n_cuts_weight - beta = self.config.rec_loss_weight + ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) + rec_losses.append(epoch_rec_loss 
/ len(dataloader)) + total_losses.append(epoch_loss / len(dataloader)) + + if eval_dataloader is None: + try: + enc_out = enc[0].detach().cpu().numpy() + dec_out = dec[0].detach().cpu().numpy() + image = image[0].detach().cpu().numpy() + + images_dict = { + "Encoder output": { + "data": enc_out, + "cmap": "turbo", + }, + "Encoder output (discrete)": { + "data": AsDiscrete(threshold=0.5)( + enc_out + ).numpy(), + "cmap": "turbo", + }, + "Decoder output": { + "data": dec_out, + "cmap": "gist_earth", + }, + "Input image": {"data": image, "cmap": "inferno"}, + } + + yield TrainingReport( + show_plot=True, + epoch=epoch, + loss_1_values={"SoftNCuts loss": ncuts_losses}, + loss_2_values=rec_losses, + weights=model.state_dict(), + images_dict=images_dict, + ) + except TypeError: + pass - loss = alpha * Ncuts + beta * reconstruction_loss - epoch_loss += loss.item() # if WANDB_INSTALLED: - # wandb.log({"Sum of losses": loss.item()}) - loss.backward(loss) - optimizer.step() - - # if self.config.scheduler == "CosineAnnealingWarmRestarts": - # scheduler.step(epoch + _i / len(dataloader)) - # if ( - # self.config.scheduler == "CosineAnnealingLR" - # or self.config.scheduler == "CyclicLR" - # ): - # scheduler.step() - - ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) - rec_losses.append(epoch_rec_loss / len(dataloader)) - total_losses.append(epoch_loss / len(dataloader)) + # wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) + # wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) + # wandb.log({"Sum of losses_epoch": total_losses[-1]}) + # wandb.log({"epoch": epoch}) + # wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) + # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) + # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) + + self.log("Ncuts loss: " + str(ncuts_losses[-1])) + if epoch > 0: + self.log( + "Ncuts loss difference: " + + str(ncuts_losses[-1] - ncuts_losses[-2]) + ) + self.log("Reconstruction 
loss: " + str(rec_losses[-1])) + if epoch > 0: + self.log( + "Reconstruction loss difference: " + + str(rec_losses[-1] - rec_losses[-2]) + ) + self.log("Sum of losses: " + str(total_losses[-1])) + if epoch > 0: + self.log( + "Sum of losses difference: " + + str(total_losses[-1] - total_losses[-2]), + ) - # if WANDB_INSTALLED: - # wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) - # wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) - # wandb.log({"Sum of losses_epoch": total_losses[-1]}) - # wandb.log({"epoch": epoch}) - # wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) - # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) - # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) - - self.log("Ncuts loss: " + str(ncuts_losses[-1])) - if epoch > 0: - self.log( - "Ncuts loss difference: " - + str(ncuts_losses[-1] - ncuts_losses[-2]) - ) - self.log("Reconstruction loss: " + str(rec_losses[-1])) - if epoch > 0: - self.log( - "Reconstruction loss difference: " - + str(rec_losses[-1] - rec_losses[-2]) - ) - self.log("Sum of losses: " + str(total_losses[-1])) - if epoch > 0: - self.log( - "Sum of losses difference: " - + str(total_losses[-1] - total_losses[-2]), - ) + # Update the learning rate + # if self.config.scheduler == "ReduceLROnPlateau": + # # schedulerE.step(epoch_ncuts_loss) + # # schedulerW.step(epoch_rec_loss) + # scheduler.step(epoch_rec_loss) + if ( + eval_dataloader is not None + and (epoch + 1) % self.config.validation_interval == 0 + ): + model.eval() + self.log("Validating...") + with torch.no_grad(): + for _k, val_data in enumerate(eval_dataloader): + val_inputs, val_labels = ( + val_data["image"].to(device), + val_data["label"].to(device), + ) - # Update the learning rate - # if self.config.scheduler == "ReduceLROnPlateau": - # # schedulerE.step(epoch_ncuts_loss) - # # schedulerW.step(epoch_rec_loss) - # scheduler.step(epoch_rec_loss) - if ( - self.config.eval_volume_directory is not None 
- and (epoch + 1) % self.config.val_interval == 0 - ): - model.eval() - self.log("Validating...") - with torch.no_grad(): - for _k, val_data in enumerate(eval_dataloader): - val_inputs, val_labels = ( - val_data["image"].to(device), - val_data["label"].to(device), - ) + # normalize val_inputs across channels + for i in range(val_inputs.shape[0]): + for j in range(val_inputs.shape[1]): + val_inputs[i][j] = normalize_function( + val_inputs[i][j] + ) - # normalize val_inputs across channels - for i in range(val_inputs.shape[0]): - for j in range(val_inputs.shape[1]): - val_inputs[i][j] = normalize_function( - val_inputs[i][j] - ) + val_outputs = sliding_window_inference( + val_inputs, + roi_size=[64, 64, 64], + sw_batch_size=1, + predictor=model.forward_encoder, + overlap=0, + progress=True, + ) + val_outputs = AsDiscrete(threshold=0.5)( + val_outputs + ) + val_decoder_outputs = model.forward_decoder( + val_outputs + ) - val_outputs = model.forward_encoder(val_inputs) - val_outputs = AsDiscrete(threshold=0.5)(val_outputs) - - # compute metric for current iteration - for channel in range(val_outputs.shape[1]): - max_dice_channel = torch.argmax( - torch.Tensor( - [ - utils.dice_coeff( - y_pred=val_outputs[ - :, - channel : (channel + 1), - :, - :, - :, - ], - y_true=val_labels, - ) - ] + # compute metric for current iteration + for channel in range(val_outputs.shape[1]): + max_dice_channel = torch.argmax( + torch.Tensor( + [ + utils.dice_coeff( + y_pred=val_outputs[ + :, + channel : (channel + 1), + :, + :, + :, + ], + y_true=val_labels, + ) + ] + ) ) - ) - dice_metric( - y_pred=val_outputs[ - :, - max_dice_channel : (max_dice_channel + 1), - :, - :, - :, - ], - y=val_labels, - ) + dice_metric( + y_pred=val_outputs[ + :, + max_dice_channel : (max_dice_channel + 1), + :, + :, + :, + ], + y=val_labels, + ) - # aggregate the final mean dice result - metric = dice_metric.aggregate().item() - self.log("Validation Dice score: ", metric) - if best_dice < metric < 2: - 
best_dice = metric - epoch + 1 - if self.config.save_model: - save_best_path = Path( - self.config.save_model_path - ).parents[0] - save_best_path.mkdir(parents=True, exist_ok=True) - save_best_name = Path( - self.config.save_model_path - ).stem + # aggregate the final mean dice result + metric = dice_metric.aggregate().item() + self.log(f"Validation Dice score: {metric}") + if best_dice < metric <= 1: + best_dice = metric + # save the best model + save_best_path = self.config.results_path_folder + # save_best_path.mkdir(parents=True, exist_ok=True) + save_best_name = "wnet" save_path = ( - str(save_best_path / save_best_name) + str(Path(save_best_path) / save_best_name) + "_best_metric.pth" ) self.log(f"Saving new best model to {save_path}") torch.save(model.state_dict(), save_path) - if WANDB_INSTALLED: - # log validation dice score for each validation round - wandb.log({"val/dice_metric": metric}) + if WANDB_INSTALLED: + # log validation dice score for each validation round + wandb.log({"val/dice_metric": metric}) + + display_dict = { + "Decoder output": { + "data": val_decoder_outputs[0], + "cmap": "gist_earth", + }, + "Encoder output": { + "data": val_outputs[0], + "cmap": "turbo", + }, + "Labels": { + "data": val_labels[0], + "cmap": "bop blue", + }, + "Inputs": { + "data": val_inputs[0], + "cmap": "inferno", + }, + } + + yield TrainingReport( + epoch=epoch, + loss_1_values={ + "Ncuts loss": ncuts_losses, + "Dice metric": metric, + }, + loss_2_values=rec_losses, + weights=model.state_dict(), + images_dict=display_dict, + ) + + # reset the status for next validation round + dice_metric.reset() + + eta = ( + (time.time() - startTime) + * (self.config.max_epochs / (epoch + 1) - 1) + / 60 + ) + self.log( + f"ETA: {eta} minutes", + ) + self.log("-" * 20) - # reset the status for next validation round - dice_metric.reset() + # Save the model + if epoch % 5 == 0: + torch.save( + model.state_dict(), + self.config.results_path_folder + "/wnet_.pth", + ) - eta = ( - 
(time.time() - startTime) - * (self.config.num_epochs / (epoch + 1) - 1) - / 60 + self.log("Training finished") + if best_dice > -1: + self.log(f"Best dice metric : {best_dice}") + # if WANDB_INSTALLED and self.config.eval_volume_directory is not None: + # wandb.log( + # { + # "best_dice_metric": best_dice, + # "best_metric_epoch": best_dice_epoch, + # } + # ) + self.log("*" * 50) + + # Save the model + + print( + "Saving the model to: ", + self.config.results_path_folder + "/wnet.pth", ) - self.log( - f"ETA: {eta} minutes", + torch.save( + model.state_dict(), + self.config.results_path_folder + "/wnet.pth", ) - self.log("-" * 20) - - # Save the model # FIXME - if self.config.save_model and epoch % self.config.save_every == 0: - torch.save(model.state_dict(), self.config.save_model_path) - # with open(self.config.save_losses_path, "wb") as f: - # pickle.dump((ncuts_losses, rec_losses), f) - - self.log("Training finished") - self.log(f"Best dice metric : {best_dice}") - # if WANDB_INSTALLED and self.config.eval_volume_directory is not None: - # wandb.log( - # { - # "best_dice_metric": best_dice, - # "best_metric_epoch": best_dice_epoch, - # } - # ) - self.log("*" * 50) - - # Save the model FIXME - if self.config.save_model: - print("Saving the model to: ", self.config.save_model_path) - torch.save(model.state_dict(), self.config.save_model_path) - # with open(self.config.save_losses_path, "wb") as f: - # pickle.dump((ncuts_losses, rec_losses), f) - # if WANDB_INSTALLED: - # model_artifact = wandb.Artifact( - # "WNet", - # type="model", - # description="WNet benchmark", - # metadata=dict(WANDB_CONFIG), - # ) - # model_artifact.add_file(self.config.save_model_path) - # wandb.log_artifact(model_artifact) - - return ncuts_losses, rec_losses, model - - -class TrainingWorker(TrainingWorkerBase): + + # if WANDB_INSTALLED: + # model_artifact = wandb.Artifact( + # "WNet", + # type="model", + # description="WNet benchmark", + # metadata=dict(WANDB_CONFIG), + # ) + # 
model_artifact.add_file(self.config.save_model_path) + # wandb.log_artifact(model_artifact) + + return ncuts_losses, rec_losses, model + except Exception as e: + msg = f"Training failed with exception: {e}" + self.log(msg) + self.raise_error(e, msg) + self.quit() + raise e + + +class SupervisedTrainingWorker(TrainingWorkerBase): """A custom worker to run supervised training jobs in. Inherits from :py:class:`napari.qt.threading.GeneratorWorker` via :py:class:`TrainingWorkerBase` """ @@ -1436,13 +1462,32 @@ def get_loader_func(num_samples): dice_metric.reset() val_metric_values.append(metric) + images_dict = { + "Validation output": { + "data": checkpoint_output[0], + "cmap": "turbo", + }, + "Validation output (discrete)": { + "data": checkpoint_output[1], + "cmap": "bop blue", + }, + "Validation image": { + "data": checkpoint_output[2], + "cmap": "inferno", + }, + "Validation labels": { + "data": checkpoint_output[3], + "cmap": "green", + }, + } + train_report = TrainingReport( show_plot=True, epoch=epoch, - loss_values=epoch_loss_values, - validation_metric=val_metric_values, + loss_1_values={"Loss": epoch_loss_values}, + loss_2_values=val_metric_values, weights=model.state_dict(), - images=checkpoint_output, + images_dict=images_dict, ) self.log("Validation completed") yield train_report diff --git a/napari_cellseg3d/code_models/workers_utils.py b/napari_cellseg3d/code_models/workers_utils.py index 5efb93a0..b07e96c8 100644 --- a/napari_cellseg3d/code_models/workers_utils.py +++ b/napari_cellseg3d/code_models/workers_utils.py @@ -239,7 +239,11 @@ class InferenceResult: class TrainingReport: show_plot: bool = True epoch: int = 0 - loss_values: t.Dict = None # TODO(cyril) : change to dict and unpack different losses for e.g. 
WNet with several losses - validation_metric: t.List = None + loss_1_values: t.Dict = None # example : {"Loss" : [0.1, 0.2, 0.3]} + loss_2_values: t.List = None weights: np.array = None - images: t.List[np.array] = None + images_dict: t.Dict = ( + None # output, discrete output, target, target labels + ) + # OR decoder output, encoder output, target, target labels + # format : {"Layer name" : {"data" : np.array, "cmap" : "turbo"}} diff --git a/napari_cellseg3d/code_plugins/plugin_base.py b/napari_cellseg3d/code_plugins/plugin_base.py index f369320b..1da69bd0 100644 --- a/napari_cellseg3d/code_plugins/plugin_base.py +++ b/napari_cellseg3d/code_plugins/plugin_base.py @@ -360,6 +360,8 @@ def __init__( """array(str): paths to images for training or inference""" self.labels_filepaths = [] """array(str): paths to labels for training""" + self.validation_filepaths = [] + """array(str): paths to validation files (unsup. learning)""" self.results_path = None """str: path to output folder,to save results in""" @@ -372,24 +374,25 @@ def __init__( ####################################################### # interface - # self.image_filewidget = ui.FilePathWidget( - # "Images directory", self.load_image_dataset, self - # ) self.image_filewidget.text_field = "Images directory" self.image_filewidget.button.clicked.disconnect( self._show_dialog_images ) self.image_filewidget.button.clicked.connect(self.load_image_dataset) - # self.labels_filewidget = ui.FilePathWidget( - # "Labels directory", self.load_label_dataset, self - # ) self.labels_filewidget.text_field = "Labels directory" self.labels_filewidget.button.clicked.disconnect( self._show_dialog_labels ) self.labels_filewidget.button.clicked.connect(self.load_label_dataset) - + ################ + # Validation images widget + self.unsupervised_images_filewidget = ui.FilePathWidget( + description="Training directory", + file_function=self.load_validation_images_dataset, + parent=self, + ) + 
self.unsupervised_images_filewidget.setVisible(False) # self.filetype_choice = ui.DropdownMenu( # [".tif", ".tiff"], label="File format" # ) @@ -426,6 +429,19 @@ def load_image_dataset(self): self.image_filewidget.check_ready() self._update_default_paths(path) + def load_validation_images_dataset(self): + """Show file dialog to set :py:attr:`~val_images_filepaths`""" + filenames = self.load_dataset_paths() + logger.debug(f"val filenames : {filenames}") + if filenames: + self.validation_filepaths = [ + str(path) for path in sorted(filenames) + ] + path = str(Path(filenames[0]).parent) + self.unsupervised_images_filewidget.text_field.setText(path) + self.unsupervised_images_filewidget.check_ready() + self._update_default_paths(path) + def load_label_dataset(self): """Show file dialog to set :py:attr:`~labels_filepaths`""" filenames = self.load_dataset_paths() @@ -444,6 +460,7 @@ def _update_default_paths(self, path=None): self._default_path = [ self.extract_dataset_paths(self.images_filepaths), self.extract_dataset_paths(self.labels_filepaths), + self.extract_dataset_paths(self.validation_filepaths), self.results_path, ] return @@ -458,3 +475,9 @@ def extract_dataset_paths(paths): if paths[0] is None: return None return str(Path(paths[0]).parent) + + def _check_all_filepaths(self): + self.image_filewidget.check_ready() + self.labels_filewidget.check_ready() + self.results_filewidget.check_ready() + self.unsupervised_images_filewidget.check_ready() diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index e71f82cc..17ca7b11 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -23,7 +23,8 @@ from napari_cellseg3d import interface as ui from napari_cellseg3d.code_models.model_framework import ModelFramework from napari_cellseg3d.code_models.worker_training import ( - TrainingWorker, + SupervisedTrainingWorker, + 
WNetTrainingWorker, ) from napari_cellseg3d.code_models.workers_utils import TrainingReport @@ -80,10 +81,6 @@ def __init__( * A choice of using random or deterministic training - TODO training plugin: - * Custom model loading - - Args: viewer: napari viewer to display the widget in @@ -121,7 +118,7 @@ def __init__( self.config = config.TrainerConfig() - self.model = None # TODO : custom model loading ? + self.model = None self.worker = None """Training worker for multithreading, should be a TrainingWorker instance from :doc:model_workers.py""" self.worker_config = None @@ -130,6 +127,9 @@ def __init__( self.stop_requested = False """Whether the worker should stop or not""" self.start_time = None + """Start time of the latest job""" + self.unsupervised_mode = False + self.unsupervised_eval_data = None self.loss_list = [ # MUST BE MATCHED WITH THE LOSS FUNCTIONS IN THE TRAINING WORKER DICT "Dice", @@ -143,29 +143,45 @@ def __init__( self.canvas = None """Canvas to plot loss and dice metric in""" - self.train_loss_plot = None + self.plot_1 = None """Plot for loss""" - self.dice_metric_plot = None + self.plot_2 = None """Plot for dice metric""" self.plot_dock = None """Docked widget with plots""" self.result_layers = [] """Layers to display checkpoint""" + self.plot_1_labels = { + "title": { + "supervised": "Epoch average loss", + "unsupervised": "Metrics", + }, + "ylabel": { + "supervised": "Loss", + "unsupervised": "", + }, + } + self.plot_2_labels = { + "title": { + "supervised": "Epoch average dice metric", + "unsupervised": "Reconstruction loss", + }, + "ylabel": { + "supervised": "Metric", + "unsupervised": "Loss", + }, + } + self.df = None - self.loss_values = [] - self.validation_values = [] - - # self.model_choice.setCurrentIndex(0) - ################### - # TODO(cyril) : disable if we implement WNet training - # wnet_index = self.model_choice.findText("WNet") - # self.model_choice.removeItem(wnet_index) - ################################ + self.loss_1_values 
= [] + self.loss_2_values = [] + + ########### # interface + ########### self.zip_choice = ui.CheckBox("Compress results") - self.validation_percent_choice = ui.Slider( lower=10, upper=90, @@ -214,20 +230,10 @@ def __init__( self._update_validation_choice ) - learning_rate_vals = [ - "1e-2", - "1e-3", - "1e-4", - "1e-5", - "1e-6", - ] - - self.learning_rate_choice = ui.DropdownMenu( - learning_rate_vals, text_label="Learning rate" + self.learning_rate_choice = LearningRateWidget(parent=self) + self.lbl_learning_rate_choice = ( + self.learning_rate_choice.lr_value_choice.label ) - self.lbl_learning_rate_choice = self.learning_rate_choice.label - - self.learning_rate_choice.setCurrentIndex(1) self.scheduler_patience_choice = ui.IntIncrementCounter( 1, @@ -286,8 +292,10 @@ def __init__( self.progress.setVisible(False) """Dock widget containing the progress bar""" - self.start_button_supervised = None # button created later and only shown if supervised model is selected - self.loss_group = None # group box created later and only shown if supervised model is selected + # widgets created later and only shown if supervised model is selected + self.start_button_supervised = None + self.loss_group = None + self.validation_group = None ############################ ############################ # WNet parameters @@ -428,32 +436,42 @@ def check_ready(self): return False return True - def _toggle_unsupervised_mode(self): + def _toggle_unsupervised_mode(self, enabled=False): """Change all the UI elements needed for unsupervised learning mode""" - if self.model_choice.currentText() == "WNet": - self.setTabVisible(3, True) - self.setTabEnabled(3, True) - self.start_button_unsupervised.setVisible(True) - self.start_button_supervised.setVisible(False) - self.advanced_next_button.setVisible(True) + if self.model_choice.currentText() == "WNet" or enabled: + unsupervised = True self.start_btn = self.start_button_unsupervised - # loss - # self.loss_choice.setVisible(False) - 
self.loss_group.setVisible(False) - self.scheduler_factor_choice.setVisible(False) - self.scheduler_patience_choice.setVisible(False) + self.image_filewidget.text_field.setText("Validation images") + self.labels_filewidget.text_field.setText("Validation labels") else: - self.setTabVisible(3, False) - self.setTabEnabled(3, False) - self.start_button_unsupervised.setVisible(False) - self.start_button_supervised.setVisible(True) - self.advanced_next_button.setVisible(False) + unsupervised = False self.start_btn = self.start_button_supervised - # loss - # self.loss_choice.setVisible(True) - self.loss_group.setVisible(True) - self.scheduler_factor_choice.setVisible(True) - self.scheduler_patience_choice.setVisible(True) + self.image_filewidget.text_field.setText("Images directory") + self.labels_filewidget.text_field.setText("Labels directory") + + supervised = not unsupervised + self.unsupervised_mode = unsupervised + + self.setTabVisible(3, unsupervised) + self.setTabEnabled(3, unsupervised) + self.start_button_unsupervised.setVisible(unsupervised) + self.start_button_supervised.setVisible(supervised) + self.advanced_next_button.setVisible(unsupervised) + # loss + # self.loss_choice.setVisible(supervised) + self.loss_group.setVisible(supervised) + # scheduler + self.scheduler_factor_choice.container.setVisible(supervised) + self.scheduler_factor_choice.label.setVisible(supervised) + self.scheduler_patience_choice.setVisible(supervised) + self.scheduler_patience_choice.label.setVisible(supervised) + # data + self.unsupervised_images_filewidget.setVisible(unsupervised) + self.validation_group.setVisible(supervised) + self.image_filewidget.required = supervised + self.labels_filewidget.required = supervised + + self._check_all_filepaths() def _build(self): """Builds the layout of the widget and creates the following tabs and prompts: @@ -560,14 +578,11 @@ def _build(self): ui.add_widgets( data_layout, [ - # ui.combine_blocks( - # self.filetype_choice, 
self.filetype_choice.label - # ), # file extension + self.unsupervised_images_filewidget, self.image_filewidget, self.labels_filewidget, + ui.make_label("Results :", parent=self), self.results_filewidget, - # ui.combine_blocks(self.model_choice, self.model_choice.label), # model choice - # TODO : add custom model choice self.zip_choice, # save as zip ], ) @@ -645,12 +660,11 @@ def _build(self): ####################### ui.add_blank(data_tab_w, data_tab_l) ####################### - ui.GroupedWidget.create_single_widget_group( + self.validation_group = ui.GroupedWidget.create_single_widget_group( "Validation (%)", self.validation_percent_choice.container, data_tab_l, ) - ####################### ####################### ui.add_blank(self, data_tab_l) @@ -675,7 +689,7 @@ def _build(self): ################## train_tab = ui.ContainerWidget() ################## - ui.add_blank(train_tab, train_tab.layout) + # ui.add_blank(train_tab, train_tab.layout) ################## self.loss_group = ui.GroupedWidget.create_single_widget_group( "Loss", @@ -760,7 +774,7 @@ def _build(self): ############ ################## advanced_tab = ui.ContainerWidget(parent=self) - self.wnet_widgets = ui.WNetWidgets(parent=advanced_tab) + self.wnet_widgets = WNetWidgets(parent=advanced_tab) ui.add_blank(advanced_tab, advanced_tab.layout) ################## model_params_group_w, model_params_group_l = ui.make_group( @@ -934,18 +948,26 @@ def start(self): self._reset_loss_plot() - try: - self.data = self.create_train_dataset_dict() - except ValueError as err: - self.data = None - raise err - self.config = config.TrainerConfig( save_as_zip=self.zip_choice.isChecked() ) - self._set_supervised_worker_config() - self.worker = TrainingWorker(worker_config=self.worker_config) + if self.unsupervised_mode: + try: + self.data = self.create_dataset_dict_no_labs() + except ValueError as err: + self.data = None + raise err + else: + try: + self.data = self.create_train_dataset_dict() + except ValueError as err: + 
self.data = None + raise err + + # self._set_worker_config() + self.worker = self._create_worker() # calls _set_worker_config + self.worker.set_download_log(self.log) [btn.setVisible(False) for btn in self.close_buttons] @@ -978,13 +1000,27 @@ def _create_supervised_worker_from_config( ): if isinstance(config, config.TrainerConfig): raise TypeError( - "Expected a TrainingWorkerConfig, got a TrainerConfig" + "Expected a SupervisedTrainingWorkerConfig, got a TrainerConfig" ) - return TrainingWorker(worker_config=worker_config) + return SupervisedTrainingWorker(worker_config=worker_config) - def _set_supervised_worker_config( + def _create_unsupervised_worker_from_config( + self, worker_config: config.WNetTrainingWorkerConfig + ): + return WNetTrainingWorker(worker_config=worker_config) + + def _create_worker(self): + self._set_worker_config() + if self.unsupervised_mode: + return self._create_unsupervised_worker_from_config( + self.worker_config + ) + return self._create_supervised_worker_from_config(self.worker_config) + + def _set_worker_config( self, - ) -> config.SupervisedTrainingWorkerConfig: + ) -> config.TrainingWorkerConfig: + logger.debug("Loading config...") model_config = config.ModelInfo(name=self.model_choice.currentText()) self.weights_config.path = self.weights_config.path @@ -992,14 +1028,11 @@ def _set_supervised_worker_config( self.weights_config.use_pretrained = ( not self.use_transfer_choice.isChecked() ) - deterministic_config = config.DeterministicConfig( enabled=self.use_deterministic_choice.isChecked(), seed=self.box_seed.value(), ) - validation_percent = self.validation_percent_choice.slider_value / 100 - results_path_folder = Path( self.results_path + f"/{model_config.name}_" @@ -1010,10 +1043,36 @@ def _set_supervised_worker_config( Path(results_path_folder).mkdir( parents=True, exist_ok=False ) # avoid overwrite where possible - patch_size = [w.value() for w in self.patch_size_widgets] - logger.debug("Loading config...") + if 
self.unsupervised_mode: + try: + self.unsupervised_eval_data = self.create_train_dataset_dict() + except ValueError: + self.unsupervised_eval_data = None + self.worker_config = self._set_unsupervised_worker_config( + results_path_folder, + patch_size, + deterministic_config, + self.unsupervised_eval_data, + ) + else: + self.worker_config = self._set_supervised_worker_config( + model_config, + results_path_folder, + patch_size, + deterministic_config, + ) + return self.worker_config + + def _set_supervised_worker_config( + self, + model_config, + results_path_folder, + patch_size, + deterministic_config, + ): + validation_percent = self.validation_percent_choice.slider_value / 100 self.worker_config = config.SupervisedTrainingWorkerConfig( device=self.check_device_choice(), model_info=model_config, @@ -1022,7 +1081,7 @@ def _set_supervised_worker_config( validation_percent=validation_percent, max_epochs=self.epoch_choice.value(), loss_function=self.loss_choice.currentText(), - learning_rate=float(self.learning_rate_choice.currentText()), + learning_rate=self.learning_rate_choice.get_learning_rate(), scheduler_patience=self.scheduler_patience_choice.value(), scheduler_factor=self.scheduler_factor_choice.slider_value, validation_interval=self.val_interval_choice.value(), @@ -1037,6 +1096,43 @@ def _set_supervised_worker_config( return self.worker_config + def _set_unsupervised_worker_config( + self, + results_path_folder, + patch_size, + deterministic_config, + eval_volume_dict, + ) -> config.WNetTrainingWorkerConfig: + self.worker_config = config.WNetTrainingWorkerConfig( + device=self.check_device_choice(), + weights_info=self.weights_config, + train_data_dict=self.data, + max_epochs=self.epoch_choice.value(), + learning_rate=self.learning_rate_choice.get_learning_rate(), + validation_interval=self.val_interval_choice.value(), + batch_size=self.batch_choice.slider_value, + results_path_folder=str(results_path_folder), + sampling=self.patch_choice.isChecked(), + 
num_samples=self.sample_choice_slider.slider_value, + sample_size=patch_size, + do_augmentation=self.augment_choice.isChecked(), + deterministic_config=deterministic_config, + num_classes=int( + self.wnet_widgets.num_classes_choice.currentText() + ), + reconstruction_loss=self.wnet_widgets.loss_choice.currentText(), + n_cuts_weight=self.wnet_widgets.ncuts_weight_choice.value(), + rec_loss_weight=self.wnet_widgets.get_reconstruction_weight(), + eval_volume_dict=eval_volume_dict, + ) + + return self.worker_config + + def _is_current_job_supervised(self): + if isinstance(self.worker, WNetTrainingWorker): + return False + return True + def on_start(self): """Catches started signal from worker""" @@ -1121,61 +1217,41 @@ def _remove_result_layers(self): self._viewer.layers.remove(layer) self.result_layers = [] - def _display_results(self, images, names, complete_missing=False): + def _display_results(self, images_dict, complete_missing=False): + layer_list = [] if not complete_missing: - layer_output = self._viewer.add_image( - data=images[0], name=names[0], colormap="turbo" - ) - layer_output_discrete = self._viewer.add_image( - data=images[1], name=names[1], colormap="bop blue" - ) - layer_image = self._viewer.add_image( - data=images[2], name=names[2], colormap="inferno" - ) - layer_labels = self._viewer.add_labels( - data=images[3], name=names[3] - ) - self.result_layers += [ - layer_output, - layer_output_discrete, - layer_image, - layer_labels, - ] + for layer_name in list(images_dict.keys()): + logger.debug(f"Adding layer {layer_name}") + layer = self._viewer.add_image( + data=images_dict[layer_name]["data"], + name=layer_name, + colormap=images_dict[layer_name]["cmap"], + ) + layer_list.append(layer) + self.result_layers += layer_list self._viewer.grid.enabled = True self._viewer.dims.ndisplay = 3 self._viewer.reset_view() else: - # add only the missing layers - for i in range(3): - if names[i] not in [ + for i, layer_name in enumerate(list(images_dict.keys())): 
+ if layer_name not in [ layer.name for layer in self._viewer.layers ]: - if i == 0: - layer_output = self._viewer.add_image( - data=images[i], name=names[i], colormap="turbo" - ) - self.result_layers[0] = layer_output - elif i == 1: - layer_output_discrete = self._viewer.add_image( - data=images[i], - name=names[i], - colormap="bop orange", - ) - self.result_layers[1] = layer_output_discrete - elif i == 2: - layer_image = self._viewer.add_image( - data=images[i], name=names[i], colormap="inferno" - ) - self.result_layers[2] = layer_image - else: - layer_labels = self._viewer.add_labels( - data=images[i], name=names[i] - ) - self.result_layers[3] = layer_labels - self.result_layers[i].data = images[i] - self.result_layers[i].refresh() - - def on_yield(self, report: TrainingReport): + logger.debug(f"Adding missing layer {layer_name}") + layer = self._viewer.add_image( + data=images_dict[layer_name]["data"], + name=layer_name, + colormap=images_dict[layer_name]["cmap"], + ) + layer_list[i] = layer + else: + logger.debug(f"Refreshing layer {layer_name}") + self.result_layers[i].data = images_dict[layer_name][ + "data" + ] + self.result_layers[i].refresh() + + def on_yield(self, report: TrainingReport): # TODO refactor for dict # logger.info( # f"\nCatching results : for epoch {data['epoch']}, # loss is {data['losses']} and validation is {data['val_metrics']}" @@ -1185,20 +1261,17 @@ def on_yield(self, report: TrainingReport): if report.show_plot: try: - layer_names = [ - "Validation output", - "Validation output (discrete)", - "Validation image", - "Validation labels", - ] - range(len(report.images)) - self.log.print_and_log(len(report.images)) - - if report.epoch + 1 == self.worker_config.validation_interval: - self._display_results(report.images, layer_names) + self.log.print_and_log(len(report.images_dict)) + + if ( + report.epoch == 0 + or report.epoch + 1 + == self.worker_config.validation_interval + ): + self._display_results(report.images_dict) else: 
self._display_results( - report.images, layer_names, complete_missing=True + report.images_dict, complete_missing=True ) except Exception as e: logger.exception(e) @@ -1207,9 +1280,9 @@ def on_yield(self, report: TrainingReport): 100 * (report.epoch + 1) // self.worker_config.max_epochs ) - self.update_loss_plot(report.loss_values, report.validation_metric) - self.loss_values = report.loss_values - self.validation_values = report.validation_metric + self.update_loss_plot(report.loss_1_values, report.loss_2_values) + self.loss_1_values = report.loss_1_values + self.loss_2_values = report.loss_2_values if self.stop_requested: self.log.print_and_log( @@ -1226,110 +1299,106 @@ def on_yield(self, report: TrainingReport): self.on_stop() self.stop_requested = False - # def clean_cache(self): - # """Attempts to clear memory after training""" - # # del self.worker - # self.worker = None - # # if self.model is not None: - # # del self.model - # # self.model = None - # - # # del self.data - # # self.close() - # # del self - # if self.get_device(show=False).type == "cuda": - # self.empty_cuda_cache() - def _make_csv(self): size_column = range(1, self.worker_config.max_epochs + 1) - if len(self.loss_values) == 0 or self.loss_values is None: + if len(self.loss_1_values) == 0 or self.loss_1_values is None: logger.warning("No loss values to add to csv !") return - val = utils.fill_list_in_between( - self.validation_values, - self.worker_config.validation_interval - 1, - "", - )[: len(size_column)] - - if len(val) != len(self.loss_values): - err = f"Validation and loss values don't have the same length ! 
Got {len(val)} and {len(self.loss_values)}" - logger.error(err) - # return None - raise ValueError(err) - - self.df = pd.DataFrame( - { - "epoch": size_column, - "loss": self.loss_values, - "validation": val, - } - ) + if self._is_current_job_supervised(): + val = utils.fill_list_in_between( + self.loss_2_values, + self.worker_config.validation_interval - 1, + "", + )[: len(size_column)] + self.df = pd.DataFrame( + { + "epoch": size_column, + "loss": self.loss_1_values, + "validation": val, + } + ) + if len(val) != len(self.loss_1_values): + err = f"Validation and loss values don't have the same length ! Got {len(val)} and {len(self.loss_1_values)}" + logger.error(err) + raise ValueError(err) + else: + self.df = pd.DataFrame( + { + "epoch": size_column, + "Ncuts loss": self.loss_1_values, + "Reconstruction loss": self.loss_2_values, + } + ) + path = Path(self.worker_config.results_path_folder) / Path( "training.csv" ) self.df.to_csv(path, index=False) - def plot_loss(self, loss, dice_metric): + def _plot_loss( + self, + loss_values_1: dict, + loss_values_2: list, + show_plot_2_max: bool = True, + ): """Creates two subplots to plot the training loss and validation metric""" + plot_key = ( + "supervised" + if self._is_current_job_supervised() + else "unsupervised" + ) with plt.style.context("dark_background"): # update loss - self.train_loss_plot.set_title("Epoch average loss") - self.train_loss_plot.set_xlabel("Epoch") - self.train_loss_plot.set_ylabel("Loss") - x = [i + 1 for i in range(len(loss))] - y = loss - self.train_loss_plot.plot(x, y) - # self.train_loss_plot.set_ylim(0, 1) - - # update metrics - x = [ - self.worker_config.validation_interval * (i + 1) - for i in range(len(dice_metric)) - ] - y = dice_metric - - epoch_min = ( - np.argmax(y) + 1 - ) * self.worker_config.validation_interval - dice_min = np.max(y) + self.plot_1.set_title(self.plot_1_labels["title"][plot_key]) + self.plot_1.set_xlabel("Epoch") + 
self.plot_1.set_ylabel(self.plot_2_labels["ylabel"][plot_key]) + + for metric_name in list(loss_values_1.keys()): + if metric_name == "Dice coefficient": + x = [ + self.worker_config.validation_interval * (i + 1) + for i in range(len(loss_values_1[metric_name])) + ] + else: + x = [i + 1 for i in range(len(loss_values_1[metric_name]))] + y = loss_values_1[metric_name] + self.plot_1.plot(x, y, label=metric_name) + self.plot_1.legend(loc="lower right") + + # update plot 2 + if self._is_current_job_supervised(): + x = [ + self.worker_config.validation_interval * (i + 1) + for i in range(len(loss_values_2)) + ] + else: + x = [i + 1 for i in range(len(loss_values_2))] + y = loss_values_2 - self.dice_metric_plot.plot(x, y, zorder=1) + self.plot_2.plot(x, y, zorder=1) # self.dice_metric_plot.set_ylim(0, 1) - self.dice_metric_plot.set_title( - "Validation metric : Mean Dice coefficient" - ) - self.dice_metric_plot.set_xlabel("Epoch") - self.dice_metric_plot.set_ylabel("Dice") - - self.dice_metric_plot.scatter( - epoch_min, - dice_min, - c="r", - label="Maximum Dice coeff.", - zorder=5, - ) - self.dice_metric_plot.legend( - facecolor=ui.napari_grey, loc="lower right" - ) + self.plot_2.set_title(self.plot_2_labels["title"][plot_key]) + self.plot_2.set_xlabel("Epoch") + self.plot_2.set_ylabel(self.plot_2_labels["ylabel"][plot_key]) + + if show_plot_2_max: + epoch_min = ( + np.argmax(y) + 1 + ) * self.worker_config.validation_interval + dice_min = np.max(y) + self.plot_2.scatter( + epoch_min, + dice_min, + c="r", + label="Maximum Dice coeff.", + zorder=5, + ) + self.plot_2.legend(facecolor=ui.napari_grey, loc="lower right") self.canvas.draw_idle() - # plot_path = self.worker_config.results_path_folder / Path( - # "../Loss_plots" - # ) - # Path(plot_path).mkdir(parents=True, exist_ok=True) - # - # if self.canvas is not None: - # self.canvas.figure.savefig( - # str( - # plot_path - # / f"checkpoint_metric_plots_{utils.get_date_time()}.png" - # ), - # format="png", - # ) - - def 
update_loss_plot(self, loss, metric): + def update_loss_plot(self, loss_1: dict, loss_2: list): """ Updates the plots on subsequent validation steps. Creates the plot on the second validation step (epoch == val_interval*2). @@ -1339,7 +1408,8 @@ def update_loss_plot(self, loss, metric): Returns: returns empty if the epoch is < than 2 * validation interval. """ - epoch = len(loss) + epoch = len(loss_1[list(loss_1.keys())[0]]) + logger.debug(f"Updating loss plot for epoch {epoch}") if epoch < self.worker_config.validation_interval * 2: return if epoch == self.worker_config.validation_interval * 2: @@ -1347,13 +1417,13 @@ def update_loss_plot(self, loss, metric): with plt.style.context("dark_background"): self.canvas = FigureCanvas(Figure(figsize=(10, 1.5))) # loss plot - self.train_loss_plot = self.canvas.figure.add_subplot(1, 2, 1) + self.plot_1 = self.canvas.figure.add_subplot(1, 2, 1) # dice metric validation plot - self.dice_metric_plot = self.canvas.figure.add_subplot(1, 2, 2) + self.plot_2 = self.canvas.figure.add_subplot(1, 2, 2) self.canvas.figure.set_facecolor(bckgrd_color) - self.dice_metric_plot.set_facecolor(bckgrd_color) - self.train_loss_plot.set_facecolor(bckgrd_color) + self.plot_2.set_facecolor(bckgrd_color) + self.plot_1.set_facecolor(bckgrd_color) # self.canvas.figure.tight_layout() @@ -1377,26 +1447,164 @@ def update_loss_plot(self, loss, metric): self.canvas, name="Loss plots", area="bottom" ) self.plot_dock._close_btn = False + self.docked_widgets.append(self.plot_dock) except AttributeError as e: logger.exception(e) logger.error( "Plot dock widget could not be added. 
Should occur in testing only" ) - - self.docked_widgets.append(self.plot_dock) - self.plot_loss(loss, metric) + self._plot_loss(loss_1, loss_2) else: with plt.style.context("dark_background"): - self.train_loss_plot.cla() - self.dice_metric_plot.cla() + self.plot_1.cla() + self.plot_2.cla() - self.plot_loss(loss, metric) + self._plot_loss(loss_1, loss_2) def _reset_loss_plot(self): - if ( - self.train_loss_plot is not None - and self.dice_metric_plot is not None - ): + if self.plot_1 is not None and self.plot_2 is not None: with plt.style.context("dark_background"): - self.train_loss_plot.cla() - self.dice_metric_plot.cla() + self.plot_1.cla() + self.plot_2.cla() + + +class LearningRateWidget(ui.ContainerWidget): + def __init__(self, parent=None): + super().__init__(vertical=False, parent=parent) + + self.lr_exponent_dict = { + "1e-2": 1e-2, + "1e-3": 1e-3, + "1e-4": 1e-4, + "1e-5": 1e-5, + "1e-6": 1e-6, + "1e-7": 1e-7, + "1e-8": 1e-8, + } + + self.lr_value_choice = ui.IntIncrementCounter( + lower=1, + upper=9, + default=1, + text_label="Learning rate : ", + parent=self, + fixed=False, + ) + self.lr_exponent_choice = ui.DropdownMenu( + list(self.lr_exponent_dict.keys()), + parent=self, + fixed=False, + ) + self._build() + + def _build(self): + self.lr_value_choice.setFixedWidth(20) + # self.lr_exponent_choice.setFixedWidth(100) + self.lr_exponent_choice.setCurrentIndex(1) + ui.add_widgets( + self.layout, + [ + self.lr_value_choice, + ui.make_label("x"), + self.lr_exponent_choice, + ], + ) + + def get_learning_rate(self) -> float: + return float( + self.lr_value_choice.value() + * self.lr_exponent_dict[self.lr_exponent_choice.currentText()] + ) + + +class WNetWidgets: + """A collection of widgets for the WNet training GUI""" + + default_config = config.WNetTrainingWorkerConfig() + + def __init__(self, parent): + self.num_classes_choice = ui.DropdownMenu( + entries=["2", "3", "4"], + parent=parent, + text_label="Number of classes", + ) + self.intensity_sigma_choice = 
ui.DoubleIncrementCounter( + lower=1.0, + upper=100.0, + default=self.default_config.intensity_sigma, + parent=parent, + text_label="Intensity sigma", + ) + self.intensity_sigma_choice.setMaximumWidth(20) + self.spatial_sigma_choice = ui.DoubleIncrementCounter( + lower=1.0, + upper=100.0, + default=self.default_config.spatial_sigma, + parent=parent, + text_label="Spatial sigma", + ) + self.spatial_sigma_choice.setMaximumWidth(20) + self.radius_choice = ui.IntIncrementCounter( + lower=1, + upper=5, + default=self.default_config.radius, + parent=parent, + text_label="Radius", + ) + self.radius_choice.setMaximumWidth(20) + self.loss_choice = ui.DropdownMenu( + entries=["MSE", "BCE"], + parent=parent, + text_label="Reconstruction loss", + ) + self.ncuts_weight_choice = ui.DoubleIncrementCounter( + lower=0.1, + upper=1.0, + default=self.default_config.n_cuts_weight, + parent=parent, + text_label="NCuts weight", + ) + self.reconstruction_weight_choice = ui.DoubleIncrementCounter( + lower=0.1, + upper=1.0, + default=0.5, + parent=parent, + text_label="Reconstruction weight", + ) + self.reconstruction_weight_choice.setMaximumWidth(20) + self.reconstruction_weight_divide_factor_choice = ( + ui.IntIncrementCounter( + lower=1, + upper=10000, + default=100, + parent=parent, + text_label="Reconstruction weight divide factor", + ) + ) + self.reconstruction_weight_divide_factor_choice.setMaximumWidth(20) + + self._set_tooltips() + + def _set_tooltips(self): + self.num_classes_choice.setToolTip("Number of classes to segment") + self.intensity_sigma_choice.setToolTip( + "Intensity sigma for the NCuts loss" + ) + self.spatial_sigma_choice.setToolTip( + "Spatial sigma for the NCuts loss" + ) + self.radius_choice.setToolTip("Radius of NCuts loss region") + self.loss_choice.setToolTip("Loss function to use for reconstruction") + self.ncuts_weight_choice.setToolTip("Weight of the NCuts loss") + self.reconstruction_weight_choice.setToolTip( + "Weight of the reconstruction loss" + ) + 
self.reconstruction_weight_divide_factor_choice.setToolTip( + "Divide factor for the reconstruction loss.\nThis might have to be changed depending on your images.\nIf you notice that the reconstruction loss is too high, raise this factor until the\nreconstruction loss is in the same order of magnitude as the NCuts loss." + ) + + def get_reconstruction_weight(self): + return float( + self.reconstruction_weight_choice.value() + / self.reconstruction_weight_divide_factor_choice.value() + ) diff --git a/napari_cellseg3d/config.py b/napari_cellseg3d/config.py index 84f6468c..72f8dfab 100644 --- a/napari_cellseg3d/config.py +++ b/napari_cellseg3d/config.py @@ -14,7 +14,7 @@ from napari_cellseg3d.code_models.models.model_TRAILMAP_MS import TRAILMAP_MS_ from napari_cellseg3d.code_models.models.model_VNet import VNet_ from napari_cellseg3d.code_models.models.model_WNet import WNet_ -from napari_cellseg3d.utils import LOGGER, remap_image +from napari_cellseg3d.utils import LOGGER logger = LOGGER @@ -24,10 +24,10 @@ MODEL_LIST = { "SegResNet": SegResNet_, "VNet": VNet_, - # "TRAILMAP": TRAILMAP, "TRAILMAP_MS": TRAILMAP_MS_, "SwinUNetR": SwinUNETR_, "WNet": WNet_, + # "TRAILMAP": TRAILMAP, # "test" : DO NOT USE, reserved for testing } @@ -232,7 +232,7 @@ class InferenceWorkerConfig: class DeterministicConfig: """Class to record deterministic config""" - enabled: bool = False + enabled: bool = True seed: int = 34936339 # default seed from NP_MAX @@ -256,7 +256,7 @@ class TrainingWorkerConfig: deterministic_config: DeterministicConfig = DeterministicConfig() scheduler_factor: float = 0.5 scheduler_patience: int = 10 - weights_info: WeightsInfo = None + weights_info: WeightsInfo = WeightsInfo() # data params results_path_folder: str = str(Path.home() / Path("cellseg3d/training")) sampling: bool = False @@ -287,6 +287,7 @@ class WNetTrainingWorkerConfig(TrainingWorkerConfig): dropout: float = 0.65 use_clipping: bool = False # use gradient clipping clipping: float = 1.0 # clipping 
value + weight_decay: float = 1e-5 # weight decay (used 0.01 historically) # NCuts loss params intensity_sigma: float = 1.0 spatial_sigma: float = 4.0 @@ -299,11 +300,10 @@ class WNetTrainingWorkerConfig(TrainingWorkerConfig): 0.5 / 100 ) # must be adjusted depending on images; compare to NCuts loss value # normalization params - normalizing_function: callable = remap_image + # normalizing_function: callable = remap_image # FIXME: call directly in worker, not a param # data params train_data_dict: dict = None eval_volume_dict: str = None - eval_num_patches: int = 10 ################ diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index e5f448f3..7d1ec7c5 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -1,4 +1,3 @@ -import contextlib import threading from functools import partial from typing import List, Optional @@ -36,7 +35,6 @@ # Local from napari_cellseg3d import utils -from napari_cellseg3d.config import WNetTrainingWorkerConfig """ User interface functions and aliases""" @@ -873,9 +871,7 @@ def __init__( self.build() self.check_ready() - - if self._required: - self._text_field.textChanged.connect(self.check_ready) + self.text_field.textChanged.connect(self.check_ready) def build(self): """Builds the layout of the widget""" @@ -914,11 +910,15 @@ def button(self): def check_ready(self): """Check if a path is correctly set""" - if self.text_field.text() in ["", self._initial_desc]: + if ( + self.text_field.text() in ["", self._initial_desc] + and self.required + ): self.update_field_color("indianred") self.text_field.setToolTip("Mandatory field !") return False self.update_field_color(f"{napari_param_darkgrey}") + self.text_field.setToolTip(f"{self.text_field.text()}") return True @property @@ -928,12 +928,6 @@ def required(self): @required.setter def required(self, is_required): """If set to True, will be colored red if incorrectly set""" - if is_required: - 
self.text_field.textChanged.connect(self.check_ready) - else: - with contextlib.suppress(TypeError): - self.text_field.textChanged.disconnect(self.check_ready) - self.check_ready() self._required = is_required @@ -1417,96 +1411,3 @@ def open_url(url): url (str): Url to be opened """ QDesktopServices.openUrl(QUrl(url, QUrl.TolerantMode)) - - -class WNetWidgets: - """A collection of widgets for the WNet training GUI""" - - default_config = WNetTrainingWorkerConfig() - - def __init__(self, parent): - self.num_classes_choice = DropdownMenu( - entries=["2", "3", "4"], - parent=parent, - text_label="Number of classes", - ) - self.intensity_sigma_choice = DoubleIncrementCounter( - lower=1.0, - upper=100.0, - default=self.default_config.intensity_sigma, - parent=parent, - text_label="Intensity sigma", - ) - self.intensity_sigma_choice.setMaximumWidth(20) - self.spatial_sigma_choice = DoubleIncrementCounter( - lower=1.0, - upper=100.0, - default=self.default_config.spatial_sigma, - parent=parent, - text_label="Spatial sigma", - ) - self.spatial_sigma_choice.setMaximumWidth(20) - self.radius_choice = IntIncrementCounter( - lower=1, - upper=5, - default=self.default_config.radius, - parent=parent, - text_label="Radius", - ) - self.radius_choice.setMaximumWidth(20) - self.loss_choice = DropdownMenu( - entries=["MSE", "BCE"], parent=parent, text_label="Loss function" - ) - self.ncuts_weight_choice = DoubleIncrementCounter( - lower=0.1, - upper=1.0, - default=self.default_config.n_cuts_weight, - parent=parent, - text_label="NCuts weight", - ) - self.reconstruction_weight_choice = DoubleIncrementCounter( - lower=0.1, - upper=1.0, - default=0.5, - parent=parent, - text_label="Reconstruction weight", - ) - self.reconstruction_weight_choice.setMaximumWidth(20) - self.reconstruction_weight_divide_factor_choice = IntIncrementCounter( - lower=1, - upper=10000, - default=100, - parent=parent, - text_label="Reconstruction weight divide factor", - ) - 
self.reconstruction_weight_divide_factor_choice.setMaximumWidth(20) - self.evaluation_patches_choice = Slider( - lower=1, - upper=100, - default=self.default_config.eval_num_patches, - parent=parent, - text_label="Number of patches for evaluation", - ) - - self._set_tooltips() - - def _set_tooltips(self): - self.num_classes_choice.setToolTip("Number of classes to segment") - self.intensity_sigma_choice.setToolTip( - "Intensity sigma for the NCuts loss" - ) - self.spatial_sigma_choice.setToolTip( - "Spatial sigma for the NCuts loss" - ) - self.radius_choice.setToolTip("Radius of NCuts loss region") - self.loss_choice.setToolTip("Loss function to use for reconstruction") - self.ncuts_weight_choice.setToolTip("Weight of the NCuts loss") - self.reconstruction_weight_choice.setToolTip( - "Weight of the reconstruction loss" - ) - self.reconstruction_weight_divide_factor_choice.setToolTip( - "Divide factor for the reconstruction loss.\nThis might have to be changed depending on your images.\nIf you notice that the reconstruction loss is too high, raise this factor until the\nreconstruction loss is in the same order of magnitude as the NCuts loss." 
- ) - self.evaluation_patches_choice.setToolTip( - "Number of patches to use for evaluation" - ) From ebaf5880b83304cfea9b82f8e1863ff7590a9ddc Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 27 Jul 2023 18:30:01 +0200 Subject: [PATCH 09/70] Fixes --- .../code_models/models/wnet/model.py | 68 +++++++++---------- .../code_models/worker_training.py | 5 +- .../code_plugins/plugin_model_training.py | 37 +++++++--- 3 files changed, 63 insertions(+), 47 deletions(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 0f9822cd..0bfe8851 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -98,25 +98,25 @@ def __init__( self.channels = channels self.max_pool = nn.MaxPool3d(2) self.in_b = InBlock(in_channels, self.channels[0], dropout=dropout) - # self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) - # self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) + self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) + self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) # self.conv3 = Block(channels[2], self.channels[3], dropout=dropout) # self.bot = Block(channels[3], self.channels[4], dropout=dropout) - # self.bot = Block(channels[2], self.channels[3], dropout=dropout) - self.bot = Block(channels[0], self.channels[1], dropout=dropout) + self.bot = Block(channels[2], self.channels[3], dropout=dropout) + # self.bot = Block(channels[0], self.channels[1], dropout=dropout) # self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) - # self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) - # self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) + self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) + self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) # 
self.conv_trans1 = nn.ConvTranspose3d( # self.channels[4], self.channels[3], 2, stride=2 # ) - # self.conv_trans2 = nn.ConvTranspose3d( - # self.channels[3], self.channels[2], 2, stride=2 - # ) - # self.conv_trans3 = nn.ConvTranspose3d( - # self.channels[2], self.channels[1], 2, stride=2 - # ) + self.conv_trans2 = nn.ConvTranspose3d( + self.channels[3], self.channels[2], 2, stride=2 + ) + self.conv_trans3 = nn.ConvTranspose3d( + self.channels[2], self.channels[1], 2, stride=2 + ) self.conv_trans_out = nn.ConvTranspose3d( self.channels[1], self.channels[0], 2, stride=2 ) @@ -127,12 +127,12 @@ def __init__( def forward(self, x): """Forward pass of the U-Net model.""" in_b = self.in_b(x) - # c1 = self.conv1(self.max_pool(in_b)) - # c2 = self.conv2(self.max_pool(c1)) + c1 = self.conv1(self.max_pool(in_b)) + c2 = self.conv2(self.max_pool(c1)) # c3 = self.conv3(self.max_pool(c2)) # x = self.bot(self.max_pool(c3)) - # x = self.bot(self.max_pool(c2)) - x = self.bot(self.max_pool(in_b)) + x = self.bot(self.max_pool(c2)) + # x = self.bot(self.max_pool(in_b)) # x = self.deconv1( # torch.cat( # [ @@ -142,24 +142,24 @@ def forward(self, x): # dim=1, # ) # ) - # x = self.deconv2( - # torch.cat( - # [ - # c2, - # self.conv_trans2(x), - # ], - # dim=1, - # ) - # ) - # x = self.deconv3( - # torch.cat( - # [ - # c1, - # self.conv_trans3(x), - # ], - # dim=1, - # ) - # ) + x = self.deconv2( + torch.cat( + [ + c2, + self.conv_trans2(x), + ], + dim=1, + ) + ) + x = self.deconv3( + torch.cat( + [ + c1, + self.conv_trans3(x), + ], + dim=1, + ) + ) x = self.out_b( torch.cat( [ diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 125466f9..144796dd 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -92,6 +92,7 @@ # 2. Create a custom worker for WNet training # 3. Adapt UI for WNet training (Advanced tab + model choice on first tab) # 4. 
Adapt plots and TrainingReport for WNet training +# 5. log_parameters function class TrainingWorkerBase(GeneratorWorker): @@ -408,7 +409,7 @@ def train(self): self.log(f"Using device: {device}") - self.log("Config:") + self.log("Config:") # FIXME log_parameters func instead [self.log(str(a)) for a in self.config.__dict__.items()] self.log("Initializing training...") @@ -773,7 +774,7 @@ def train(self): yield TrainingReport( epoch=epoch, loss_1_values={ - "Ncuts loss": ncuts_losses, + "SoftNcuts loss": ncuts_losses, "Dice metric": metric, }, loss_2_values=rec_losses, diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 17ca7b11..70e98d48 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -174,7 +174,7 @@ def __init__( } self.df = None - self.loss_1_values = [] + self.loss_1_values = {} self.loss_2_values = [] ########### @@ -1267,7 +1267,8 @@ def on_yield(self, report: TrainingReport): # TODO refactor for dict report.epoch == 0 or report.epoch + 1 == self.worker_config.validation_interval - ): + ) and len(self.result_layers) == 0: + self.result_layers = [] self._display_results(report.images_dict) else: self._display_results( @@ -1312,6 +1313,7 @@ def _make_csv(self): self.worker_config.validation_interval - 1, "", )[: len(size_column)] + self.df = pd.DataFrame( { "epoch": size_column, @@ -1324,13 +1326,25 @@ def _make_csv(self): logger.error(err) raise ValueError(err) else: - self.df = pd.DataFrame( - { - "epoch": size_column, - "Ncuts loss": self.loss_1_values, - "Reconstruction loss": self.loss_2_values, - } - ) + ncuts_loss = self.loss_1_values["SoftNCuts"] + try: + dice_metric = self.loss_1_values["Dice metric"] + self.df = pd.DataFrame( + { + "epoch": size_column, + "Ncuts loss": ncuts_loss, + "Dice metric": dice_metric, + "Reconstruction loss": self.loss_2_values, + } + ) + except KeyError: + self.df = 
pd.DataFrame( + { + "epoch": size_column, + "Ncuts loss": ncuts_loss, + "Reconstruction loss": self.loss_2_values, + } + ) path = Path(self.worker_config.results_path_folder) / Path( "training.csv" @@ -1410,6 +1424,7 @@ def update_loss_plot(self, loss_1: dict, loss_2: list): epoch = len(loss_1[list(loss_1.keys())[0]]) logger.debug(f"Updating loss plot for epoch {epoch}") + plot_max = self._is_current_job_supervised() if epoch < self.worker_config.validation_interval * 2: return if epoch == self.worker_config.validation_interval * 2: @@ -1453,13 +1468,13 @@ def update_loss_plot(self, loss_1: dict, loss_2: list): logger.error( "Plot dock widget could not be added. Should occur in testing only" ) - self._plot_loss(loss_1, loss_2) + self._plot_loss(loss_1, loss_2, show_plot_2_max=plot_max) else: with plt.style.context("dark_background"): self.plot_1.cla() self.plot_2.cla() - self._plot_loss(loss_1, loss_2) + self._plot_loss(loss_1, loss_2, show_plot_2_max=plot_max) def _reset_loss_plot(self): if self.plot_1 is not None and self.plot_2 is not None: From 2618ae15d7cb89ea6546c128595f7b4529db3129 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 10:43:14 +0200 Subject: [PATCH 10/70] Test fixes --- .../_tests/test_model_framework.py | 2 +- ...raining.py => test_supervised_training.py} | 40 ---------------- .../_tests/test_unsup_training.py | 44 +++++++++++++++++ .../code_models/models/wnet/model.py | 36 +++++++------- .../code_models/worker_training.py | 41 +++++++++++----- .../code_plugins/plugin_model_training.py | 47 +++++++++++++++++-- 6 files changed, 136 insertions(+), 74 deletions(-) rename napari_cellseg3d/_tests/{test_training.py => test_supervised_training.py} (72%) create mode 100644 napari_cellseg3d/_tests/test_unsup_training.py diff --git a/napari_cellseg3d/_tests/test_model_framework.py b/napari_cellseg3d/_tests/test_model_framework.py index 0a078273..1cb86569 100644 --- a/napari_cellseg3d/_tests/test_model_framework.py +++ 
b/napari_cellseg3d/_tests/test_model_framework.py @@ -35,8 +35,8 @@ def test_update_default(make_napari_viewer_proxy): assert widget._default_path == [ pth("C:/test/test"), pth("C:/dataset/labels"), - pth("D:/dataset/res"), None, + pth("D:/dataset/res"), ] diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_supervised_training.py similarity index 72% rename from napari_cellseg3d/_tests/test_training.py rename to napari_cellseg3d/_tests/test_supervised_training.py index c5737f11..2ce1ee03 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_supervised_training.py @@ -31,20 +31,6 @@ def test_create_supervised_worker_from_config(make_napari_viewer_proxy): ) -def test_create_unspervised_worker_from_config(make_napari_viewer_proxy): - widget = Trainer(make_napari_viewer_proxy()) - widget.model_choice.setCurrentText("WNet") - widget._toggle_unsupervised_mode(enabled=True) - default_config = config.WNetTrainingWorkerConfig() - worker = widget._create_worker() - excluded = ["results_path_folder", "sample_size", "weights_info"] - for attr in dir(default_config): - if not attr.startswith("__") and attr not in excluded: - assert getattr(default_config, attr) == getattr( - worker.config, attr - ) - - def test_update_loss_plot(make_napari_viewer_proxy): view = make_napari_viewer_proxy() widget = Trainer(view) @@ -135,29 +121,3 @@ def test_training(make_napari_viewer_proxy, qtbot): widget.on_yield(res) assert widget.loss_1_values["loss"] == [1, 1, 1, 1] assert widget.loss_2_values == [1, 1, 1, 1] - - -def test_unsupervised_worker(make_napari_viewer_proxy): - viewer = make_napari_viewer_proxy() - widget = Trainer(viewer) - - widget.model_choice.setCurrentText("WNet") - widget._toggle_unsupervised_mode(enabled=True) - - widget.unsupervised_images_filewidget.text_field.setText( - str(im_path.parent) - ) - widget.data = widget.create_dataset_dict_no_labs() - worker = widget._create_worker() - dataloader, 
eval_dataloader, data_shape = worker._get_data() - assert eval_dataloader is None - assert data_shape == (6, 6, 6) - - widget.images_filepaths = [str(im_path.parent)] - widget.labels_filepaths = [str(im_path.parent)] - widget.unsupervised_eval_data = widget.create_train_dataset_dict() - assert widget.unsupervised_eval_data is not None - worker = widget._create_worker() - dataloader, eval_dataloader, data_shape = worker._get_data() - assert eval_dataloader is not None - assert data_shape == (6, 6, 6) diff --git a/napari_cellseg3d/_tests/test_unsup_training.py b/napari_cellseg3d/_tests/test_unsup_training.py new file mode 100644 index 00000000..163b2a3d --- /dev/null +++ b/napari_cellseg3d/_tests/test_unsup_training.py @@ -0,0 +1,44 @@ +from pathlib import Path + +from napari_cellseg3d import config +from napari_cellseg3d.code_plugins.plugin_model_training import ( + Trainer, +) + +im_path = Path(__file__).resolve().parent / "res/test.tif" +im_path_str = str(im_path) + + +def test_unsupervised_worker(make_napari_viewer_proxy): + unsup_viewer = make_napari_viewer_proxy() + widget = Trainer(viewer=unsup_viewer) + + widget.model_choice.setCurrentText("WNet") + widget._toggle_unsupervised_mode(enabled=True) + + default_config = config.WNetTrainingWorkerConfig() + worker = widget._create_worker(additional_results_description="TEST_1") + excluded = ["results_path_folder", "sample_size", "weights_info"] + for attr in dir(default_config): + if not attr.startswith("__") and attr not in excluded: + assert getattr(default_config, attr) == getattr( + worker.config, attr + ) + + widget.unsupervised_images_filewidget.text_field.setText( + str(im_path.parent) + ) + widget.data = widget.create_dataset_dict_no_labs() + worker = widget._create_worker(additional_results_description="TEST_2") + dataloader, eval_dataloader, data_shape = worker._get_data() + assert eval_dataloader is None + assert data_shape == (6, 6, 6) + + widget.images_filepaths = [str(im_path.parent)] + 
widget.labels_filepaths = [str(im_path.parent)] + widget.unsupervised_eval_data = widget.create_train_dataset_dict() + assert widget.unsupervised_eval_data is not None + worker = widget._create_worker(additional_results_description="TEST_3") + dataloader, eval_dataloader, data_shape = worker._get_data() + assert eval_dataloader is not None + assert data_shape == (6, 6, 6) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 0bfe8851..989ae3b7 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -99,21 +99,22 @@ def __init__( self.max_pool = nn.MaxPool3d(2) self.in_b = InBlock(in_channels, self.channels[0], dropout=dropout) self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) - self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) + # self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) # self.conv3 = Block(channels[2], self.channels[3], dropout=dropout) # self.bot = Block(channels[3], self.channels[4], dropout=dropout) - self.bot = Block(channels[2], self.channels[3], dropout=dropout) + # self.bot = Block(channels[2], self.channels[3], dropout=dropout) + self.bot = Block(channels[1], self.channels[2], dropout=dropout) # self.bot = Block(channels[0], self.channels[1], dropout=dropout) # self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) - self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) + # self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) # self.conv_trans1 = nn.ConvTranspose3d( # self.channels[4], self.channels[3], 2, stride=2 # ) - self.conv_trans2 = nn.ConvTranspose3d( - self.channels[3], self.channels[2], 2, stride=2 - ) + # self.conv_trans2 = nn.ConvTranspose3d( + # self.channels[3], 
self.channels[2], 2, stride=2 + # ) self.conv_trans3 = nn.ConvTranspose3d( self.channels[2], self.channels[1], 2, stride=2 ) @@ -128,10 +129,11 @@ def forward(self, x): """Forward pass of the U-Net model.""" in_b = self.in_b(x) c1 = self.conv1(self.max_pool(in_b)) - c2 = self.conv2(self.max_pool(c1)) + # c2 = self.conv2(self.max_pool(c1)) # c3 = self.conv3(self.max_pool(c2)) # x = self.bot(self.max_pool(c3)) - x = self.bot(self.max_pool(c2)) + # x = self.bot(self.max_pool(c2)) + x = self.bot(self.max_pool(c1)) # x = self.bot(self.max_pool(in_b)) # x = self.deconv1( # torch.cat( @@ -142,15 +144,15 @@ def forward(self, x): # dim=1, # ) # ) - x = self.deconv2( - torch.cat( - [ - c2, - self.conv_trans2(x), - ], - dim=1, - ) - ) + # x = self.deconv2( + # torch.cat( + # [ + # c2, + # self.conv_trans2(x), + # ], + # dim=1, + # ) + # ) x = self.deconv3( torch.cat( [ diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 144796dd..d98c6ecf 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -643,23 +643,38 @@ def train(self): # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) - self.log("Ncuts loss: " + str(ncuts_losses[-1])) + # self.log("Ncuts loss: " + str(ncuts_losses[-1])) + # if epoch > 0: + # self.log( + # "Ncuts loss difference: " + # + str(ncuts_losses[-1] - ncuts_losses[-2]) + # ) + # self.log("Reconstruction loss: " + str(rec_losses[-1])) + # if epoch > 0: + # self.log( + # "Reconstruction loss difference: " + # + str(rec_losses[-1] - rec_losses[-2]) + # ) + # self.log("Sum of losses: " + str(total_losses[-1])) + # if epoch > 0: + # self.log( + # "Sum of losses difference: " + # + str(total_losses[-1] - total_losses[-2]), + # ) + + # show losses and differences with 5 points precision + self.log(f"Ncuts loss: {ncuts_losses[-1]:.5f}") + 
self.log(f"Reconstruction loss: {rec_losses[-1]:.5f}") + self.log(f"Sum of losses: {total_losses[-1]:.5f}") if epoch > 0: - self.log( - "Ncuts loss difference: " - + str(ncuts_losses[-1] - ncuts_losses[-2]) + self.lof( + f"Ncuts loss difference: {ncuts_losses[-1] - ncuts_losses[-2]:.5f}" ) - self.log("Reconstruction loss: " + str(rec_losses[-1])) - if epoch > 0: self.log( - "Reconstruction loss difference: " - + str(rec_losses[-1] - rec_losses[-2]) + f"Reconstruction loss difference: {rec_losses[-1] - rec_losses[-2]:.5f}" ) - self.log("Sum of losses: " + str(total_losses[-1])) - if epoch > 0: self.log( - "Sum of losses difference: " - + str(total_losses[-1] - total_losses[-2]), + f"Sum of losses difference: {total_losses[-1] - total_losses[-2]:.5f}" ) # Update the learning rate @@ -774,7 +789,7 @@ def train(self): yield TrainingReport( epoch=epoch, loss_1_values={ - "SoftNcuts loss": ncuts_losses, + "SoftNCuts loss": ncuts_losses, "Dice metric": metric, }, loss_2_values=rec_losses, diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 70e98d48..a956bd3c 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1009,8 +1009,10 @@ def _create_unsupervised_worker_from_config( ): return WNetTrainingWorker(worker_config=worker_config) - def _create_worker(self): - self._set_worker_config() + def _create_worker(self, additional_results_description=None): + self._set_worker_config( + additional_description=additional_results_description + ) if self.unsupervised_mode: return self._create_unsupervised_worker_from_config( self.worker_config @@ -1019,7 +1021,15 @@ def _create_worker(self): def _set_worker_config( self, + additional_description=None, ) -> config.TrainingWorkerConfig: + """Creates a worker config for supervised or unsupervised training + Args: + additional_description: Additional description to add to the 
results folder name + + Returns: + A worker config + """ logger.debug("Loading config...") model_config = config.ModelInfo(name=self.model_choice.currentText()) @@ -1033,10 +1043,21 @@ def _set_worker_config( seed=self.box_seed.value(), ) + loss_name = ( + (f"{self.loss_choice.currentText()}_") + if not self.unsupervised_mode + else "" + ) + additional_description = ( + (f"{additional_description}_") + if additional_description is not None + else "" + ) results_path_folder = Path( self.results_path + f"/{model_config.name}_" - + f"{self.loss_choice.currentText()}_" + + additional_description + + loss_name + f"{self.epoch_choice.value()}e_" + f"{utils.get_date_time()}" ) @@ -1072,6 +1093,16 @@ def _set_supervised_worker_config( patch_size, deterministic_config, ): + """Sets the worker config for supervised training + Args: + model_config: Model config + results_path_folder: Path to results folder + patch_size: Patch size + deterministic_config: Deterministic config + + Returns: + A worker config + """ validation_percent = self.validation_percent_choice.slider_value / 100 self.worker_config = config.SupervisedTrainingWorkerConfig( device=self.check_device_choice(), @@ -1103,6 +1134,16 @@ def _set_unsupervised_worker_config( deterministic_config, eval_volume_dict, ) -> config.WNetTrainingWorkerConfig: + """Sets the worker config for unsupervised training + Args: + results_path_folder: Path to results folder + patch_size: Patch size + deterministic_config: Deterministic config + eval_volume_dict: Evaluation volume dictionary + + Returns: + A worker config + """ self.worker_config = config.WNetTrainingWorkerConfig( device=self.check_device_choice(), weights_info=self.weights_config, From a4dddb89a9f5e1bacb5ae81a8f3371e51edc79b5 Mon Sep 17 00:00:00 2001 From: Cyril Achard <94955160+C-Achard@users.noreply.github.com> Date: Fri, 28 Jul 2023 11:06:55 +0200 Subject: [PATCH 11/70] Temp fix for CRF (#46) --- .github/workflows/test_and_deploy.yml | 2 +- 
napari_cellseg3d/code_models/crf.py | 2 +- pyproject.toml | 4 ++-- tox.ini | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/test_and_deploy.yml b/.github/workflows/test_and_deploy.yml index 406bf4f5..fafb1719 100644 --- a/.github/workflows/test_and_deploy.yml +++ b/.github/workflows/test_and_deploy.yml @@ -51,7 +51,7 @@ jobs: run: | python -m pip install --upgrade pip python -m pip install setuptools tox tox-gh-actions -# pip install git+https://github.com/lucasb-eyer/pydensecrf.git@master#egg=pydensecrf +# pip install git+https://github.com/kodalli/pydensecrf.git@master#egg=pydensecrf # this runs the platform-specific tests declared in tox.ini - name: Test with tox diff --git a/napari_cellseg3d/code_models/crf.py b/napari_cellseg3d/code_models/crf.py index b362246a..79951fc5 100644 --- a/napari_cellseg3d/code_models/crf.py +++ b/napari_cellseg3d/code_models/crf.py @@ -7,7 +7,7 @@ Philipp Krähenbühl and Vladlen Koltun NIPS 2011 -Implemented using the pydense libary available at https://github.com/lucasb-eyer/pydensecrf. +Implemented using the pydense libary available at https://github.com/kodalli/pydensecrf. 
""" from warnings import warn diff --git a/pyproject.toml b/pyproject.toml index 40450f9b..5d5be93b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -120,7 +120,7 @@ line_length = 79 [project.optional-dependencies] crf = [ - "pydensecrf@git+https://github.com/lucasb-eyer/pydensecrf.git#egg=master", + "pydensecrf@git+https://github.com/kodalli/pydensecrf.git#egg=master", ] dev = [ "isort", @@ -142,7 +142,7 @@ test = [ "coverage", "tox", "twine", - "pydensecrf@git+https://github.com/lucasb-eyer/pydensecrf.git#egg=master", + "pydensecrf@git+https://github.com/kodalli/pydensecrf.git#egg=master", ] onnx-cpu = [ "onnx", diff --git a/tox.ini b/tox.ini index b8c76091..195b0dff 100644 --- a/tox.ini +++ b/tox.ini @@ -34,7 +34,7 @@ deps = magicgui pytest-qt qtpy - git+https://github.com/lucasb-eyer/pydensecrf.git@master#egg=pydensecrf + git+https://github.com/kodalli/pydensecrf.git@master#egg=pydensecrf onnx onnxruntime ; pyopencl[pocl] From e4b51d4fe8a0b867673fbb96f5035b66c75ecfa3 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 11:51:23 +0200 Subject: [PATCH 12/70] Minor fixes --- napari_cellseg3d/_tests/test_supervised_training.py | 1 + napari_cellseg3d/code_models/models/wnet/soft_Ncuts.py | 5 +++-- napari_cellseg3d/code_models/worker_training.py | 8 ++++---- napari_cellseg3d/code_plugins/plugin_model_training.py | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/napari_cellseg3d/_tests/test_supervised_training.py b/napari_cellseg3d/_tests/test_supervised_training.py index 2ce1ee03..7a5b1d3e 100644 --- a/napari_cellseg3d/_tests/test_supervised_training.py +++ b/napari_cellseg3d/_tests/test_supervised_training.py @@ -15,6 +15,7 @@ def test_create_supervised_worker_from_config(make_napari_viewer_proxy): widget = Trainer(make_napari_viewer_proxy()) + widget.device_choice.setCurrentIndex(0) worker = widget._create_worker() default_config = config.SupervisedTrainingWorkerConfig() excluded = [ 
diff --git a/napari_cellseg3d/code_models/models/wnet/soft_Ncuts.py b/napari_cellseg3d/code_models/models/wnet/soft_Ncuts.py index db049526..1885ccea 100644 --- a/napari_cellseg3d/code_models/models/wnet/soft_Ncuts.py +++ b/napari_cellseg3d/code_models/models/wnet/soft_Ncuts.py @@ -5,13 +5,14 @@ """ import math - import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from scipy.stats import norm +from napari_cellseg3d.utils import LOGGER as logger + __author__ = "Yves Paychère, Colin Hofmann, Cyril Achard" __credits__ = [ "Yves Paychère", @@ -54,7 +55,7 @@ def __init__( self.W, self.D, ) - print(f"Radius set to {self.radius}") + logger.info(f"Radius set to {self.radius}") def forward(self, labels, inputs): """Forward pass of the Soft N-Cuts loss. diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index d98c6ecf..85796aff 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -205,7 +205,7 @@ def get_patch_dataset(self, train_transforms): return self.config.sample_size, dataset - def get_patch_dataset_eval(self, eval_dataset_dict): + def get_dataset_eval(self, eval_dataset_dict): eval_transforms = Compose( [ LoadImaged(keys=["image", "label"], image_only=True), @@ -373,7 +373,7 @@ def _get_data(self): ) if self.config.eval_volume_dict is not None: - eval_dataset = self.get_dataset(train_transforms) + eval_dataset = self.get_dataset_eval(train_transforms) eval_dataloader = DataLoader( eval_dataset, @@ -620,7 +620,7 @@ def train(self): "data": dec_out, "cmap": "gist_earth", }, - "Input image": {"data": image, "cmap": "inferno"}, + "Input image": {"data": np.squeeze(image), "cmap": "inferno"}, } yield TrainingReport( @@ -667,7 +667,7 @@ def train(self): self.log(f"Reconstruction loss: {rec_losses[-1]:.5f}") self.log(f"Sum of losses: {total_losses[-1]:.5f}") if epoch > 0: - self.lof( + self.log( f"Ncuts loss 
difference: {ncuts_losses[-1] - ncuts_losses[-2]:.5f}" ) self.log( diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index a956bd3c..91fb7ebd 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1450,7 +1450,7 @@ def _plot_loss( label="Maximum Dice coeff.", zorder=5, ) - self.plot_2.legend(facecolor=ui.napari_grey, loc="lower right") + self.plot_2.legend(facecolor=ui.napari_grey, loc="lower right") self.canvas.draw_idle() def update_loss_plot(self, loss_1: dict, loss_2: list): From d0a190d0c005c8b4d50c53d9eef6deeb09dee845 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 13:05:08 +0200 Subject: [PATCH 13/70] Tests & training --- .../_tests/test_supervised_training.py | 5 +++-- .../_tests/test_unsup_training.py | 14 ++++++++------ .../code_models/worker_training.py | 19 ++++++++++++------- 3 files changed, 23 insertions(+), 15 deletions(-) diff --git a/napari_cellseg3d/_tests/test_supervised_training.py b/napari_cellseg3d/_tests/test_supervised_training.py index 7a5b1d3e..676133ff 100644 --- a/napari_cellseg3d/_tests/test_supervised_training.py +++ b/napari_cellseg3d/_tests/test_supervised_training.py @@ -12,9 +12,10 @@ im_path = Path(__file__).resolve().parent / "res/test.tif" im_path_str = str(im_path) - def test_create_supervised_worker_from_config(make_napari_viewer_proxy): - widget = Trainer(make_napari_viewer_proxy()) + + viewer = make_napari_viewer_proxy() + widget = Trainer(viewer=viewer) widget.device_choice.setCurrentIndex(0) worker = widget._create_worker() default_config = config.SupervisedTrainingWorkerConfig() diff --git a/napari_cellseg3d/_tests/test_unsup_training.py b/napari_cellseg3d/_tests/test_unsup_training.py index 163b2a3d..3ebd4768 100644 --- a/napari_cellseg3d/_tests/test_unsup_training.py +++ b/napari_cellseg3d/_tests/test_unsup_training.py @@ -5,13 +5,13 @@ Trainer, ) 
-im_path = Path(__file__).resolve().parent / "res/test.tif" -im_path_str = str(im_path) - - def test_unsupervised_worker(make_napari_viewer_proxy): + im_path = Path(__file__).resolve().parent / "res/test.tif" + # im_path_str = str(im_path) + unsup_viewer = make_napari_viewer_proxy() widget = Trainer(viewer=unsup_viewer) + widget.device_choice.setCurrentIndex(0) widget.model_choice.setCurrentText("WNet") widget._toggle_unsupervised_mode(enabled=True) @@ -36,9 +36,11 @@ def test_unsupervised_worker(make_napari_viewer_proxy): widget.images_filepaths = [str(im_path.parent)] widget.labels_filepaths = [str(im_path.parent)] - widget.unsupervised_eval_data = widget.create_train_dataset_dict() - assert widget.unsupervised_eval_data is not None + # widget.unsupervised_eval_data = widget.create_train_dataset_dict() worker = widget._create_worker(additional_results_description="TEST_3") dataloader, eval_dataloader, data_shape = worker._get_data() + assert widget.unsupervised_eval_data is not None assert eval_dataloader is not None + assert widget.unsupervised_eval_data[0]["image"] is not None + assert widget.unsupervised_eval_data[0]["label"] is not None assert data_shape == (6, 6, 6) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 85796aff..b28f8285 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -208,7 +208,7 @@ def get_patch_dataset(self, train_transforms): def get_dataset_eval(self, eval_dataset_dict): eval_transforms = Compose( [ - LoadImaged(keys=["image", "label"], image_only=True), + LoadImaged(keys=["image", "label"]), EnsureChannelFirstd( keys=["image", "label"], channel_dim="no_channel" ), @@ -373,7 +373,7 @@ def _get_data(self): ) if self.config.eval_volume_dict is not None: - eval_dataset = self.get_dataset_eval(train_transforms) + eval_dataset = self.get_dataset_eval(self.config.eval_volume_dict) eval_dataloader = DataLoader( 
eval_dataset, @@ -617,7 +617,7 @@ def train(self): "cmap": "turbo", }, "Decoder output": { - "data": dec_out, + "data": np.squeeze(dec_out), "cmap": "gist_earth", }, "Input image": {"data": np.squeeze(image), "cmap": "inferno"}, @@ -766,22 +766,27 @@ def train(self): if WANDB_INSTALLED: # log validation dice score for each validation round wandb.log({"val/dice_metric": metric}) + + dec_out_val = val_decoder_outputs[0].detach().cpu().numpy() + enc_out_val = val_outputs[0].detach().cpu().numpy() + lab_out_val = val_labels[0].detach().cpu().numpy() + val_in = val_inputs[0].detach.cpu().nummpy() display_dict = { "Decoder output": { - "data": val_decoder_outputs[0], + "data": np.squeeze(dec_out_val), "cmap": "gist_earth", }, "Encoder output": { - "data": val_outputs[0], + "data": enc_out_val, "cmap": "turbo", }, "Labels": { - "data": val_labels[0], + "data": lab_out_val, "cmap": "bop blue", }, "Inputs": { - "data": val_inputs[0], + "data": val_in, "cmap": "inferno", }, } From fffed34139bbb8a60cb67470cabf325383f2bca4 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 14:14:10 +0200 Subject: [PATCH 14/70] Fix tests + new weights --- napari_cellseg3d/_tests/test_inference.py | 1 + napari_cellseg3d/code_models/models/model_SegResNet.py | 2 +- napari_cellseg3d/code_models/models/model_SwinUNetR.py | 2 +- napari_cellseg3d/code_models/models/model_TRAILMAP_MS.py | 2 +- napari_cellseg3d/code_models/models/model_VNet.py | 2 +- .../models/pretrained/pretrained_model_urls.json | 8 ++++---- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/napari_cellseg3d/_tests/test_inference.py b/napari_cellseg3d/_tests/test_inference.py index f5a89b14..4fa2c54b 100644 --- a/napari_cellseg3d/_tests/test_inference.py +++ b/napari_cellseg3d/_tests/test_inference.py @@ -89,6 +89,7 @@ def __call__(self, x): post_process_transforms=mock_work(), ) assert isinstance(res, InferenceResult) + assert res.result is not None def test_post_processing(): diff --git 
a/napari_cellseg3d/code_models/models/model_SegResNet.py b/napari_cellseg3d/code_models/models/model_SegResNet.py index 60b74d64..99f8cbfc 100644 --- a/napari_cellseg3d/code_models/models/model_SegResNet.py +++ b/napari_cellseg3d/code_models/models/model_SegResNet.py @@ -3,7 +3,7 @@ class SegResNet_(SegResNetVAE): use_default_training = True - weights_file = "SegResNet.pth" + weights_file = "SegResNet_latest.pth" def __init__( self, input_img_size, out_channels=1, dropout_prob=0.3, **kwargs diff --git a/napari_cellseg3d/code_models/models/model_SwinUNetR.py b/napari_cellseg3d/code_models/models/model_SwinUNetR.py index 0dbf0be5..bce316e8 100644 --- a/napari_cellseg3d/code_models/models/model_SwinUNetR.py +++ b/napari_cellseg3d/code_models/models/model_SwinUNetR.py @@ -7,7 +7,7 @@ class SwinUNETR_(SwinUNETR): use_default_training = True - weights_file = "Swin64_best_metric.pth" + weights_file = "SwinUNetR_latest.pth" def __init__( self, diff --git a/napari_cellseg3d/code_models/models/model_TRAILMAP_MS.py b/napari_cellseg3d/code_models/models/model_TRAILMAP_MS.py index bc8e43d5..4ee971e2 100644 --- a/napari_cellseg3d/code_models/models/model_TRAILMAP_MS.py +++ b/napari_cellseg3d/code_models/models/model_TRAILMAP_MS.py @@ -4,7 +4,7 @@ class TRAILMAP_MS_(UNet3D): use_default_training = True - weights_file = "TRAILMAP_MS_best_metric_epoch_26.pth" + weights_file = "TRAILMAP_MS_best_metric.pth" # original model from Liqun Luo lab, transferred to pytorch and trained on mesoSPIM-acquired data (mostly TPH2 as of July 2022) diff --git a/napari_cellseg3d/code_models/models/model_VNet.py b/napari_cellseg3d/code_models/models/model_VNet.py index 4e375a11..8fe18e2b 100644 --- a/napari_cellseg3d/code_models/models/model_VNet.py +++ b/napari_cellseg3d/code_models/models/model_VNet.py @@ -3,7 +3,7 @@ class VNet_(VNet): use_default_training = True - weights_file = "VNet_40e.pth" + weights_file = "VNet_latest.pth" def __init__(self, in_channels=1, out_channels=1, **kwargs): try: diff 
--git a/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json b/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json index b235a550..3c393d47 100644 --- a/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json +++ b/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json @@ -1,8 +1,8 @@ { - "TRAILMAP_MS": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/TRAILMAP_MS.tar.gz", - "SegResNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/SegResNet.tar.gz", - "VNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/VNet.tar.gz", - "SwinUNetR": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/Swin64.tar.gz", + "TRAILMAP_MS": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/TRAILMAP_latest.tar.gz", + "SegResNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/SegResNet_latest.tar.gz", + "VNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/VNet_latest.tar.gz", + "SwinUNetR": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/SwinUNetR_latest.tar.gz", "WNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/wnet.tar.gz", "WNet_ONNX": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/wnet_onnx.tar.gz", "test": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/test.tar.gz" From e235087bb60a575c73e142dc3d437fff25085be5 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 17:35:57 +0200 Subject: [PATCH 15/70] Fix ETA precision --- napari_cellseg3d/code_models/worker_training.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index b28f8285..332e8f2d 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -620,7 +620,10 @@ def train(self): "data": np.squeeze(dec_out), "cmap": "gist_earth", }, - "Input image": {"data": 
np.squeeze(image), "cmap": "inferno"}, + "Input image": { + "data": np.squeeze(image), + "cmap": "inferno", + }, } yield TrainingReport( @@ -766,8 +769,10 @@ def train(self): if WANDB_INSTALLED: # log validation dice score for each validation round wandb.log({"val/dice_metric": metric}) - - dec_out_val = val_decoder_outputs[0].detach().cpu().numpy() + + dec_out_val = ( + val_decoder_outputs[0].detach().cpu().numpy() + ) enc_out_val = val_outputs[0].detach().cpu().numpy() lab_out_val = val_labels[0].detach().cpu().numpy() val_in = val_inputs[0].detach.cpu().nummpy() @@ -810,9 +815,7 @@ def train(self): * (self.config.max_epochs / (epoch + 1) - 1) / 60 ) - self.log( - f"ETA: {eta} minutes", - ) + self.log(f"ETA: {eta:.2f} minutes") self.log("-" * 20) # Save the model From 0cbd2ec8397c03f0c11f44707fa3d6569660e5f9 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 09:14:37 +0200 Subject: [PATCH 16/70] Docstring update --- napari_cellseg3d/code_models/workers_utils.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/workers_utils.py b/napari_cellseg3d/code_models/workers_utils.py index b07e96c8..5c695fd1 100644 --- a/napari_cellseg3d/code_models/workers_utils.py +++ b/napari_cellseg3d/code_models/workers_utils.py @@ -151,7 +151,7 @@ def __init__(self, file_location): except ImportError as e: logger.error("ONNX is not installed but ONNX model was loaded") logger.error(e) - msg = "PLEASE INSTALL ONNX CPU OR GPU USING pip install napari-cellseg3d[onnx-cpu] OR napari-cellseg3d[onnx-gpu]" + msg = "PLEASE INSTALL ONNX CPU OR GPU USING: pip install napari-cellseg3d[onnx-cpu] OR pip install napari-cellseg3d[onnx-gpu]" logger.error(msg) raise ImportError(msg) from e @@ -177,6 +177,8 @@ def to(self, device): class QuantileNormalizationd(MapTransform): + """MONAI-style dict transform to normalize each image in a batch individually by quantile normalization.""" + def __init__(self, keys, allow_missing_keys: bool = 
False): super().__init__(keys, allow_missing_keys) @@ -199,6 +201,8 @@ def normalizer(self, image: torch.Tensor): class QuantileNormalization(Transform): + """MONAI-style transform to normalize each image in a batch individually by quantile normalization.""" + def __call__(self, img): return utils.quantile_normalization(img) From ed135a8703c6180639fc38832c64c67b592e3579 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 09:38:18 +0200 Subject: [PATCH 17/70] Update plugin_model_training.py --- napari_cellseg3d/code_plugins/plugin_model_training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 91fb7ebd..c31001be 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -316,9 +316,9 @@ def __init__( self._set_tooltips() self._build() self.model_choice.currentTextChanged.connect( - self._toggle_unsupervised_mode + partial(self._toggle_unsupervised_mode, enabled=False) ) - self._toggle_unsupervised_mode() + self._toggle_unsupervised_mode(enabled=False) def _set_tooltips(self): # tooltips From f224e7656638f57d202b84bdda612bb830e73d5a Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 09:57:45 +0200 Subject: [PATCH 18/70] Update contrast limit when updating layers --- napari_cellseg3d/code_plugins/plugin_model_training.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index c31001be..4d1e1dfc 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1291,6 +1291,9 @@ def _display_results(self, images_dict, complete_missing=False): "data" ] self.result_layers[i].refresh() + self.result_layers[ + i + ].contrast_limits.reset_contrast_limits_range() 
def on_yield(self, report: TrainingReport): # TODO refactor for dict # logger.info( From cbfe4ef5bb1afdcaec96aa32cafda4db52eb59c6 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 09:57:54 +0200 Subject: [PATCH 19/70] Update config.py --- napari_cellseg3d/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/config.py b/napari_cellseg3d/config.py index 72f8dfab..f9536d93 100644 --- a/napari_cellseg3d/config.py +++ b/napari_cellseg3d/config.py @@ -287,7 +287,7 @@ class WNetTrainingWorkerConfig(TrainingWorkerConfig): dropout: float = 0.65 use_clipping: bool = False # use gradient clipping clipping: float = 1.0 # clipping value - weight_decay: float = 1e-5 # weight decay (used 0.01 historically) + weight_decay: float = 0.01 # 1e-5 # weight decay (used 0.01 historically) # NCuts loss params intensity_sigma: float = 1.0 spatial_sigma: float = 4.0 From 3cb6a35c3f04f1ab56d42129c3d99ac7bf1c3d25 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 10:05:29 +0200 Subject: [PATCH 20/70] Fixed normalization --- napari_cellseg3d/code_models/worker_training.py | 2 ++ napari_cellseg3d/code_models/workers_utils.py | 17 +++++++++++++++++ 2 files changed, 19 insertions(+) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 332e8f2d..fb74d1de 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -57,6 +57,7 @@ LogSignal, QuantileNormalizationd, RemapTensor, + RemapTensord, Threshold, TrainingReport, WeightsDownloader, @@ -268,6 +269,7 @@ def get_dataset(self, train_transforms): spatial_size=(utils.get_padding_dim(first_volume_shape)), ), EnsureTyped(keys=["image"]), + RemapTensord(keys=["image"], new_min=0.0, new_max=100.0), ] ) diff --git a/napari_cellseg3d/code_models/workers_utils.py b/napari_cellseg3d/code_models/workers_utils.py index 5c695fd1..e5e8b881 100644 --- 
a/napari_cellseg3d/code_models/workers_utils.py +++ b/napari_cellseg3d/code_models/workers_utils.py @@ -217,6 +217,23 @@ def __call__(self, img): return utils.remap_image(img, new_max=self.max, new_min=self.min) +class RemapTensord(MapTransform): + def __init__( + self, keys, new_max, new_min, allow_missing_keys: bool = False + ): + super().__init__(keys, allow_missing_keys) + self.max = new_max + self.min = new_min + + def __call__(self, data): + d = dict(data) + for key in self.keys: + d[key] = utils.remap_image( + d[key], new_max=self.max, new_min=self.min + ) + return d + + class Threshold(Transform): def __init__(self, threshold=0.5): super().__init__() From 7b14ef38c4fb01de4d8049e74e49cc1f97e1d1d5 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 10:08:31 +0200 Subject: [PATCH 21/70] Update plugin_model_training.py --- napari_cellseg3d/code_plugins/plugin_model_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 4d1e1dfc..4a0bb272 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1291,9 +1291,9 @@ def _display_results(self, images_dict, complete_missing=False): "data" ] self.result_layers[i].refresh() - self.result_layers[ - i - ].contrast_limits.reset_contrast_limits_range() + # self.result_layers[ + # i + # ].contrast_limits.reset_contrast_limits_range() def on_yield(self, report: TrainingReport): # TODO refactor for dict # logger.info( From 267a9c10ca537b19b50db9a153b470532a09d2a5 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 10:10:53 +0200 Subject: [PATCH 22/70] Update workers_utils.py --- napari_cellseg3d/code_models/workers_utils.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_models/workers_utils.py 
b/napari_cellseg3d/code_models/workers_utils.py index e5e8b881..14d8d023 100644 --- a/napari_cellseg3d/code_models/workers_utils.py +++ b/napari_cellseg3d/code_models/workers_utils.py @@ -228,9 +228,11 @@ def __init__( def __call__(self, data): d = dict(data) for key in self.keys: - d[key] = utils.remap_image( - d[key], new_max=self.max, new_min=self.min - ) + for i in range(d[key].shape[0]): + logger.debug(f"remapping across channel {i}") + d[key][i] = utils.remap_image( + d[key][i], new_max=self.max, new_min=self.min + ) return d From 2b027501a9db7fc5eefe0ace12a70a9f9aefdf65 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 10:17:24 +0200 Subject: [PATCH 23/70] Trying to fix input normalization --- .../code_models/worker_training.py | 11 +++--- napari_cellseg3d/code_models/workers_utils.py | 34 +++++++++---------- 2 files changed, 24 insertions(+), 21 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index fb74d1de..21f0f69b 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -57,7 +57,7 @@ LogSignal, QuantileNormalizationd, RemapTensor, - RemapTensord, + # RemapTensord, Threshold, TrainingReport, WeightsDownloader, @@ -269,7 +269,7 @@ def get_dataset(self, train_transforms): spatial_size=(utils.get_padding_dim(first_volume_shape)), ), EnsureTyped(keys=["image"]), - RemapTensord(keys=["image"], new_min=0.0, new_max=100.0), + # RemapTensord(keys=["image"], new_min=0.0, new_max=100.0), ] ) @@ -541,6 +541,9 @@ def train(self): for _i, batch in enumerate(dataloader): # raise NotImplementedError("testing") image = batch["image"].to(device) + for i in range(image.shape[0]): + for j in range(image.shape[1]): + image[i, j] = normalize_function(image[i, j]) # if self.config.batch_size == 1: # image = image.unsqueeze(0) # else: @@ -580,8 +583,8 @@ def train(self): loss = alpha * Ncuts + beta * reconstruction_loss 
epoch_loss += loss.item() - # if WANDB_INSTALLED: - # wandb.log({"Sum of losses": loss.item()}) + if WANDB_INSTALLED: + wandb.log({"Sum of losses": loss.item()}) loss.backward(loss) optimizer.step() diff --git a/napari_cellseg3d/code_models/workers_utils.py b/napari_cellseg3d/code_models/workers_utils.py index 14d8d023..600dddd5 100644 --- a/napari_cellseg3d/code_models/workers_utils.py +++ b/napari_cellseg3d/code_models/workers_utils.py @@ -217,23 +217,23 @@ def __call__(self, img): return utils.remap_image(img, new_max=self.max, new_min=self.min) -class RemapTensord(MapTransform): - def __init__( - self, keys, new_max, new_min, allow_missing_keys: bool = False - ): - super().__init__(keys, allow_missing_keys) - self.max = new_max - self.min = new_min - - def __call__(self, data): - d = dict(data) - for key in self.keys: - for i in range(d[key].shape[0]): - logger.debug(f"remapping across channel {i}") - d[key][i] = utils.remap_image( - d[key][i], new_max=self.max, new_min=self.min - ) - return d +# class RemapTensord(MapTransform): +# def __init__( +# self, keys, new_max, new_min, allow_missing_keys: bool = False +# ): +# super().__init__(keys, allow_missing_keys) +# self.max = new_max +# self.min = new_min +# +# def __call__(self, data): +# d = dict(data) +# for key in self.keys: +# for i in range(d[key].shape[0]): +# logger.debug(f"remapping across channel {i}") +# d[key][i] = utils.remap_image( +# d[key][i], new_max=self.max, new_min=self.min +# ) +# return d class Threshold(Transform): From 8e8c8274eced0bb69de5e245429607387f29c8fc Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 10:55:09 +0200 Subject: [PATCH 24/70] Fix name mismatch --- napari_cellseg3d/code_models/worker_training.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 21f0f69b..3f00b894 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ 
b/napari_cellseg3d/code_models/worker_training.py @@ -619,7 +619,7 @@ def train(self): "data": AsDiscrete(threshold=0.5)( enc_out ).numpy(), - "cmap": "turbo", + "cmap": "bop blue", }, "Decoder output": { "data": np.squeeze(dec_out), @@ -634,7 +634,7 @@ def train(self): yield TrainingReport( show_plot=True, epoch=epoch, - loss_1_values={"SoftNCuts loss": ncuts_losses}, + loss_1_values={"SoftNCuts": ncuts_losses}, loss_2_values=rec_losses, weights=model.state_dict(), images_dict=images_dict, @@ -804,7 +804,7 @@ def train(self): yield TrainingReport( epoch=epoch, loss_1_values={ - "SoftNCuts loss": ncuts_losses, + "SoftNCuts": ncuts_losses, "Dice metric": metric, }, loss_2_values=rec_losses, From e7af6f5350f8493b43a30a3e297463251bc4c182 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:09:15 +0200 Subject: [PATCH 25/70] Fix decoder evaluation --- .../code_models/worker_training.py | 33 +++++-------------- 1 file changed, 9 insertions(+), 24 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 3f00b894..2de05bd6 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -584,7 +584,7 @@ def train(self): loss = alpha * Ncuts + beta * reconstruction_loss epoch_loss += loss.item() if WANDB_INSTALLED: - wandb.log({"Sum of losses": loss.item()}) + wandb.log({"Weighted sum of losses": loss.item()}) loss.backward(loss) optimizer.step() @@ -651,26 +651,6 @@ def train(self): # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) - # self.log("Ncuts loss: " + str(ncuts_losses[-1])) - # if epoch > 0: - # self.log( - # "Ncuts loss difference: " - # + str(ncuts_losses[-1] - ncuts_losses[-2]) - # ) - # self.log("Reconstruction loss: " + str(rec_losses[-1])) - # if epoch > 0: - # self.log( - # "Reconstruction loss difference: " - # + 
str(rec_losses[-1] - rec_losses[-2]) - # ) - # self.log("Sum of losses: " + str(total_losses[-1])) - # if epoch > 0: - # self.log( - # "Sum of losses difference: " - # + str(total_losses[-1] - total_losses[-2]), - # ) - - # show losses and differences with 5 points precision self.log(f"Ncuts loss: {ncuts_losses[-1]:.5f}") self.log(f"Reconstruction loss: {rec_losses[-1]:.5f}") self.log(f"Sum of losses: {total_losses[-1]:.5f}") @@ -718,10 +698,15 @@ def train(self): overlap=0, progress=True, ) - val_outputs = AsDiscrete(threshold=0.5)( - val_outputs + val_decoder_outputs = sliding_window_inference( + val_outputs, + roi_size=[64, 64, 64], + sw_batch_size=1, + predictor=model.forward_decoder, + overlap=0, + progress=True, ) - val_decoder_outputs = model.forward_decoder( + val_outputs = AsDiscrete(threshold=0.5)( val_outputs ) From bde4cbc3f35285afac58ffe2fc62fd82e31badb8 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:15:35 +0200 Subject: [PATCH 26/70] Update dice calculation --- .../code_models/worker_training.py | 43 +++++++++++-------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 2de05bd6..67db9c8b 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -689,7 +689,9 @@ def train(self): val_inputs[i][j] = normalize_function( val_inputs[i][j] ) - + logger.debug( + f"Val inputs shape: {val_inputs.shape}" + ) val_outputs = sliding_window_inference( val_inputs, roi_size=[64, 64, 64], @@ -709,26 +711,33 @@ def train(self): val_outputs = AsDiscrete(threshold=0.5)( val_outputs ) + logger.debug( + f"Val outputs shape: {val_outputs.shape}" + ) + logger.debug( + f"Val labels shape: {val_labels.shape}" + ) + logger.debug( + f"Val decoder outputs shape: {val_decoder_outputs.shape}" + ) - # compute metric for current iteration + dices = [] for channel in 
range(val_outputs.shape[1]): - max_dice_channel = torch.argmax( - torch.Tensor( - [ - utils.dice_coeff( - y_pred=val_outputs[ - :, - channel : (channel + 1), - :, - :, - :, - ], - y_true=val_labels, - ) - ] + dices.append( + utils.dice_coeff( + y_pred=val_outputs[ + 0, channel : (channel + 1), :, :, : + ], + y_true=val_labels[0], ) ) - + logger.debug(f"DICE COEFF: {dices}") + max_dice_channel = torch.argmax( + torch.Tensor(dices) + ) + logger.debug( + f"MAX DICE CHANNEL: {max_dice_channel}" + ) dice_metric( y_pred=val_outputs[ :, From 99c2dc181eb55d532f82f5476edbbf8352328cf0 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:26:26 +0200 Subject: [PATCH 27/70] Update dice coeff --- napari_cellseg3d/utils.py | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/napari_cellseg3d/utils.py b/napari_cellseg3d/utils.py index c6a8bbac..db293bbe 100644 --- a/napari_cellseg3d/utils.py +++ b/napari_cellseg3d/utils.py @@ -179,21 +179,31 @@ def sphericity_axis(semi_major, semi_minor): return result -def dice_coeff(y_true, y_pred, smooth=1.0): +def dice_coeff( + y_true: Union[torch.Tensor, np.ndarray], + y_pred: Union[torch.Tensor, np.ndarray], + smooth: float = 1.0, +) -> Union[torch.Tensor, np.float64]: """Compute Dice-Sorensen coefficient between two numpy arrays - Args: y_true: Ground truth label y_pred: Prediction label - Returns: dice coefficient - """ + if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray): + sum_tensor = np.sum + elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor): + sum_tensor = torch.sum + else: + raise ValueError( + "y_true and y_pred must both be either numpy arrays or torch tensors" + ) + y_true_f = y_true.flatten() y_pred_f = y_pred.flatten() - intersection = np.sum(y_true_f * y_pred_f) + intersection = sum_tensor(y_true_f * y_pred_f) return (2.0 * intersection + smooth) / ( - np.sum(y_true_f) + np.sum(y_pred_f) + smooth + sum_tensor(y_true_f) + 
sum_tensor(y_pred_f) + smooth ) From 97706117649d63e71ff8148b084da58bc3e49237 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:31:40 +0200 Subject: [PATCH 28/70] Update worker_training.py --- napari_cellseg3d/code_models/worker_training.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 67db9c8b..41df7d9c 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -774,7 +774,7 @@ def train(self): ) enc_out_val = val_outputs[0].detach().cpu().numpy() lab_out_val = val_labels[0].detach().cpu().numpy() - val_in = val_inputs[0].detach.cpu().nummpy() + val_in = val_inputs[0].detach.cpu().numpy() display_dict = { "Decoder output": { From bca2262403bf84f288e69550b557c3a234a4feff Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:34:18 +0200 Subject: [PATCH 29/70] Fix eval detach --- napari_cellseg3d/code_models/worker_training.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 41df7d9c..e64427c8 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -774,7 +774,7 @@ def train(self): ) enc_out_val = val_outputs[0].detach().cpu().numpy() lab_out_val = val_labels[0].detach().cpu().numpy() - val_in = val_inputs[0].detach.cpu().numpy() + val_in = val_inputs[0].detach().cpu().numpy() display_dict = { "Decoder output": { @@ -782,15 +782,15 @@ def train(self): "cmap": "gist_earth", }, "Encoder output": { - "data": enc_out_val, + "data": np.squeeze(enc_out_val), "cmap": "turbo", }, "Labels": { - "data": lab_out_val, + "data": np.squeeze(lab_out_val), "cmap": "bop blue", }, "Inputs": { - "data": val_in, + "data": np.squeeze(val_in), "cmap": "inferno", }, } From 
d75dbc58fb002aea620d882381e5e73cbdf23895 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 11:39:13 +0200 Subject: [PATCH 30/70] Fix Dice list for WNet --- napari_cellseg3d/code_models/worker_training.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index e64427c8..2ad1d10a 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -529,6 +529,7 @@ def train(self): rec_losses = [] total_losses = [] best_dice = -1 + dice_values = [] # Train the model for epoch in range(self.config.max_epochs): @@ -751,6 +752,7 @@ def train(self): # aggregate the final mean dice result metric = dice_metric.aggregate().item() + dice_values.append(metric) self.log(f"Validation Dice score: {metric}") if best_dice < metric <= 1: best_dice = metric @@ -799,7 +801,7 @@ def train(self): epoch=epoch, loss_1_values={ "SoftNCuts": ncuts_losses, - "Dice metric": metric, + "Dice metric": dice_values, }, loss_2_values=rec_losses, weights=model.state_dict(), From 1283b08fb211b596835c981641a06e5f4b051f7f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 12:42:58 +0200 Subject: [PATCH 31/70] Updated validation UI --- .../code_models/worker_training.py | 18 ++++++---- .../code_plugins/plugin_model_training.py | 36 ++++++++++--------- 2 files changed, 32 insertions(+), 22 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 2ad1d10a..5a0121bc 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -698,7 +698,9 @@ def train(self): roi_size=[64, 64, 64], sw_batch_size=1, predictor=model.forward_encoder, - overlap=0, + overlap=0.1, + mode="gaussian", + sigma_scale=0.01, progress=True, ) val_decoder_outputs = sliding_window_inference( @@ -706,7 +708,9 @@ def 
train(self): roi_size=[64, 64, 64], sw_batch_size=1, predictor=model.forward_decoder, - overlap=0, + overlap=0.1, + mode="gaussian", + sigma_scale=0.01, progress=True, ) val_outputs = AsDiscrete(threshold=0.5)( @@ -787,14 +791,14 @@ def train(self): "data": np.squeeze(enc_out_val), "cmap": "turbo", }, - "Labels": { - "data": np.squeeze(lab_out_val), - "cmap": "bop blue", - }, "Inputs": { "data": np.squeeze(val_in), "cmap": "inferno", }, + "Labels": { + "data": np.squeeze(lab_out_val), + "cmap": "bop blue", + }, } yield TrainingReport( @@ -1414,6 +1418,8 @@ def get_loader_func(num_samples): sw_batch_size=self.config.batch_size, predictor=model, overlap=0.25, + mode="gaussian", + sigma_scale=0.01, sw_device=self.config.device, device=self.config.device, progress=False, diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 4a0bb272..f94ba961 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1259,6 +1259,7 @@ def _remove_result_layers(self): self.result_layers = [] def _display_results(self, images_dict, complete_missing=False): + """Show various model input/outputs in napari viewer as a list of layers""" layer_list = [] if not complete_missing: for layer_name in list(images_dict.keys()): @@ -1291,9 +1292,8 @@ def _display_results(self, images_dict, complete_missing=False): "data" ] self.result_layers[i].refresh() - # self.result_layers[ - # i - # ].contrast_limits.reset_contrast_limits_range() + clims = self.result_layers[i].contrast_limits + [c.reset_contrast_limits_range() for c in clims] def on_yield(self, report: TrainingReport): # TODO refactor for dict # logger.info( @@ -1395,6 +1395,17 @@ def _make_csv(self): ) self.df.to_csv(path, index=False) + def _show_plot_max(self, plot, y): + x_max = (np.argmax(y) + 1) * self.worker_config.validation_interval + dice_max = np.max(y) + plot.scatter( + x_max, + 
dice_max, + c="r", + label="Max. Dice.", + zorder=5, + ) + def _plot_loss( self, loss_values_1: dict, @@ -1414,7 +1425,7 @@ def _plot_loss( self.plot_1.set_ylabel(self.plot_2_labels["ylabel"][plot_key]) for metric_name in list(loss_values_1.keys()): - if metric_name == "Dice coefficient": + if metric_name == "Dice metric": x = [ self.worker_config.validation_interval * (i + 1) for i in range(len(loss_values_1[metric_name])) @@ -1423,7 +1434,10 @@ def _plot_loss( x = [i + 1 for i in range(len(loss_values_1[metric_name]))] y = loss_values_1[metric_name] self.plot_1.plot(x, y, label=metric_name) - self.plot_1.legend(loc="lower right") + if metric_name == "Dice metric": + self._show_plot_max(self.plot_1, y) + + self.plot_1.legend(loc="best") # update plot 2 if self._is_current_job_supervised(): @@ -1442,17 +1456,7 @@ def _plot_loss( self.plot_2.set_ylabel(self.plot_2_labels["ylabel"][plot_key]) if show_plot_2_max: - epoch_min = ( - np.argmax(y) + 1 - ) * self.worker_config.validation_interval - dice_min = np.max(y) - self.plot_2.scatter( - epoch_min, - dice_min, - c="r", - label="Maximum Dice coeff.", - zorder=5, - ) + self._show_plot_max(self.plot_2, y) self.plot_2.legend(facecolor=ui.napari_grey, loc="lower right") self.canvas.draw_idle() From 646c5a8930c271d8d3d92e1b698fd5ec77e4c01f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 12:54:39 +0200 Subject: [PATCH 32/70] Tooltips and show_results update --- .../code_models/worker_training.py | 6 ++--- .../code_plugins/plugin_model_training.py | 23 +++++++++---------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 5a0121bc..5b6169e5 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -654,7 +654,7 @@ def train(self): self.log(f"Ncuts loss: {ncuts_losses[-1]:.5f}") self.log(f"Reconstruction loss: {rec_losses[-1]:.5f}") - 
self.log(f"Sum of losses: {total_losses[-1]:.5f}") + self.log(f"Weighted sum of losses: {total_losses[-1]:.5f}") if epoch > 0: self.log( f"Ncuts loss difference: {ncuts_losses[-1] - ncuts_losses[-2]:.5f}" @@ -663,7 +663,7 @@ def train(self): f"Reconstruction loss difference: {rec_losses[-1] - rec_losses[-2]:.5f}" ) self.log( - f"Sum of losses difference: {total_losses[-1] - total_losses[-2]:.5f}" + f"Weighted sum of losses difference: {total_losses[-1] - total_losses[-2]:.5f}" ) # Update the learning rate @@ -757,7 +757,7 @@ def train(self): # aggregate the final mean dice result metric = dice_metric.aggregate().item() dice_values.append(metric) - self.log(f"Validation Dice score: {metric}") + self.log(f"Validation Dice score: {metric:.3f}") if best_dice < metric <= 1: best_dice = metric # save the best model diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index f94ba961..b7da5a01 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1,7 +1,7 @@ import shutil from functools import partial from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, List import matplotlib.pyplot as plt import numpy as np @@ -149,7 +149,7 @@ def __init__( """Plot for dice metric""" self.plot_dock = None """Docked widget with plots""" - self.result_layers = [] + self.result_layers: List[napari.layers.Layer] = [] """Layers to display checkpoint""" self.plot_1_labels = { @@ -323,9 +323,9 @@ def __init__( def _set_tooltips(self): # tooltips self.zip_choice.setToolTip( - "Checking this will save a copy of the results as a zip folder" + "Save a copy of the results as a zip folder" ) - self.validation_percent_choice.tooltips = "Choose the proportion of images to retain for training.\nThe remaining images will be used for validation" + self.validation_percent_choice.tooltips = "The percentage of images to 
retain for training.\nThe remaining images will be used for validation" self.epoch_choice.tooltips = "The number of epochs to train for.\nThe more you train, the better the model will fit the training data" self.loss_choice.setToolTip( "The loss function to use for training.\nSee the list in the training guide for more info" @@ -335,10 +335,10 @@ def _set_tooltips(self): ) self.batch_choice.tooltips = ( "The batch size to use for training.\n A larger value will feed more images per iteration to the model,\n" - " which is faster and possibly improves performance, but uses more memory" + " which is faster and can improve performance, but uses more memory on your selected device" ) self.val_interval_choice.tooltips = ( - "The number of epochs to perform before validating data.\n " + "The number of epochs to perform before validating on test data.\n " "The lower the value, the more often the score of the model will be computed and the more often the weights will be saved." ) self.learning_rate_choice.setToolTip( @@ -352,19 +352,19 @@ def _set_tooltips(self): ) self.augment_choice.setToolTip( "Check this to enable data augmentation, which will randomly deform, flip and shift the intensity in images" - " to provide a more general dataset. \nUse this if you're extracting more than 10 samples per image" + " to provide a more diverse dataset" ) [ w.setToolTip("Size of the sample to extract") for w in self.patch_size_widgets ] self.patch_choice.setToolTip( - "Check this to automatically crop your images in smaller, cubic images for training." - "\nShould be used if you have a small dataset (and large images)" + "Check this to automatically crop your images into smaller, cubic images for training." + "\nShould be used if you have a few large images" ) self.use_deterministic_choice.setToolTip( "Enable deterministic training for reproducibility." - "Using the same seed with all other parameters being similar should yield the exact same results between two runs." 
+ "Using the same seed with all other parameters being similar should yield the exact same results across runs." ) self.use_transfer_choice.setToolTip( "Use this you want to initialize the model with pre-trained weights or use your own weights." @@ -1292,8 +1292,7 @@ def _display_results(self, images_dict, complete_missing=False): "data" ] self.result_layers[i].refresh() - clims = self.result_layers[i].contrast_limits - [c.reset_contrast_limits_range() for c in clims] + self.result_layers[i].reset_contrast_limits() def on_yield(self, report: TrainingReport): # TODO refactor for dict # logger.info( From a6964ab11cbc5b3b237cea1c23ac48a20f6bcbd1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 12:58:23 +0200 Subject: [PATCH 33/70] Plots update --- napari_cellseg3d/code_plugins/plugin_model_training.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index b7da5a01..c6f3c26d 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1435,8 +1435,8 @@ def _plot_loss( self.plot_1.plot(x, y, label=metric_name) if metric_name == "Dice metric": self._show_plot_max(self.plot_1, y) - - self.plot_1.legend(loc="best") + if len(loss_values_1.keys()) > 1: + self.plot_1.legend(loc="best", fontsize="10", markerscale=0.6) # update plot 2 if self._is_current_job_supervised(): @@ -1520,7 +1520,6 @@ def update_loss_plot(self, loss_1: dict, loss_2: list): with plt.style.context("dark_background"): self.plot_1.cla() self.plot_2.cla() - self._plot_loss(loss_1, loss_2, show_plot_2_max=plot_max) def _reset_loss_plot(self): From 1eed4ead4b9c48986091f53d4111ffdb16512192 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 13:26:48 +0200 Subject: [PATCH 34/70] Plot + log_parameters --- .../code_models/worker_training.py | 53 +++++++++++++++++-- 
.../code_plugins/plugin_model_training.py | 7 ++- 2 files changed, 53 insertions(+), 7 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 5b6169e5..86e940b2 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -388,6 +388,48 @@ def _get_data(self): eval_dataloader = None return dataloader, eval_dataloader, data_shape + def log_parameters(self): + self.log("*" * 20) + self.log("-- Parameters --") + self.log(f"Device: {self.config.device}") + self.log(f"Batch size: {self.config.batch_size}") + self.log(f"Epochs: {self.config.max_epochs}") + self.log(f"Learning rate: {self.config.learning_rate}") + self.log(f"Validation interval: {self.config.validation_interval}") + if self.config.weights_info.custom: + self.log(f"Custom weights: {self.config.weights_info.path}") + elif self.config.weights_info.use_pretrained: + self.log(f"Pretrained weights: {self.config.weights_info.path}") + if self.config.sampling: + self.log( + f"Using {self.config.num_samples} samples of size {self.config.sample_size}" + ) + if self.config.do_augmentation: + self.log("Using data augmentation") + ############## + self.log("-- Model --") + self.log(f"Using {self.config.num_classes} classes") + self.log(f"Weight decay: {self.config.weight_decay}") + self.log("* NCuts : ") + self.log(f"- Insensity sigma {self.config.intensity_sigma}") + self.log(f"- Spatial sigma {self.config.spatial_sigma}") + self.log(f"- Radius : {self.config.radius}") + self.log(f"* Reconstruction loss : {self.config.reconstruction_loss}") + self.log( + f"Weighted sum : {self.config.n_cuts_weight}*Ncuts + {self.config.rec_loss_weight}*Reconstruction" + ) + ############## + self.log("-- Data --") + self.log("Training data :") + [self.log(f"\n{v}") for k, v in self.config.train_data_dict.items()] + if self.config.eval_volume_dict is not None: + self.log("Validation data :") + [ + 
self.log(f"\n{k}: {v}") + for d in self.config.eval_volume_dict + for k, v in d.items() + ] + def train(self): try: if self.config is None: @@ -411,8 +453,9 @@ def train(self): self.log(f"Using device: {device}") - self.log("Config:") # FIXME log_parameters func instead - [self.log(str(a)) for a in self.config.__dict__.items()] + # self.log("Config:") # FIXME log_parameters func instead + # [self.log(str(a)) for a in self.config.__dict__.items()] + self.log_parameters() self.log("Initializing training...") self.log("Getting the data") @@ -783,11 +826,11 @@ def train(self): val_in = val_inputs[0].detach().cpu().numpy() display_dict = { - "Decoder output": { + "Reconstruction": { "data": np.squeeze(dec_out_val), "cmap": "gist_earth", }, - "Encoder output": { + "Segmentation": { "data": np.squeeze(enc_out_val), "cmap": "turbo", }, @@ -820,7 +863,7 @@ def train(self): * (self.config.max_epochs / (epoch + 1) - 1) / 60 ) - self.log(f"ETA: {eta:.2f} minutes") + self.log(f"ETA: {eta:.1f} minutes") self.log("-" * 20) # Save the model diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index c6f3c26d..8d570525 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1036,7 +1036,8 @@ def _set_worker_config( self.weights_config.path = self.weights_config.path self.weights_config.custom = self.custom_weights_choice.isChecked() self.weights_config.use_pretrained = ( - not self.use_transfer_choice.isChecked() + self.use_transfer_choice.isChecked() + and not self.custom_weights_choice.isChecked() ) deterministic_config = config.DeterministicConfig( enabled=self.use_deterministic_choice.isChecked(), @@ -1436,7 +1437,9 @@ def _plot_loss( if metric_name == "Dice metric": self._show_plot_max(self.plot_1, y) if len(loss_values_1.keys()) > 1: - self.plot_1.legend(loc="best", fontsize="10", markerscale=0.6) + self.plot_1.legend( + 
loc="lower left", fontsize="10", markerscale=0.6 + ) # update plot 2 if self._is_current_job_supervised(): From 79724dd6776288af9f5c60472af8cb452f942131 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 13:28:28 +0200 Subject: [PATCH 35/70] Update worker_training.py --- napari_cellseg3d/code_models/worker_training.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 86e940b2..7e982444 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -421,7 +421,11 @@ def log_parameters(self): ############## self.log("-- Data --") self.log("Training data :") - [self.log(f"\n{v}") for k, v in self.config.train_data_dict.items()] + [ + self.log(f"\n{v}") + for d in self.config.train_data_dict + for k, v in d.items() + ] if self.config.eval_volume_dict is not None: self.log("Validation data :") [ From 7f3a118931de4bf05c30b80769fd381025bb7fd0 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 13:34:52 +0200 Subject: [PATCH 36/70] Disable WANDB for now + log param tweaks --- .../code_models/worker_training.py | 45 ++++++++----------- .../code_plugins/plugin_model_training.py | 2 +- 2 files changed, 20 insertions(+), 27 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 7e982444..18231636 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -68,8 +68,6 @@ logger.debug(f"PRETRAINED WEIGHT DIR LOCATION : {PRETRAINED_WEIGHTS_DIR}") try: - import wandb - WANDB_INSTALLED = True except ImportError: logger.warning( @@ -411,25 +409,25 @@ def log_parameters(self): self.log(f"Using {self.config.num_classes} classes") self.log(f"Weight decay: {self.config.weight_decay}") self.log("* NCuts : ") - self.log(f"- Insensity sigma 
{self.config.intensity_sigma}") + self.log(f"- Intensity sigma {self.config.intensity_sigma}") self.log(f"- Spatial sigma {self.config.spatial_sigma}") self.log(f"- Radius : {self.config.radius}") self.log(f"* Reconstruction loss : {self.config.reconstruction_loss}") self.log( - f"Weighted sum : {self.config.n_cuts_weight}*Ncuts + {self.config.rec_loss_weight}*Reconstruction" + f"Weighted sum : {self.config.n_cuts_weight}*NCuts + {self.config.rec_loss_weight}*Reconstruction" ) ############## self.log("-- Data --") self.log("Training data :") [ - self.log(f"\n{v}") + self.log(f"{v}") for d in self.config.train_data_dict for k, v in d.items() ] if self.config.eval_volume_dict is not None: self.log("Validation data :") [ - self.log(f"\n{k}: {v}") + self.log(f"{k}: {v}") for d in self.config.eval_volume_dict for k, v in d.items() ] @@ -443,9 +441,9 @@ def train(self): set_track_meta(False) ############## # if WANDB_INSTALLED: - # wandb.init( - # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE - # ) + # wandb.init( + # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE + # ) set_determinism( seed=self.config.deterministic_config.seed @@ -455,12 +453,8 @@ def train(self): normalize_function = utils.remap_image device = self.config.device - self.log(f"Using device: {device}") - - # self.log("Config:") # FIXME log_parameters func instead - # [self.log(str(a)) for a in self.config.__dict__.items()] + # self.log(f"Using device: {device}") self.log_parameters() - self.log("Initializing training...") self.log("Getting the data") @@ -473,7 +467,6 @@ def train(self): # Training the model # ################################################### self.log("Initializing the model:") - self.log("- Getting the model") # Initialize the model model = WNet( @@ -494,8 +487,8 @@ def train(self): ) ) - if WANDB_INSTALLED: - wandb.watch(model, log_freq=100) + # if WANDB_INSTALLED: + # wandb.watch(model, log_freq=100) if self.config.weights_info.custom: if 
self.config.weights_info.use_pretrained: @@ -619,10 +612,10 @@ def train(self): ) epoch_rec_loss += reconstruction_loss.item() - if WANDB_INSTALLED: - wandb.log( - {"Reconstruction loss": reconstruction_loss.item()} - ) + # if WANDB_INSTALLED: + # wandb.log( + # {"Reconstruction loss": reconstruction_loss.item()} + # ) # Backward pass for the reconstruction loss optimizer.zero_grad() @@ -631,8 +624,8 @@ def train(self): loss = alpha * Ncuts + beta * reconstruction_loss epoch_loss += loss.item() - if WANDB_INSTALLED: - wandb.log({"Weighted sum of losses": loss.item()}) + # if WANDB_INSTALLED: + # wandb.log({"Weighted sum of losses": loss.item()}) loss.backward(loss) optimizer.step() @@ -818,9 +811,9 @@ def train(self): self.log(f"Saving new best model to {save_path}") torch.save(model.state_dict(), save_path) - if WANDB_INSTALLED: - # log validation dice score for each validation round - wandb.log({"val/dice_metric": metric}) + # if WANDB_INSTALLED: + # log validation dice score for each validation round + # wandb.log({"val/dice_metric": metric}) dec_out_val = ( val_decoder_outputs[0].detach().cpu().numpy() diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 8d570525..799ab3e0 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1402,7 +1402,7 @@ def _show_plot_max(self, plot, y): x_max, dice_max, c="r", - label="Max. Dice.", + label="Max. 
Dice", zorder=5, ) From 385552b0a23e8d38d4cde198d7e1a978ed6f50e1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 13:40:07 +0200 Subject: [PATCH 37/70] UI/log tweaks --- napari_cellseg3d/code_models/worker_training.py | 2 -- napari_cellseg3d/interface.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 18231636..2a7341d0 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -880,10 +880,8 @@ def train(self): # "best_metric_epoch": best_dice_epoch, # } # ) - self.log("*" * 50) # Save the model - print( "Saving the model to: ", self.config.results_path_folder + "/wnet.pth", diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index 7d1ec7c5..4efd2269 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -493,7 +493,7 @@ def __init__( elif self._divide_factor == 10: self._value_label.setFixedWidth(30) else: - self._value_label.setFixedWidth(40) + self._value_label.setFixedWidth(60) self._value_label.setAlignment(Qt.AlignCenter) self._value_label.setSizePolicy( QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed From c54ee268b65c38e5b2de854e0b05ff933b92c55b Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 13:55:22 +0200 Subject: [PATCH 38/70] Functional WNet training --- napari_cellseg3d/code_models/worker_training.py | 7 ------- .../code_plugins/plugin_model_training.py | 12 ++++-------- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 2a7341d0..91aafa69 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -86,13 +86,6 @@ # https://www.pythoncentral.io/pysidepyqt-tutorial-creating-your-own-signals-and-slots/ # 
https://napari-staging-site.github.io/guides/stable/threading.html -# TODO list for WNet training : -# 1. Create a custom base worker for training to avoid code duplication -# 2. Create a custom worker for WNet training -# 3. Adapt UI for WNet training (Advanced tab + model choice on first tab) -# 4. Adapt plots and TrainingReport for WNet training -# 5. log_parameters function - class TrainingWorkerBase(GeneratorWorker): """A basic worker abstract class, to run training jobs in. diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 799ab3e0..cec77f76 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1295,13 +1295,9 @@ def _display_results(self, images_dict, complete_missing=False): self.result_layers[i].refresh() self.result_layers[i].reset_contrast_limits() - def on_yield(self, report: TrainingReport): # TODO refactor for dict - # logger.info( - # f"\nCatching results : for epoch {data['epoch']}, - # loss is {data['losses']} and validation is {data['val_metrics']}" - # ) + def on_yield(self, report: TrainingReport): if report == TrainingReport(): - return + return # skip empty reports if report.show_plot: try: @@ -1375,7 +1371,7 @@ def _make_csv(self): dice_metric = self.loss_1_values["Dice metric"] self.df = pd.DataFrame( { - "epoch": size_column, + "Epoch": size_column, "Ncuts loss": ncuts_loss, "Dice metric": dice_metric, "Reconstruction loss": self.loss_2_values, @@ -1384,7 +1380,7 @@ def _make_csv(self): except KeyError: self.df = pd.DataFrame( { - "epoch": size_column, + "Epoch": size_column, "Ncuts loss": ncuts_loss, "Reconstruction loss": self.loss_2_values, } From 1f7c9ede5bce875b4f40f19ccc9bd81b7cec64f5 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 14:26:38 +0200 Subject: [PATCH 39/70] Clean exit / free memory attempt --- .../code_models/worker_training.py | 49 
+++++++++++++++++-- .../code_plugins/plugin_model_training.py | 22 ++------- 2 files changed, 50 insertions(+), 21 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 91aafa69..f797b952 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -629,6 +629,20 @@ def train(self): # or self.config.scheduler == "CyclicLR" # ): # scheduler.step() + if self._abort_requested: + dataloader = None + del dataloader + eval_dataloader = None + del eval_dataloader + model = None + del model + optimizer = None + del optimizer + criterionE = None + del criterionE + criterionW = None + del criterionW + torch.cuda.empty_cache() yield TrainingReport( show_plot=False, weights=model.state_dict() @@ -848,6 +862,21 @@ def train(self): # reset the status for next validation round dice_metric.reset() + if self._abort_requested: + dataloader = None + del dataloader + eval_dataloader = None + del eval_dataloader + model = None + del model + optimizer = None + del optimizer + criterionE = None + del criterionE + criterionW = None + del criterionW + torch.cuda.empty_cache() + eta = ( (time.time() - startTime) * (self.config.max_epochs / (epoch + 1) - 1) @@ -875,9 +904,8 @@ def train(self): # ) # Save the model - print( - "Saving the model to: ", - self.config.results_path_folder + "/wnet.pth", + self.log( + f"Saving the model to: {self.config.results_path_folder}/wnet.pth", ) torch.save( model.state_dict(), @@ -894,7 +922,20 @@ def train(self): # model_artifact.add_file(self.config.save_model_path) # wandb.log_artifact(model_artifact) - return ncuts_losses, rec_losses, model + # return ncuts_losses, rec_losses, model + dataloader = None + del dataloader + eval_dataloader = None + del eval_dataloader + model = None + del model + optimizer = None + del optimizer + criterionE = None + del criterionE + criterionW = None + del criterionW + 
torch.cuda.empty_cache() except Exception as e: msg = f"Training failed with exception: {e}" self.log(msg) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index cec77f76..9a7027ed 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -124,7 +124,7 @@ def __init__( self.worker_config = None self.data = None """Data dictionary containing file paths""" - self.stop_requested = False + self._stop_requested = False """Whether the worker should stop or not""" self.start_time = None """Start time of the latest job""" @@ -926,7 +926,7 @@ def start(self): """ self.start_time = utils.get_time_filepath() - if self.stop_requested: + if self._stop_requested: self.log.print_and_log("Worker is already stopping !") return @@ -987,7 +987,7 @@ def start(self): self.log.print_and_log( f"Stop requested at {utils.get_time()}. \nWaiting for next yielding step..." ) - self.stop_requested = True + self._stop_requested = True self.start_btn.setText("Stopping... 
Please wait") self.log.print_and_log("*" * 20) self.worker.quit() @@ -1230,23 +1230,11 @@ def on_finish(self): ) self.worker = None - # if zipfile.is_zipfile(self.results_path_folder+".zip"): - - # if not shutil.rmtree.avoids_symlink_attacks: - # raise RuntimeError("shutil.rmtree is not safe on this platform") - - # shutil.rmtree(self.results_path_folder) - - # self.results_path_folder = "" - - # self.clean_cache() # trying to fix memory leak def on_error(self): """Catches errored signal from worker""" self.log.print_and_log(f"WORKER ERRORED at {utils.get_time()}") self.worker = None - # self.empty_cuda_cache() - # self.clean_cache() def on_stop(self): self._remove_result_layers() @@ -1325,7 +1313,7 @@ def on_yield(self, report: TrainingReport): self.loss_1_values = report.loss_1_values self.loss_2_values = report.loss_2_values - if self.stop_requested: + if self._stop_requested: self.log.print_and_log( "Saving weights from aborted training in results folder" ) @@ -1338,7 +1326,7 @@ def on_yield(self, report: TrainingReport): ) self.log.print_and_log("Saving complete") self.on_stop() - self.stop_requested = False + self._stop_requested = False def _make_csv(self): size_column = range(1, self.worker_config.max_epochs + 1) From 7bb5edc04845997f9f72d3e3d88d9c14fb613978 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 15:25:52 +0200 Subject: [PATCH 40/70] Cleanup + tests - Removed previous train script - Fix tests - Enable test workflow on GH --- .github/workflows/test_and_deploy.yml | 1 + napari_cellseg3d/_tests/test_models.py | 2 +- .../_tests/test_unsup_training.py | 5 +- napari_cellseg3d/_tests/test_wnet_training.py | 25 - .../code_models/models/wnet/train_wnet.py | 992 ------------------ 5 files changed, 5 insertions(+), 1020 deletions(-) delete mode 100644 napari_cellseg3d/code_models/models/wnet/train_wnet.py diff --git a/.github/workflows/test_and_deploy.yml b/.github/workflows/test_and_deploy.yml index fafb1719..b6c9d848 100644 --- 
a/.github/workflows/test_and_deploy.yml +++ b/.github/workflows/test_and_deploy.yml @@ -7,6 +7,7 @@ on: push: branches: - main + - cy/wnet-train tags: - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10 pull_request: diff --git a/napari_cellseg3d/_tests/test_models.py b/napari_cellseg3d/_tests/test_models.py index a9176fa4..89043ba9 100644 --- a/napari_cellseg3d/_tests/test_models.py +++ b/napari_cellseg3d/_tests/test_models.py @@ -115,7 +115,7 @@ def test_pretrained_weights_compatibility(): for model_name in MODEL_LIST: file_name = MODEL_LIST[model_name].weights_file WeightsDownloader().download_weights(model_name, file_name) - model = MODEL_LIST[model_name](input_img_size=[128, 128, 128]) + model = MODEL_LIST[model_name](input_img_size=[64, 64, 64]) try: model.load_state_dict( torch.load( diff --git a/napari_cellseg3d/_tests/test_unsup_training.py b/napari_cellseg3d/_tests/test_unsup_training.py index 3ebd4768..9b26167a 100644 --- a/napari_cellseg3d/_tests/test_unsup_training.py +++ b/napari_cellseg3d/_tests/test_unsup_training.py @@ -5,6 +5,7 @@ Trainer, ) + def test_unsupervised_worker(make_napari_viewer_proxy): im_path = Path(__file__).resolve().parent / "res/test.tif" # im_path_str = str(im_path) @@ -34,8 +35,8 @@ def test_unsupervised_worker(make_napari_viewer_proxy): assert eval_dataloader is None assert data_shape == (6, 6, 6) - widget.images_filepaths = [str(im_path.parent)] - widget.labels_filepaths = [str(im_path.parent)] + widget.images_filepaths = [str(im_path)] + widget.labels_filepaths = [str(im_path)] # widget.unsupervised_eval_data = widget.create_train_dataset_dict() worker = widget._create_worker(additional_results_description="TEST_3") dataloader, eval_dataloader, data_shape = worker._get_data() diff --git a/napari_cellseg3d/_tests/test_wnet_training.py b/napari_cellseg3d/_tests/test_wnet_training.py index afc71479..e69de29b 100644 --- a/napari_cellseg3d/_tests/test_wnet_training.py +++ b/napari_cellseg3d/_tests/test_wnet_training.py @@ 
-1,25 +0,0 @@ -####################################################### -# Disabled as it takes too much memory for GH actions # -####################################################### - - -# from pathlib import Path -# from napari_cellseg3d.code_models.models.wnet import train_wnet as t -# -# def test_wnet_training(): -# config = t.Config() -# -# config.batch_size = 1 -# config.num_epochs = 1 -# -# config.train_volume_directory = str(Path(__file__).resolve().parent / "res/wnet_test") -# config.eval_volume_directory = config.train_volume_directory -# config.save_every = 1 -# config.val_interval = 2 # skip validation -# config.save_model_path = config.train_volume_directory + "/test.pth" -# -# ncuts_loss, rec_loss, model = t.train(train_config=config) -# -# assert ncuts_loss is not None -# assert rec_loss is not None -# assert model is not None diff --git a/napari_cellseg3d/code_models/models/wnet/train_wnet.py b/napari_cellseg3d/code_models/models/wnet/train_wnet.py deleted file mode 100644 index d999fc17..00000000 --- a/napari_cellseg3d/code_models/models/wnet/train_wnet.py +++ /dev/null @@ -1,992 +0,0 @@ -# """ -# This file contains the code to train the WNet model. 
-# """ -# # import napari -# import glob -# import time -# from pathlib import Path -# from warnings import warn -# -# import numpy as np -# import tifffile as tiff -# import torch -# import torch.nn as nn -# -# # MONAI -# from monai.data import ( -# CacheDataset, -# DataLoader, -# PatchDataset, -# pad_list_data_collate, -# ) -# from monai.data.meta_obj import set_track_meta -# from monai.metrics import DiceMetric -# from monai.transforms import ( -# AsDiscrete, -# Compose, -# EnsureChannelFirst, -# EnsureChannelFirstd, -# EnsureTyped, -# LoadImaged, -# Orientationd, -# RandFlipd, -# RandRotate90d, -# RandShiftIntensityd, -# RandSpatialCropSamplesd, -# ScaleIntensityRanged, -# SpatialPadd, -# ToTensor, -# ) -# from monai.utils.misc import set_determinism -# -# # local -# from napari_cellseg3d.code_models.models.wnet.model import WNet -# from napari_cellseg3d.code_models.models.wnet.soft_Ncuts import SoftNCutsLoss -# from napari_cellseg3d.utils import LOGGER as logger -# from napari_cellseg3d.utils import dice_coeff, get_padding_dim, remap_image -# -# try: -# import wandb -# -# WANDB_INSTALLED = True -# except ImportError: -# warn( -# "wandb not installed, wandb config will not be taken into account", -# stacklevel=1, -# ) -# WANDB_INSTALLED = False -# -# __author__ = "Yves Paychère, Colin Hofmann, Cyril Achard" -# -# -# ########################## -# # Utils functions # -# ########################## -# -# -# # def create_dataset_dict(volume_directory, label_directory): -# # """Creates data dictionary for MONAI transforms and training.""" -# # images_filepaths = sorted( -# # [str(file) for file in Path(volume_directory).glob("*.tif")] -# # ) -# # -# # labels_filepaths = sorted( -# # [str(file) for file in Path(label_directory).glob("*.tif")] -# # ) -# # if len(images_filepaths) == 0 or len(labels_filepaths) == 0: -# # raise ValueError( -# # f"Data folders are empty \n{volume_directory} \n{label_directory}" -# # ) -# # -# # logger.info("Images :") -# # for file in 
images_filepaths: -# # logger.info(Path(file).stem) -# # logger.info("*" * 10) -# # logger.info("Labels :") -# # for file in labels_filepaths: -# # logger.info(Path(file).stem) -# # try: -# # data_dicts = [ -# # {"image": image_name, "label": label_name} -# # for image_name, label_name in zip( -# # images_filepaths, labels_filepaths -# # ) -# # ] -# # except ValueError as e: -# # raise ValueError( -# # f"Number of images and labels does not match : \n{volume_directory} \n{label_directory}" -# # ) from e -# # # print(f"Loaded eval image: {data_dicts}") -# # return data_dicts -# -# -# def create_dataset_dict_no_labs(volume_directory): -# """Creates unsupervised data dictionary for MONAI transforms and training.""" -# images_filepaths = sorted(glob.glob(str(Path(volume_directory) / "*.tif"))) -# if len(images_filepaths) == 0: -# raise ValueError(f"Data folder {volume_directory} is empty") -# -# logger.info("Images :") -# for file in images_filepaths: -# logger.info(Path(file).stem) -# logger.info("*" * 10) -# -# return [{"image": image_name} for image_name in images_filepaths] -# -# -# ################################ -# # WNet: Config & WANDB # -# ################################ -# -# -# class WNetTrainingWorkerConfig: -# def __init__(self): -# # WNet -# self.in_channels = 1 -# self.out_channels = 1 -# self.num_classes = 2 -# self.dropout = 0.65 -# self.use_clipping = False -# self.clipping = 1 -# -# self.lr = 1e-6 -# self.scheduler = "None" # "CosineAnnealingLR" # "ReduceLROnPlateau" -# self.weight_decay = 0.01 # None -# -# self.intensity_sigma = 1 -# self.spatial_sigma = 4 -# self.radius = 2 # yields to a radius depending on the data shape -# -# self.n_cuts_weight = 0.5 -# self.reconstruction_loss = "MSE" # "BCE" -# self.rec_loss_weight = 0.5 / 100 -# -# self.num_epochs = 100 -# self.val_interval = 5 -# self.batch_size = 2 -# -# # Data -# # self.train_volume_directory = "./../dataset/VIP_full" -# # self.eval_volume_directory = "./../dataset/VIP_cropped/eval/" -# 
self.normalize_input = True -# self.normalizing_function = remap_image # normalize_quantile -# # self.use_patch = False -# # self.patch_size = (64, 64, 64) -# # self.num_patches = 30 -# # self.eval_num_patches = 20 -# # self.do_augmentation = True -# # self.parallel = False -# -# # self.save_model = True -# self.save_model_path = ( -# r"./../results/new_model/wnet_new_model_all_data_3class.pth" -# ) -# # self.save_losses_path = ( -# # r"./../results/new_model/wnet_new_model_all_data_3class.pkl" -# # ) -# self.save_every = 5 -# self.weights_path = None -# -# -# c = WNetTrainingWorkerConfig() -# ############### -# # Scheduler config -# ############### -# schedulers = { -# "ReduceLROnPlateau": { -# "factor": 0.5, -# "patience": 50, -# }, -# "CosineAnnealingLR": { -# "T_max": 25000, -# "eta_min": 1e-8, -# }, -# "CosineAnnealingWarmRestarts": { -# "T_0": 50000, -# "eta_min": 1e-8, -# "T_mult": 1, -# }, -# "CyclicLR": { -# "base_lr": 2e-7, -# "max_lr": 2e-4, -# "step_size_up": 250, -# "mode": "triangular", -# }, -# } -# -# ############### -# # WANDB_CONFIG -# ############### -# WANDB_MODE = "disabled" -# # WANDB_MODE = "online" -# -# WANDB_CONFIG = { -# # data setting -# "num_workers": c.num_workers, -# "normalize": c.normalize_input, -# "use_patch": c.use_patch, -# "patch_size": c.patch_size, -# "num_patches": c.num_patches, -# "eval_num_patches": c.eval_num_patches, -# "do_augmentation": c.do_augmentation, -# "model_save_path": c.save_model_path, -# # train setting -# "batch_size": c.batch_size, -# "learning_rate": c.lr, -# "weight_decay": c.weight_decay, -# "scheduler": { -# "name": c.scheduler, -# "ReduceLROnPlateau_config": { -# "factor": schedulers["ReduceLROnPlateau"]["factor"], -# "patience": schedulers["ReduceLROnPlateau"]["patience"], -# }, -# "CosineAnnealingLR_config": { -# "T_max": schedulers["CosineAnnealingLR"]["T_max"], -# "eta_min": schedulers["CosineAnnealingLR"]["eta_min"], -# }, -# "CosineAnnealingWarmRestarts_config": { -# "T_0": 
schedulers["CosineAnnealingWarmRestarts"]["T_0"], -# "eta_min": schedulers["CosineAnnealingWarmRestarts"]["eta_min"], -# "T_mult": schedulers["CosineAnnealingWarmRestarts"]["T_mult"], -# }, -# "CyclicLR_config": { -# "base_lr": schedulers["CyclicLR"]["base_lr"], -# "max_lr": schedulers["CyclicLR"]["max_lr"], -# "step_size_up": schedulers["CyclicLR"]["step_size_up"], -# "mode": schedulers["CyclicLR"]["mode"], -# }, -# }, -# "max_epochs": c.num_epochs, -# "save_every": c.save_every, -# "val_interval": c.val_interval, -# # loss -# "reconstruction_loss": c.reconstruction_loss, -# "loss weights": { -# "n_cuts_weight": c.n_cuts_weight, -# "rec_loss_weight": c.rec_loss_weight, -# }, -# "loss_params": { -# "intensity_sigma": c.intensity_sigma, -# "spatial_sigma": c.spatial_sigma, -# "radius": c.radius, -# }, -# # model -# "model_type": "wnet", -# "model_params": { -# "in_channels": c.in_channels, -# "out_channels": c.out_channels, -# "num_classes": c.num_classes, -# "dropout": c.dropout, -# "use_clipping": c.use_clipping, -# "clipping_value": c.clipping, -# }, -# # CRF -# "crf_params": { -# "sa": c.sa, -# "sb": c.sb, -# "sg": c.sg, -# "w1": c.w1, -# "w2": c.w2, -# "n_iter": c.n_iter, -# }, -# } -# -# -# def train(weights_path=None, train_config=None): -# if train_config is None: -# config = WNetTrainingWorkerConfig() -# ############## -# # disable metadata tracking in MONAI -# set_track_meta(False) -# ############## -# if WANDB_INSTALLED: -# wandb.init( -# config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE -# ) -# -# set_determinism(seed=34936339) # use default seed from NP_MAX -# torch.use_deterministic_algorithms(True, warn_only=True) -# -# config = train_config -# normalize_function = config.normalizing_function -# CUDA = torch.cuda.is_available() -# device = torch.device("cuda" if CUDA else "cpu") -# -# print(f"Using device: {device}") -# -# print("Config:") -# [print(a) for a in config.__dict__.items()] -# -# print("Initializing training...") -# 
print("Getting the data") -# -# if config.use_patch: -# (data_shape, dataset) = get_patch_dataset(config) -# else: -# (data_shape, dataset) = get_dataset(config) -# transform = Compose( -# [ -# ToTensor(), -# EnsureChannelFirst(channel_dim=0), -# ] -# ) -# dataset = [transform(im) for im in dataset] -# for data in dataset: -# print(f"data shape: {data.shape}") -# break -# -# dataloader = DataLoader( -# dataset, -# batch_size=config.batch_size, -# shuffle=True, -# num_workers=config.num_workers, -# collate_fn=pad_list_data_collate, -# ) -# -# if config.eval_volume_directory is not None: -# # eval_dataset = get_patch_eval_dataset(config) -# eval_dataset = None -# -# eval_dataloader = DataLoader( -# eval_dataset, -# batch_size=config.batch_size, -# shuffle=False, -# num_workers=config.num_workers, -# collate_fn=pad_list_data_collate, -# ) -# -# dice_metric = DiceMetric( -# include_background=False, reduction="mean", get_not_nans=False -# ) -# ################################################### -# # Training the model # -# ################################################### -# print("Initializing the model:") -# -# print("- getting the model") -# # Initialize the model -# model = WNet( -# in_channels=config.in_channels, -# out_channels=config.out_channels, -# num_classes=config.num_classes, -# dropout=config.dropout, -# ) -# model = ( -# nn.DataParallel(model).cuda() if CUDA and config.parallel else model -# ) -# model.to(device) -# -# if config.use_clipping: -# for p in model.parameters(): -# p.register_hook( -# lambda grad: torch.clamp( -# grad, min=-config.clipping, max=config.clipping -# ) -# ) -# -# if WANDB_INSTALLED: -# wandb.watch(model, log_freq=100) -# -# if weights_path is not None: -# model.load_state_dict(torch.load(weights_path, map_location=device)) -# -# print("- getting the optimizers") -# # Initialize the optimizers -# if config.weight_decay is not None: -# decay = config.weight_decay -# optimizer = torch.optim.Adam( -# model.parameters(), 
lr=config.lr, weight_decay=decay -# ) -# else: -# optimizer = torch.optim.Adam(model.parameters(), lr=config.lr) -# -# print("- getting the loss functions") -# # Initialize the Ncuts loss function -# criterionE = SoftNCutsLoss( -# data_shape=data_shape, -# device=device, -# intensity_sigma=config.intensity_sigma, -# spatial_sigma=config.spatial_sigma, -# radius=config.radius, -# ) -# -# if config.reconstruction_loss == "MSE": -# criterionW = nn.MSELoss() -# elif config.reconstruction_loss == "BCE": -# criterionW = nn.BCELoss() -# else: -# raise ValueError( -# f"Unknown reconstruction loss : {config.reconstruction_loss} not supported" -# ) -# -# print("- getting the learning rate schedulers") -# # Initialize the learning rate schedulers -# scheduler = get_scheduler(config, optimizer) -# # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( -# # optimizer, mode="min", factor=0.5, patience=10, verbose=True -# # ) -# model.train() -# -# print("Ready") -# print("Training the model") -# print("*" * 50) -# -# startTime = time.time() -# ncuts_losses = [] -# rec_losses = [] -# total_losses = [] -# best_dice = -1 -# best_dice_epoch = -1 -# -# # Train the model -# for epoch in range(config.num_epochs): -# print(f"Epoch {epoch + 1} of {config.num_epochs}") -# -# epoch_ncuts_loss = 0 -# epoch_rec_loss = 0 -# epoch_loss = 0 -# -# for _i, batch in enumerate(dataloader): -# # raise NotImplementedError("testing") -# if config.use_patch: -# image = batch["image"].to(device) -# else: -# image = batch.to(device) -# if config.batch_size == 1: -# image = image.unsqueeze(0) -# else: -# image = image.unsqueeze(0) -# image = torch.swapaxes(image, 0, 1) -# -# # Forward pass -# enc = model.forward_encoder(image) -# # out = model.forward(image) -# -# # Compute the Ncuts loss -# Ncuts = criterionE(enc, image) -# epoch_ncuts_loss += Ncuts.item() -# if WANDB_INSTALLED: -# wandb.log({"Ncuts loss": Ncuts.item()}) -# -# # Forward pass -# enc, dec = model(image) -# -# # Compute the 
reconstruction loss -# if isinstance(criterionW, nn.MSELoss): -# reconstruction_loss = criterionW(dec, image) -# elif isinstance(criterionW, nn.BCELoss): -# reconstruction_loss = criterionW( -# torch.sigmoid(dec), -# remap_image(image, new_max=1), -# ) -# -# epoch_rec_loss += reconstruction_loss.item() -# if WANDB_INSTALLED: -# wandb.log({"Reconstruction loss": reconstruction_loss.item()}) -# -# # Backward pass for the reconstruction loss -# optimizer.zero_grad() -# alpha = config.n_cuts_weight -# beta = config.rec_loss_weight -# -# loss = alpha * Ncuts + beta * reconstruction_loss -# epoch_loss += loss.item() -# if WANDB_INSTALLED: -# wandb.log({"Sum of losses": loss.item()}) -# loss.backward(loss) -# optimizer.step() -# -# if config.scheduler == "CosineAnnealingWarmRestarts": -# scheduler.step(epoch + _i / len(dataloader)) -# if ( -# config.scheduler == "CosineAnnealingLR" -# or config.scheduler == "CyclicLR" -# ): -# scheduler.step() -# -# ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) -# rec_losses.append(epoch_rec_loss / len(dataloader)) -# total_losses.append(epoch_loss / len(dataloader)) -# -# if WANDB_INSTALLED: -# wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) -# wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) -# wandb.log({"Sum of losses_epoch": total_losses[-1]}) -# # wandb.log({"epoch": epoch}) -# # wandb.log({"learning_rate model": optimizerW.param_groups[0]["lr"]}) -# # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) -# wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) -# -# print("Ncuts loss: ", ncuts_losses[-1]) -# if epoch > 0: -# print( -# "Ncuts loss difference: ", -# ncuts_losses[-1] - ncuts_losses[-2], -# ) -# print("Reconstruction loss: ", rec_losses[-1]) -# if epoch > 0: -# print( -# "Reconstruction loss difference: ", -# rec_losses[-1] - rec_losses[-2], -# ) -# print("Sum of losses: ", total_losses[-1]) -# if epoch > 0: -# print( -# "Sum of losses difference: ", -# 
total_losses[-1] - total_losses[-2], -# ) -# -# # Update the learning rate -# if config.scheduler == "ReduceLROnPlateau": -# # schedulerE.step(epoch_ncuts_loss) -# # schedulerW.step(epoch_rec_loss) -# scheduler.step(epoch_rec_loss) -# if ( -# config.eval_volume_directory is not None -# and (epoch + 1) % config.val_interval == 0 -# ): -# model.eval() -# print("Validating...") -# with torch.no_grad(): -# for _k, val_data in enumerate(eval_dataloader): -# val_inputs, val_labels = ( -# val_data["image"].to(device), -# val_data["label"].to(device), -# ) -# -# # normalize val_inputs across channels -# if config.normalize_input: -# for i in range(val_inputs.shape[0]): -# for j in range(val_inputs.shape[1]): -# val_inputs[i][j] = normalize_function( -# val_inputs[i][j] -# ) -# -# val_outputs = model.forward_encoder(val_inputs) -# val_outputs = AsDiscrete(threshold=0.5)(val_outputs) -# -# # compute metric for current iteration -# for channel in range(val_outputs.shape[1]): -# max_dice_channel = torch.argmax( -# torch.Tensor( -# [ -# dice_coeff( -# y_pred=val_outputs[ -# :, -# channel : (channel + 1), -# :, -# :, -# :, -# ], -# y_true=val_labels, -# ) -# ] -# ) -# ) -# -# dice_metric( -# y_pred=val_outputs[ -# :, -# max_dice_channel : (max_dice_channel + 1), -# :, -# :, -# :, -# ], -# y=val_labels, -# ) -# # if plot_val_input: # only once -# # logged_image = val_inputs.detach().cpu().numpy() -# # logged_image = np.swapaxes(logged_image, 2, 4) -# # logged_image = logged_image[0, :, 32, :, :] -# # images = wandb.Image( -# # logged_image, caption="Validation input" -# # ) -# # -# # wandb.log({"val/input": images}) -# # plot_val_input = False -# -# # if k == 2 and (30 <= epoch <= 50 or epoch % 100 == 0): -# # logged_image = val_outputs.detach().cpu().numpy() -# # logged_image = np.swapaxes(logged_image, 2, 4) -# # logged_image = logged_image[ -# # 0, max_dice_channel, 32, :, : -# # ] -# # images = wandb.Image( -# # logged_image, caption="Validation output" -# # ) -# # -# # 
wandb.log({"val/output": images}) -# # dice_metric(y_pred=val_outputs[:, 2:, :,:,:], y=val_labels) -# # dice_metric(y_pred=val_outputs[:, 1:, :, :, :], y=val_labels) -# -# # import napari -# # view = napari.Viewer() -# # view.add_image(val_inputs.cpu().numpy(), name="input") -# # view.add_image(val_labels.cpu().numpy(), name="label") -# # vis_out = np.array( -# # [i.detach().cpu().numpy() for i in val_outputs], -# # dtype=np.float32, -# # ) -# # crf_out = np.array( -# # [i.detach().cpu().numpy() for i in crf_outputs], -# # dtype=np.float32, -# # ) -# # view.add_image(vis_out, name="output") -# # view.add_image(crf_out, name="crf_output") -# # napari.run() -# -# # aggregate the final mean dice result -# metric = dice_metric.aggregate().item() -# print("Validation Dice score: ", metric) -# if best_dice < metric < 2: -# best_dice = metric -# best_dice_epoch = epoch + 1 -# if config.save_model: -# save_best_path = Path(config.save_model_path).parents[ -# 0 -# ] -# save_best_path.mkdir(parents=True, exist_ok=True) -# save_best_name = Path(config.save_model_path).stem -# save_path = ( -# str(save_best_path / save_best_name) -# + "_best_metric.pth" -# ) -# print(f"Saving new best model to {save_path}") -# torch.save(model.state_dict(), save_path) -# -# if WANDB_INSTALLED: -# # log validation dice score for each validation round -# wandb.log({"val/dice_metric": metric}) -# -# # reset the status for next validation round -# dice_metric.reset() -# -# print( -# "ETA: ", -# (time.time() - startTime) -# * (config.num_epochs / (epoch + 1) - 1) -# / 60, -# "minutes", -# ) -# print("-" * 20) -# -# # Save the model -# if config.save_model and epoch % config.save_every == 0: -# torch.save(model.state_dict(), config.save_model_path) -# # with open(config.save_losses_path, "wb") as f: -# # pickle.dump((ncuts_losses, rec_losses), f) -# -# print("Training finished") -# print(f"Best dice metric : {best_dice}") -# if WANDB_INSTALLED and config.eval_volume_directory is not None: -# 
wandb.log( -# { -# "best_dice_metric": best_dice, -# "best_metric_epoch": best_dice_epoch, -# } -# ) -# print("*" * 50) -# -# # Save the model -# if config.save_model: -# print("Saving the model to: ", config.save_model_path) -# torch.save(model.state_dict(), config.save_model_path) -# # with open(config.save_losses_path, "wb") as f: -# # pickle.dump((ncuts_losses, rec_losses), f) -# if WANDB_INSTALLED: -# model_artifact = wandb.Artifact( -# "WNet", -# type="model", -# description="WNet benchmark", -# metadata=dict(WANDB_CONFIG), -# ) -# model_artifact.add_file(config.save_model_path) -# wandb.log_artifact(model_artifact) -# -# return ncuts_losses, rec_losses, model -# -# -# def get_dataset(config): -# """Creates a Dataset from the original data using the tifffile library -# -# Args: -# config (WNetTrainingWorkerConfig): The configuration object -# -# Returns: -# (tuple): A tuple containing the shape of the data and the dataset -# """ -# train_files = create_dataset_dict_no_labs( -# volume_directory=config.train_volume_directory -# ) -# train_files = [d.get("image") for d in train_files] -# # logger.debug(f"train_files: {train_files}") -# volumes = tiff.imread(train_files).astype(np.float32) -# volume_shape = volumes.shape -# # logger.debug(f"volume_shape: {volume_shape}") -# -# if len(volume_shape) == 3: -# volumes = np.expand_dims(volumes, axis=0) -# -# if config.normalize_input: -# volumes = np.array( -# [ -# # mad_normalization(volume) -# config.normalizing_function(volume) -# for volume in volumes -# ] -# ) -# # mean = volumes.mean(axis=0) -# # std = volumes.std(axis=0) -# # volumes = (volumes - mean) / std -# # print("NORMALIZED VOLUMES") -# # print(volumes.shape) -# # [print("MIN MAX", volume.flatten().min(), volume.flatten().max()) for volume in volumes] -# # print(volumes.mean(axis=0), volumes.std(axis=0)) -# -# dataset = CacheDataset(data=volumes) -# -# return (volume_shape, dataset) -# -# # train_files = create_dataset_dict_no_labs( -# # 
volume_directory=config.train_volume_directory -# # ) -# # train_files = [d.get("image") for d in train_files] -# # volumes = [] -# # for file in train_files: -# # image = tiff.imread(file).astype(np.float32) -# # image = np.expand_dims(image, axis=0) # add channel dimension -# # volumes.append(image) -# # # volumes = tiff.imread(train_files).astype(np.float32) -# # volume_shape = volumes[0].shape -# # # print(volume_shape) -# # -# # if config.do_augmentation: -# # augmentation = Compose( -# # [ -# # ScaleIntensityRange( -# # a_min=0, -# # a_max=2000, -# # b_min=0.0, -# # b_max=1.0, -# # clip=True, -# # ), -# # RandShiftIntensity(offsets=0.1, prob=0.5), -# # RandFlip(spatial_axis=[1], prob=0.5), -# # RandFlip(spatial_axis=[2], prob=0.5), -# # RandRotate90(prob=0.1, max_k=3), -# # ] -# # ) -# # else: -# # augmentation = None -# # -# # dataset = CacheDataset(data=np.array(volumes), transform=augmentation) -# # -# # return (volume_shape, dataset) -# -# -# def get_patch_dataset(config): -# """Creates a Dataset from the original data using the tifffile library -# -# Args: -# config (WNetTrainingWorkerConfig): The configuration object -# -# Returns: -# (tuple): A tuple containing the shape of the data and the dataset -# """ -# -# train_files = create_dataset_dict_no_labs( -# volume_directory=config.train_volume_directory -# ) -# -# patch_func = Compose( -# [ -# LoadImaged(keys=["image"], image_only=True), -# EnsureChannelFirstd(keys=["image"], channel_dim="no_channel"), -# RandSpatialCropSamplesd( -# keys=["image"], -# roi_size=( -# config.patch_size -# ), # multiply by axis_stretch_factor if anisotropy -# # max_roi_size=(120, 120, 120), -# random_size=False, -# num_samples=config.num_patches, -# ), -# Orientationd(keys=["image"], axcodes="PLI"), -# SpatialPadd( -# keys=["image"], -# spatial_size=(get_padding_dim(config.patch_size)), -# ), -# EnsureTyped(keys=["image"]), -# ] -# ) -# -# train_transforms = Compose( -# [ -# ScaleIntensityRanged( -# keys=["image"], -# 
a_min=0, -# a_max=2000, -# b_min=0.0, -# b_max=1.0, -# clip=True, -# ), -# RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), -# RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), -# RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), -# RandRotate90d(keys=["image"], prob=0.1, max_k=3), -# EnsureTyped(keys=["image"]), -# ] -# ) -# -# dataset = PatchDataset( -# data=train_files, -# samples_per_image=config.num_patches, -# patch_func=patch_func, -# transform=train_transforms, -# ) -# -# return config.patch_size, dataset -# -# -# # def get_patch_eval_dataset(config): -# # eval_files = create_dataset_dict( -# # volume_directory=config.eval_volume_directory + "/vol", -# # label_directory=config.eval_volume_directory + "/lab", -# # ) -# # -# # patch_func = Compose( -# # [ -# # LoadImaged(keys=["image", "label"], image_only=True), -# # EnsureChannelFirstd( -# # keys=["image", "label"], channel_dim="no_channel" -# # ), -# # # NormalizeIntensityd(keys=["image"]) if config.normalize_input else lambda x: x, -# # RandSpatialCropSamplesd( -# # keys=["image", "label"], -# # roi_size=( -# # config.patch_size -# # ), # multiply by axis_stretch_factor if anisotropy -# # # max_roi_size=(120, 120, 120), -# # random_size=False, -# # num_samples=config.eval_num_patches, -# # ), -# # Orientationd(keys=["image", "label"], axcodes="PLI"), -# # SpatialPadd( -# # keys=["image", "label"], -# # spatial_size=(get_padding_dim(config.patch_size)), -# # ), -# # EnsureTyped(keys=["image", "label"]), -# # ] -# # ) -# # -# # eval_transforms = Compose( -# # [ -# # EnsureTyped(keys=["image", "label"]), -# # ] -# # ) -# # -# # return PatchDataset( -# # data=eval_files, -# # samples_per_image=config.eval_num_patches, -# # patch_func=patch_func, -# # transform=eval_transforms, -# # ) -# -# -# def get_dataset_monai(config): -# """Creates a Dataset applying some transforms/augmentation on the data using the MONAI library -# -# Args: -# config (WNetTrainingWorkerConfig): The configuration 
object -# -# Returns: -# (tuple): A tuple containing the shape of the data and the dataset -# """ -# train_files = create_dataset_dict_no_labs( -# volume_directory=config.train_volume_directory -# ) -# # print(train_files) -# # print(len(train_files)) -# # print(train_files[0]) -# first_volume = LoadImaged(keys=["image"])(train_files[0]) -# first_volume_shape = first_volume["image"].shape -# -# # Transforms to be applied to each volume -# load_single_images = Compose( -# [ -# LoadImaged(keys=["image"]), -# EnsureChannelFirstd(keys=["image"]), -# Orientationd(keys=["image"], axcodes="PLI"), -# SpatialPadd( -# keys=["image"], -# spatial_size=(get_padding_dim(first_volume_shape)), -# ), -# EnsureTyped(keys=["image"]), -# ] -# ) -# -# if config.do_augmentation: -# train_transforms = Compose( -# [ -# ScaleIntensityRanged( -# keys=["image"], -# a_min=0, -# a_max=2000, -# b_min=0.0, -# b_max=1.0, -# clip=True, -# ), -# RandShiftIntensityd(keys=["image"], offsets=0.1, prob=0.5), -# RandFlipd(keys=["image"], spatial_axis=[1], prob=0.5), -# RandFlipd(keys=["image"], spatial_axis=[2], prob=0.5), -# RandRotate90d(keys=["image"], prob=0.1, max_k=3), -# EnsureTyped(keys=["image"]), -# ] -# ) -# else: -# train_transforms = EnsureTyped(keys=["image"]) -# -# # Create the dataset -# dataset = CacheDataset( -# data=train_files, -# transform=Compose(load_single_images, train_transforms), -# ) -# -# return first_volume_shape, dataset -# -# -# def get_scheduler(config, optimizer, verbose=False): -# scheduler_name = config.scheduler -# if scheduler_name == "None": -# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( -# optimizer, -# T_max=100, -# eta_min=config.lr - 1e-6, -# verbose=verbose, -# ) -# -# elif scheduler_name == "ReduceLROnPlateau": -# scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( -# optimizer, -# mode="min", -# factor=schedulers["ReduceLROnPlateau"]["factor"], -# patience=schedulers["ReduceLROnPlateau"]["patience"], -# verbose=verbose, -# ) -# elif 
scheduler_name == "CosineAnnealingLR": -# scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( -# optimizer, -# T_max=schedulers["CosineAnnealingLR"]["T_max"], -# eta_min=schedulers["CosineAnnealingLR"]["eta_min"], -# verbose=verbose, -# ) -# elif scheduler_name == "CosineAnnealingWarmRestarts": -# scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( -# optimizer, -# T_0=schedulers["CosineAnnealingWarmRestarts"]["T_0"], -# eta_min=schedulers["CosineAnnealingWarmRestarts"]["eta_min"], -# T_mult=schedulers["CosineAnnealingWarmRestarts"]["T_mult"], -# verbose=verbose, -# ) -# elif scheduler_name == "CyclicLR": -# scheduler = torch.optim.lr_scheduler.CyclicLR( -# optimizer, -# base_lr=schedulers["CyclicLR"]["base_lr"], -# max_lr=schedulers["CyclicLR"]["max_lr"], -# step_size_up=schedulers["CyclicLR"]["step_size_up"], -# mode=schedulers["CyclicLR"]["mode"], -# cycle_momentum=False, -# ) -# else: -# raise ValueError(f"Scheduler {scheduler_name} not provided") -# return scheduler -# -# -# if __name__ == "__main__": -# weights_location = str( -# # Path(__file__).resolve().parent / "../weights/wnet.pth" -# # "../wnet_SUM_MSE_DAPI_rad2_best_metric.pth" -# ) -# train( -# # weights_location -# ) From 01938fb8fdd45c8b5d6c5fe6ba8ea13cc87d6116 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 16:11:45 +0200 Subject: [PATCH 41/70] Deploy memory usage fix in inference as well --- napari_cellseg3d/code_models/worker_inference.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/napari_cellseg3d/code_models/worker_inference.py b/napari_cellseg3d/code_models/worker_inference.py index ceedac53..65623b36 100644 --- a/napari_cellseg3d/code_models/worker_inference.py +++ b/napari_cellseg3d/code_models/worker_inference.py @@ -784,6 +784,12 @@ def inference(self): ) model.to("cpu") + model = None + del model + inference_loader = None + del inference_loader + if torch.cuda.is_available(): + torch.cuda.empty_cache() # self.quit() except Exception as e: 
logger.exception(e) From 133b8fc9975ade291864e6cc21918854643123dd Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 16:13:03 +0200 Subject: [PATCH 42/70] Memory usage fix --- napari_cellseg3d/code_models/worker_training.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index f797b952..b2c1b264 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -1633,6 +1633,14 @@ def get_loader_func(num_samples): self.log("Saving complete, exiting") model.to("cpu") # clear (V)RAM + model = None + del model + val_loader = None + del val_loader + train_loader = None + del train_loader + if torch.cuda.is_available(): + torch.cuda.empty_cache() # val_ds = None # train_ds = None # val_loader = None From 9b99c11fc272640bdb71f36b1306a83985db807c Mon Sep 17 00:00:00 2001 From: C-Achard Date: Mon, 31 Jul 2023 16:22:16 +0200 Subject: [PATCH 43/70] UI tweak --- napari_cellseg3d/interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index 4efd2269..d5778442 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -493,7 +493,7 @@ def __init__( elif self._divide_factor == 10: self._value_label.setFixedWidth(30) else: - self._value_label.setFixedWidth(60) + self._value_label.setFixedWidth(50) self._value_label.setAlignment(Qt.AlignCenter) self._value_label.setSizePolicy( QSizePolicy.Policy.Fixed, QSizePolicy.Policy.Fixed From d3414e83871afe86953d8fa6572c592d89a40451 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 10:58:18 +0200 Subject: [PATCH 44/70] WNet cleanup + supervised training improvements --- .../code_models/worker_training.py | 234 +++++++----------- .../code_plugins/plugin_model_training.py | 8 +- 2 files changed, 90 insertions(+), 152 deletions(-) diff --git 
a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index b2c1b264..f9612377 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -32,7 +32,6 @@ EnsureType, EnsureTyped, LoadImaged, - # NormalizeIntensityd, Orientationd, Rand3DElasticd, RandAffined, @@ -57,7 +56,6 @@ LogSignal, QuantileNormalizationd, RemapTensor, - # RemapTensord, Threshold, TrainingReport, WeightsDownloader, @@ -238,12 +236,6 @@ def get_dataset(self, train_transforms): Returns: (tuple): A tuple containing the shape of the data and the dataset """ - # train_files = self.create_dataset_dict_no_labs( - # volume_directory=self.config.train_volume_directory - # ) - # self.log(train_files) - # self.log(len(train_files)) - # self.log(train_files[0]) train_files = self.config.train_data_dict first_volume = LoadImaged(keys=["image"])(train_files[0]) @@ -272,52 +264,6 @@ def get_dataset(self, train_transforms): return first_volume_shape, dataset - # def get_scheduler(self, optimizer, verbose=False): - # scheduler_name = self.config.scheduler - # if scheduler_name == "None": - # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( - # optimizer, - # T_max=100, - # eta_min=config.lr - 1e-6, - # verbose=verbose, - # ) - # - # elif scheduler_name == "ReduceLROnPlateau": - # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - # optimizer, - # mode="min", - # factor=schedulers["ReduceLROnPlateau"]["factor"], - # patience=schedulers["ReduceLROnPlateau"]["patience"], - # verbose=verbose, - # ) - # elif scheduler_name == "CosineAnnealingLR": - # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR( - # optimizer, - # T_max=schedulers["CosineAnnealingLR"]["T_max"], - # eta_min=schedulers["CosineAnnealingLR"]["eta_min"], - # verbose=verbose, - # ) - # elif scheduler_name == "CosineAnnealingWarmRestarts": - # scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts( - # optimizer, - # 
T_0=schedulers["CosineAnnealingWarmRestarts"]["T_0"], - # eta_min=schedulers["CosineAnnealingWarmRestarts"]["eta_min"], - # T_mult=schedulers["CosineAnnealingWarmRestarts"]["T_mult"], - # verbose=verbose, - # ) - # elif scheduler_name == "CyclicLR": - # scheduler = torch.optim.lr_scheduler.CyclicLR( - # optimizer, - # base_lr=schedulers["CyclicLR"]["base_lr"], - # max_lr=schedulers["CyclicLR"]["max_lr"], - # step_size_up=schedulers["CyclicLR"]["step_size_up"], - # mode=schedulers["CyclicLR"]["mode"], - # cycle_momentum=False, - # ) - # else: - # raise ValueError(f"Scheduler {scheduler_name} not provided") - # return scheduler - def _get_data(self): if self.config.do_augmentation: train_transforms = Compose( @@ -346,16 +292,7 @@ def _get_data(self): else: self.log("Loading volume dataset") (data_shape, dataset) = self.get_dataset(train_transforms) - # transform = Compose( - # [ - # ToTensor(), - # EnsureChannelFirst(channel_dim=0), - # ] - # ) - # dataset = [transform(im) for im in dataset] - # for data in dataset: - # self.log(f"Data shape: {data.shape}") - # break + logger.debug(f"Data shape : {data_shape}") dataloader = DataLoader( dataset, @@ -438,18 +375,15 @@ def train(self): # config=WANDB_CONFIG, project="WNet-benchmark", mode=WANDB_MODE # ) - set_determinism( - seed=self.config.deterministic_config.seed - ) # use default seed from NP_MAX + set_determinism(seed=self.config.deterministic_config.seed) torch.use_deterministic_algorithms(True, warn_only=True) normalize_function = utils.remap_image device = self.config.device - # self.log(f"Using device: {device}") self.log_parameters() self.log("Initializing training...") - self.log("Getting the data") + self.log("- Getting the data") dataloader, eval_dataloader, data_shape = self._get_data() @@ -459,7 +393,6 @@ def train(self): ################################################### # Training the model # ################################################### - self.log("Initializing the model:") self.log("- Getting 
the model") # Initialize the model model = WNet( @@ -545,12 +478,6 @@ def train(self): f"Unknown reconstruction loss : {self.config.reconstruction_loss} not supported" ) - # self.log("- getting the learning rate schedulers") - # Initialize the learning rate schedulers - # scheduler = get_scheduler(self.config, optimizer) - # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - # optimizer, mode="min", factor=0.5, patience=10, verbose=True - # ) model.train() self.log("Ready") @@ -574,34 +501,31 @@ def train(self): for _i, batch in enumerate(dataloader): # raise NotImplementedError("testing") - image = batch["image"].to(device) - for i in range(image.shape[0]): - for j in range(image.shape[1]): - image[i, j] = normalize_function(image[i, j]) - # if self.config.batch_size == 1: - # image = image.unsqueeze(0) - # else: - # image = image.unsqueeze(0) - # image = torch.swapaxes(image, 0, 1) + image_batch = batch["image"].to(device) + # Normalize the image + for i in range(image_batch.shape[0]): + for j in range(image_batch.shape[1]): + image_batch[i, j] = normalize_function( + image_batch[i, j] + ) # Forward pass - enc = model.forward_encoder(image) + enc = model.forward_encoder(image_batch) # Compute the Ncuts loss - Ncuts = criterionE(enc, image) + Ncuts = criterionE(enc, image_batch) epoch_ncuts_loss += Ncuts.item() # if WANDB_INSTALLED: # wandb.log({"Ncuts loss": Ncuts.item()}) - # Forward pass - enc, dec = model(image) + dec = model.forward_decoder(enc) # Compute the reconstruction loss if isinstance(criterionW, nn.MSELoss): - reconstruction_loss = criterionW(dec, image) + reconstruction_loss = criterionW(dec, image_batch) elif isinstance(criterionW, nn.BCELoss): reconstruction_loss = criterionW( torch.sigmoid(dec), - utils.remap_image(image, new_max=1), + utils.remap_image(image_batch, new_max=1), ) epoch_rec_loss += reconstruction_loss.item() @@ -622,13 +546,6 @@ def train(self): loss.backward(loss) optimizer.step() - # if self.config.scheduler == 
"CosineAnnealingWarmRestarts": - # scheduler.step(epoch + _i / len(dataloader)) - # if ( - # self.config.scheduler == "CosineAnnealingLR" - # or self.config.scheduler == "CyclicLR" - # ): - # scheduler.step() if self._abort_requested: dataloader = None del dataloader @@ -656,7 +573,7 @@ def train(self): try: enc_out = enc[0].detach().cpu().numpy() dec_out = dec[0].detach().cpu().numpy() - image = image[0].detach().cpu().numpy() + image_batch = image_batch[0].detach().cpu().numpy() images_dict = { "Encoder output": { @@ -674,7 +591,7 @@ def train(self): "cmap": "gist_earth", }, "Input image": { - "data": np.squeeze(image), + "data": np.squeeze(image_batch), "cmap": "inferno", }, } @@ -713,11 +630,6 @@ def train(self): f"Weighted sum of losses difference: {total_losses[-1] - total_losses[-2]:.5f}" ) - # Update the learning rate - # if self.config.scheduler == "ReduceLROnPlateau": - # # schedulerE.step(epoch_ncuts_loss) - # # schedulerW.step(epoch_rec_loss) - # scheduler.step(epoch_rec_loss) if ( eval_dataloader is not None and (epoch + 1) % self.config.validation_interval == 0 @@ -774,6 +686,7 @@ def train(self): ) dices = [] + # Find in which channel the labels are (avoid background) for channel in range(val_outputs.shape[1]): dices.append( utils.dice_coeff( @@ -1020,19 +933,19 @@ def log_parameters(self): f"Percentage of dataset used for validation : {self.config.validation_percent * 100}%" ) - self.log("-" * 10) + # self.log("-" * 10) self.log("Training files :\n") [ - self.log(f"{Path(train_file['image']).name}\n") + self.log(f"- {Path(train_file['image']).name}\n") for train_file in self.train_files ] - self.log("-" * 10) + # self.log("-" * 10) self.log("Validation files :\n") [ - self.log(f"{Path(val_file['image']).name}\n") + self.log(f"- {Path(val_file['image']).name}\n") for val_file in self.val_files ] - self.log("-" * 10) + # self.log("-" * 10) if self.config.deterministic_config.enabled: self.log("Deterministic training is enabled") @@ -1067,7 +980,7 @@ 
def log_parameters(self): ) # self.log("\n") - self.log("-" * 20) + # self.log("-" * 20) def train(self): """Trains the PyTorch model for the given number of epochs, with the selected model and data, @@ -1147,7 +1060,8 @@ def train(self): PADDING = utils.get_padding_dim(size) model = model_class(input_img_size=PADDING, use_checkpoint=True) - model = model.to(self.config.device) + device = torch.device(self.config.device) + model = model.to(device) epoch_loss_values = [] val_metric_values = [] @@ -1204,9 +1118,9 @@ def train(self): RandFlipd(keys=["image", "label"]), RandRotate90d(keys=["image", "label"]), RandAffined( - keys=["image", "label"], + keys=["image"], ), - EnsureTyped(keys=["image", "label"]), + EnsureTyped(keys=["image"]), ] ) ) @@ -1215,19 +1129,15 @@ def train(self): val_transforms = Compose( [ - # LoadImaged(keys=["image", "label"]), - # EnsureChannelFirstd(keys=["image", "label"]), EnsureTyped(keys=["image", "label"]), ] ) - # self.log("Loading dataset...\n") - def get_loader_func(num_samples): + def get_patch_loader_func(num_samples): return Compose( [ LoadImaged(keys=["image", "label"]), EnsureChannelFirstd(keys=["image", "label"]), - QuantileNormalizationd(keys=["image"]), RandSpatialCropSamplesd( keys=["image", "label"], roi_size=( @@ -1244,7 +1154,8 @@ def get_loader_func(num_samples): utils.get_padding_dim(self.config.sample_size) ), ), - EnsureTyped(keys=["image", "label"]), + QuantileNormalizationd(keys=["image"]), + EnsureTyped(keys=["image"]), ] ) @@ -1260,15 +1171,30 @@ def get_loader_func(num_samples): self.config.num_samples * (1 - self.config.validation_percent) ) - sample_loader_train = get_loader_func(num_train_samples) - sample_loader_eval = get_loader_func(num_val_samples) + if num_train_samples < 2: + self.log( + "WARNING : not enough samples for training. Raising to 2" + ) + num_train_samples = 2 + if num_val_samples < 2: + self.log( + "WARNING : not enough samples for validation. 
Raising to 2" + ) + num_val_samples = 2 + + sample_loader_train = get_patch_loader_func( + num_train_samples + ) + sample_loader_eval = get_patch_loader_func(num_val_samples) else: num_train_samples = ( num_val_samples ) = self.config.num_samples - sample_loader_train = get_loader_func(num_train_samples) - sample_loader_eval = get_loader_func(num_val_samples) + sample_loader_train = get_patch_loader_func( + num_train_samples + ) + sample_loader_eval = get_patch_loader_func(num_val_samples) logger.debug(f"AMOUNT of train samples : {num_train_samples}") logger.debug( @@ -1276,20 +1202,19 @@ def get_loader_func(num_samples): ) logger.debug("train_ds") - train_ds = PatchDataset( + train_dataset = PatchDataset( data=self.train_files, transform=train_transforms, patch_func=sample_loader_train, samples_per_image=num_train_samples, ) logger.debug("val_ds") - val_ds = PatchDataset( + validation_dataset = PatchDataset( data=self.val_files, transform=val_transforms, patch_func=sample_loader_eval, samples_per_image=num_val_samples, ) - else: load_whole_images = Compose( [ @@ -1309,25 +1234,27 @@ def get_loader_func(num_samples): ] ) logger.debug("Cache dataset : train") - train_ds = CacheDataset( + train_dataset = CacheDataset( data=self.train_files, transform=Compose(load_whole_images, train_transforms), ) logger.debug("Cache dataset : val") - val_ds = CacheDataset( + validation_dataset = CacheDataset( data=self.val_files, transform=load_whole_images ) logger.debug("Dataloader") train_loader = DataLoader( - train_ds, + train_dataset, batch_size=self.config.batch_size, shuffle=True, num_workers=2, collate_fn=pad_list_data_collate, ) - val_loader = DataLoader( - val_ds, batch_size=self.config.batch_size, num_workers=2 + validation_loader = DataLoader( + validation_dataset, + batch_size=self.config.batch_size, + num_workers=2, ) logger.info("\nDone") @@ -1372,7 +1299,7 @@ def get_loader_func(num_samples): model.load_state_dict( torch.load( weights, - 
map_location=self.config.device, + map_location=device, ), strict=True, ) @@ -1396,7 +1323,7 @@ def get_loader_func(num_samples): self.log_parameters() - device = torch.device(self.config.device) + # device = torch.device(self.config.device) self.set_loss_from_config() # if model_name == "test": @@ -1427,7 +1354,8 @@ def get_loader_func(num_samples): batch_data["image"].to(device), batch_data["label"].to(device), ) - + # logger.debug(f"Inputs shape : {inputs.shape}") + # logger.debug(f"Labels shape : {labels.shape}") optimizer.zero_grad() outputs = model(inputs) # self.log(f"Output dimensions : {outputs.shape}") @@ -1437,14 +1365,31 @@ def get_loader_func(num_samples): ] # TODO(cyril): adapt if additional channels if len(outputs.shape) < 4: outputs = outputs.unsqueeze(0) + # logger.debug(f"Outputs shape : {outputs.shape}") loss = self.loss_function(outputs, labels) loss.backward() optimizer.step() epoch_loss += loss.detach().item() self.log( - f"* {step}/{len(train_ds) // train_loader.batch_size}, " + f"* {step}/{len(train_dataset) // train_loader.batch_size}, " f"Train loss: {loss.detach().item():.4f}" ) + + if self._abort_requested: + self.log("Aborting training...") + model = None + del model + train_loader = None + del train_loader + validation_loader = None + del validation_loader + optimizer = None + del optimizer + scheduler = None + del scheduler + if device.type == "cuda": + torch.cuda.empty_cache() + yield TrainingReport( show_plot=False, weights=model.state_dict() ) @@ -1476,7 +1421,7 @@ def get_loader_func(num_samples): model.eval() self.log("Performing validation...") with torch.no_grad(): - for val_data in val_loader: + for val_data in validation_loader: val_inputs, val_labels = ( val_data["image"].to(device), val_data["label"].to(device), @@ -1635,17 +1580,16 @@ def get_loader_func(num_samples): # clear (V)RAM model = None del model - val_loader = None - del val_loader train_loader = None del train_loader - if torch.cuda.is_available(): + 
validation_loader = None + del validation_loader + optimizer = None + del optimizer + scheduler = None + del scheduler + if device.type == "cuda": torch.cuda.empty_cache() - # val_ds = None - # train_ds = None - # val_loader = None - # train_loader = None - # torch.cuda.empty_cache() except Exception as e: self.raise_error(e, "Error in training") diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 9a7027ed..568089f6 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -976,10 +976,8 @@ def start(self): self.worker.warn_signal.connect(self.log.warn) self.worker.started.connect(self.on_start) - self.worker.yielded.connect(partial(self.on_yield)) self.worker.finished.connect(self.on_finish) - self.worker.errored.connect(self.on_error) if self.worker.is_running: @@ -1218,17 +1216,12 @@ def on_finish(self): self.start_btn.setText("Start") [btn.setVisible(True) for btn in self.close_buttons] - # del self.worker - - # self.empty_cuda_cache() - if self.config.save_as_zip: shutil.make_archive( self.worker_config.results_path_folder, "zip", self.worker_config.results_path_folder, ) - self.worker = None def on_error(self): @@ -1239,6 +1232,7 @@ def on_error(self): def on_stop(self): self._remove_result_layers() self.worker = None + self._stop_requested = False self.start_btn.setText("Start") [btn.setVisible(True) for btn in self.close_buttons] From 53dabb554ac46570e3e08c6e1e23be2b843f7379 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 11:12:55 +0200 Subject: [PATCH 45/70] Change Dice metric include_background for WNet To avoid Max Dice calculation --- .../code_models/models/wnet/model.py | 2 +- .../code_models/worker_training.py | 83 +++++++++++-------- 2 files changed, 50 insertions(+), 35 deletions(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py 
b/napari_cellseg3d/code_models/models/wnet/model.py index 989ae3b7..28643588 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -62,7 +62,7 @@ def __init__( ) def forward(self, x): - """Forward pass of the W-Net model.""" + """Forward pass of the W-Net model. Returns the segmentation and the reconstructed image.""" enc = self.forward_encoder(x) return enc, self.forward_decoder(enc) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index f9612377..86c0bb78 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -287,10 +287,10 @@ def _get_data(self): train_transforms = EnsureTyped(keys=["image"]) if self.config.sampling: - self.log("Loading patch dataset") + logger.debug("Loading patch dataset") (data_shape, dataset) = self.get_patch_dataset(train_transforms) else: - self.log("Loading volume dataset") + logger.debug("Loading volume dataset") (data_shape, dataset) = self.get_dataset(train_transforms) logger.debug(f"Data shape : {data_shape}") @@ -388,7 +388,7 @@ def train(self): dataloader, eval_dataloader, data_shape = self._get_data() dice_metric = DiceMetric( - include_background=False, reduction="mean", get_not_nans=False + include_background=True, reduction="mean", get_not_nans=False ) ################################################### # Training the model # @@ -510,15 +510,13 @@ def train(self): ) # Forward pass - enc = model.forward_encoder(image_batch) + enc, dec = model(image_batch) # Compute the Ncuts loss Ncuts = criterionE(enc, image_batch) epoch_ncuts_loss += Ncuts.item() # if WANDB_INSTALLED: # wandb.log({"Ncuts loss": Ncuts.item()}) - dec = model.forward_decoder(enc) - # Compute the reconstruction loss if isinstance(criterionW, nn.MSELoss): reconstruction_loss = criterionW(dec, image_batch) @@ -685,32 +683,33 @@ def train(self): f"Val decoder outputs shape: 
{val_decoder_outputs.shape}" ) - dices = [] + # dices = [] # Find in which channel the labels are (avoid background) - for channel in range(val_outputs.shape[1]): - dices.append( - utils.dice_coeff( - y_pred=val_outputs[ - 0, channel : (channel + 1), :, :, : - ], - y_true=val_labels[0], - ) - ) - logger.debug(f"DICE COEFF: {dices}") - max_dice_channel = torch.argmax( - torch.Tensor(dices) - ) - logger.debug( - f"MAX DICE CHANNEL: {max_dice_channel}" - ) + # for channel in range(val_outputs.shape[1]): + # dices.append( + # utils.dice_coeff( + # y_pred=val_outputs[ + # 0, channel : (channel + 1), :, :, : + # ], + # y_true=val_labels[0], + # ) + # ) + # logger.debug(f"DICE COEFF: {dices}") + # max_dice_channel = torch.argmax( + # torch.Tensor(dices) + # ) + # logger.debug( + # f"MAX DICE CHANNEL: {max_dice_channel}" + # ) dice_metric( - y_pred=val_outputs[ - :, - max_dice_channel : (max_dice_channel + 1), - :, - :, - :, - ], + y_pred=val_outputs, + # [ + # :, + # max_dice_channel : (max_dice_channel + 1), + # :, + # :, + # :, + # ], y=val_labels, ) @@ -736,11 +735,19 @@ def train(self): # wandb.log({"val/dice_metric": metric}) dec_out_val = ( - val_decoder_outputs[0].detach().cpu().numpy() + val_decoder_outputs[0] + .detach() + .cpu() + .numpy() + .copy() + ) + enc_out_val = ( + val_outputs[0].detach().cpu().numpy().copy() + ) + lab_out_val = ( + val_labels[0].detach().cpu().numpy().copy() ) - enc_out_val = val_outputs[0].detach().cpu().numpy() - lab_out_val = val_labels[0].detach().cpu().numpy() - val_in = val_inputs[0].detach().cpu().numpy() + val_in = val_inputs[0].detach().cpu().numpy().copy() display_dict = { "Reconstruction": { @@ -760,6 +767,14 @@ def train(self): "cmap": "bop blue", }, } + val_decoder_outputs = None + del val_decoder_outputs + val_outputs = None + del val_outputs + val_labels = None + del val_labels + val_inputs = None + del val_inputs yield TrainingReport( epoch=epoch, From 1b12c4a671e29515d8e56b657e3ec473e04fb4f6 Mon Sep 17 00:00:00 2001 
From: C-Achard Date: Wed, 2 Aug 2023 11:18:25 +0200 Subject: [PATCH 46/70] Set better default LR across un/supervised --- napari_cellseg3d/code_plugins/plugin_model_training.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 568089f6..1db818e6 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -443,11 +443,15 @@ def _toggle_unsupervised_mode(self, enabled=False): self.start_btn = self.start_button_unsupervised self.image_filewidget.text_field.setText("Validation images") self.labels_filewidget.text_field.setText("Validation labels") + self.learning_rate_choice.lr_value_choice.setValue(1) + self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(1) else: unsupervised = False self.start_btn = self.start_button_supervised self.image_filewidget.text_field.setText("Images directory") self.labels_filewidget.text_field.setText("Labels directory") + self.learning_rate_choice.lr_value_choice.setValue(2) + self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(3) supervised = not unsupervised self.unsupervised_mode = unsupervised From e5a0be460a6529d6d1e3fe83c0903d8e2ad63a1b Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 11:34:29 +0200 Subject: [PATCH 47/70] Update model.py --- .../code_models/models/wnet/model.py | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/napari_cellseg3d/code_models/models/wnet/model.py b/napari_cellseg3d/code_models/models/wnet/model.py index 28643588..d8ba3a78 100644 --- a/napari_cellseg3d/code_models/models/wnet/model.py +++ b/napari_cellseg3d/code_models/models/wnet/model.py @@ -99,22 +99,22 @@ def __init__( self.max_pool = nn.MaxPool3d(2) self.in_b = InBlock(in_channels, self.channels[0], dropout=dropout) self.conv1 = Block(channels[0], self.channels[1], dropout=dropout) - # 
self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) + self.conv2 = Block(channels[1], self.channels[2], dropout=dropout) # self.conv3 = Block(channels[2], self.channels[3], dropout=dropout) # self.bot = Block(channels[3], self.channels[4], dropout=dropout) - # self.bot = Block(channels[2], self.channels[3], dropout=dropout) - self.bot = Block(channels[1], self.channels[2], dropout=dropout) + self.bot = Block(channels[2], self.channels[3], dropout=dropout) + # self.bot = Block(channels[1], self.channels[2], dropout=dropout) # self.bot = Block(channels[0], self.channels[1], dropout=dropout) # self.deconv1 = Block(channels[4], self.channels[3], dropout=dropout) - # self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) + self.deconv2 = Block(channels[3], self.channels[2], dropout=dropout) self.deconv3 = Block(channels[2], self.channels[1], dropout=dropout) self.out_b = OutBlock(channels[1], out_channels, dropout=dropout) # self.conv_trans1 = nn.ConvTranspose3d( # self.channels[4], self.channels[3], 2, stride=2 # ) - # self.conv_trans2 = nn.ConvTranspose3d( - # self.channels[3], self.channels[2], 2, stride=2 - # ) + self.conv_trans2 = nn.ConvTranspose3d( + self.channels[3], self.channels[2], 2, stride=2 + ) self.conv_trans3 = nn.ConvTranspose3d( self.channels[2], self.channels[1], 2, stride=2 ) @@ -129,11 +129,11 @@ def forward(self, x): """Forward pass of the U-Net model.""" in_b = self.in_b(x) c1 = self.conv1(self.max_pool(in_b)) - # c2 = self.conv2(self.max_pool(c1)) + c2 = self.conv2(self.max_pool(c1)) # c3 = self.conv3(self.max_pool(c2)) # x = self.bot(self.max_pool(c3)) - # x = self.bot(self.max_pool(c2)) - x = self.bot(self.max_pool(c1)) + x = self.bot(self.max_pool(c2)) + # x = self.bot(self.max_pool(c1)) # x = self.bot(self.max_pool(in_b)) # x = self.deconv1( # torch.cat( @@ -144,15 +144,15 @@ def forward(self, x): # dim=1, # ) # ) - # x = self.deconv2( - # torch.cat( - # [ - # c2, - # self.conv_trans2(x), - # ], - # dim=1, - # ) - 
# ) + x = self.deconv2( + torch.cat( + [ + c2, + self.conv_trans2(x), + ], + dim=1, + ) + ) x = self.deconv3( torch.cat( [ From c6243b8e0d8625a2f95e6c93de9b358db39e70f6 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 11:44:36 +0200 Subject: [PATCH 48/70] Update WNet weights --- napari_cellseg3d/code_models/models/model_WNet.py | 2 +- .../code_models/models/pretrained/pretrained_model_urls.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/napari_cellseg3d/code_models/models/model_WNet.py b/napari_cellseg3d/code_models/models/model_WNet.py index e50a58a1..bc1b3818 100644 --- a/napari_cellseg3d/code_models/models/model_WNet.py +++ b/napari_cellseg3d/code_models/models/model_WNet.py @@ -5,7 +5,7 @@ class WNet_(WNet_encoder): use_default_training = False - weights_file = "wnet.pth" + weights_file = "wnet_latest.pth" def __init__( self, diff --git a/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json b/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json index 3c393d47..d9e1e4b0 100644 --- a/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json +++ b/napari_cellseg3d/code_models/models/pretrained/pretrained_model_urls.json @@ -3,7 +3,7 @@ "SegResNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/SegResNet_latest.tar.gz", "VNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/VNet_latest.tar.gz", "SwinUNetR": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/SwinUNetR_latest.tar.gz", - "WNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/wnet.tar.gz", + "WNet": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/wnet_latest.tar.gz", "WNet_ONNX": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/wnet_onnx.tar.gz", "test": "https://huggingface.co/C-Achard/cellseg3d/resolve/main/test.tar.gz" } From fe1a2f87ec1ae937430873f105e8ea67b1f5fc86 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 11:53:10 +0200 Subject: [PATCH 
49/70] Fix default LR + sup. test --- napari_cellseg3d/_tests/test_supervised_training.py | 7 +++++-- napari_cellseg3d/code_plugins/plugin_model_training.py | 8 ++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/napari_cellseg3d/_tests/test_supervised_training.py b/napari_cellseg3d/_tests/test_supervised_training.py index 676133ff..1a7fac06 100644 --- a/napari_cellseg3d/_tests/test_supervised_training.py +++ b/napari_cellseg3d/_tests/test_supervised_training.py @@ -12,12 +12,15 @@ im_path = Path(__file__).resolve().parent / "res/test.tif" im_path_str = str(im_path) -def test_create_supervised_worker_from_config(make_napari_viewer_proxy): +def test_create_supervised_worker_from_config(make_napari_viewer_proxy): viewer = make_napari_viewer_proxy() widget = Trainer(viewer=viewer) widget.device_choice.setCurrentIndex(0) - worker = widget._create_worker() + widget.model_choice.setCurrentIndex(0) + widget._toggle_unsupervised_mode(enabled=False) + assert widget.model_choice.currentText() == list(MODEL_LIST.keys())[0] + worker = widget._create_worker(additional_results_description="test") default_config = config.SupervisedTrainingWorkerConfig() excluded = [ "results_path_folder", diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 1db818e6..811cbf7c 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -443,15 +443,15 @@ def _toggle_unsupervised_mode(self, enabled=False): self.start_btn = self.start_button_unsupervised self.image_filewidget.text_field.setText("Validation images") self.labels_filewidget.text_field.setText("Validation labels") - self.learning_rate_choice.lr_value_choice.setValue(1) - self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(1) + self.learning_rate_choice.lr_value_choice.setValue(2) + self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(3) else: 
unsupervised = False self.start_btn = self.start_button_supervised self.image_filewidget.text_field.setText("Images directory") self.labels_filewidget.text_field.setText("Labels directory") - self.learning_rate_choice.lr_value_choice.setValue(2) - self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(3) + self.learning_rate_choice.lr_value_choice.setValue(1) + self.learning_rate_choice.lr_exponent_choice.setCurrentIndex(1) supervised = not unsupervised self.unsupervised_mode = unsupervised From 6e9762a761258275da40f2bc1ed444961f4a2150 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 11:59:19 +0200 Subject: [PATCH 50/70] Fix new unsup LR in tests --- napari_cellseg3d/_tests/test_unsup_training.py | 6 ++---- napari_cellseg3d/config.py | 1 + 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/napari_cellseg3d/_tests/test_unsup_training.py b/napari_cellseg3d/_tests/test_unsup_training.py index 9b26167a..acdf2c01 100644 --- a/napari_cellseg3d/_tests/test_unsup_training.py +++ b/napari_cellseg3d/_tests/test_unsup_training.py @@ -5,15 +5,13 @@ Trainer, ) +im_path = Path(__file__).resolve().parent / "res/test.tif" -def test_unsupervised_worker(make_napari_viewer_proxy): - im_path = Path(__file__).resolve().parent / "res/test.tif" - # im_path_str = str(im_path) +def test_unsupervised_worker(make_napari_viewer_proxy): unsup_viewer = make_napari_viewer_proxy() widget = Trainer(viewer=unsup_viewer) widget.device_choice.setCurrentIndex(0) - widget.model_choice.setCurrentText("WNet") widget._toggle_unsupervised_mode(enabled=True) diff --git a/napari_cellseg3d/config.py b/napari_cellseg3d/config.py index f9536d93..6c8db79b 100644 --- a/napari_cellseg3d/config.py +++ b/napari_cellseg3d/config.py @@ -285,6 +285,7 @@ class WNetTrainingWorkerConfig(TrainingWorkerConfig): out_channels: int = 1 # decoder (reconstruction) output channels num_classes: int = 2 # encoder output channels dropout: float = 0.65 + learning_rate: np.float64 = 2e-5 use_clipping: 
bool = False # use gradient clipping clipping: float = 1.0 # clipping value weight_decay: float = 0.01 # 1e-5 # weight decay (used 0.01 historically) From 420a641090bf22c1b2690f77c7edfb4e23cd0c71 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Fri, 28 Jul 2023 15:30:01 +0200 Subject: [PATCH 51/70] Fix dir for saving in tests --- napari_cellseg3d/code_models/worker_inference.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/napari_cellseg3d/code_models/worker_inference.py b/napari_cellseg3d/code_models/worker_inference.py index 65623b36..3fb5bc95 100644 --- a/napari_cellseg3d/code_models/worker_inference.py +++ b/napari_cellseg3d/code_models/worker_inference.py @@ -436,6 +436,8 @@ def save_image( + f"_{time}" + filetype ) + if not Path(self.config.results_path).exists(): + Path(self.config.results_path).mkdir(parents=True, exist_ok=True) try: imwrite(file_path, image) except ValueError as e: From 912e6bd2790d260bff6da182605bb04ae8430631 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 13:51:40 +0200 Subject: [PATCH 52/70] Testing fixes Due to Singleton Trainer widget --- ...ed_training.py => test_training_plugin.py} | 37 +++++++++++++-- .../_tests/test_unsup_training.py | 45 ------------------- .../code_plugins/plugin_model_training.py | 6 ++- 3 files changed, 39 insertions(+), 49 deletions(-) rename napari_cellseg3d/_tests/{test_supervised_training.py => test_training_plugin.py} (71%) delete mode 100644 napari_cellseg3d/_tests/test_unsup_training.py diff --git a/napari_cellseg3d/_tests/test_supervised_training.py b/napari_cellseg3d/_tests/test_training_plugin.py similarity index 71% rename from napari_cellseg3d/_tests/test_supervised_training.py rename to napari_cellseg3d/_tests/test_training_plugin.py index 1a7fac06..09bf3e9d 100644 --- a/napari_cellseg3d/_tests/test_supervised_training.py +++ b/napari_cellseg3d/_tests/test_training_plugin.py @@ -13,9 +13,10 @@ im_path_str = str(im_path) -def 
test_create_supervised_worker_from_config(make_napari_viewer_proxy): +def test_worker_configs(make_napari_viewer_proxy): viewer = make_napari_viewer_proxy() widget = Trainer(viewer=viewer) + # test supervised config and worker widget.device_choice.setCurrentIndex(0) widget.model_choice.setCurrentIndex(0) widget._toggle_unsupervised_mode(enabled=False) @@ -34,6 +35,36 @@ def test_create_supervised_worker_from_config(make_napari_viewer_proxy): assert getattr(default_config, attr) == getattr( worker.config, attr ) + # test unsupervised config and worker + widget.model_choice.setCurrentText("WNet") + widget._toggle_unsupervised_mode(enabled=True) + default_config = config.WNetTrainingWorkerConfig() + worker = widget._create_worker(additional_results_description="TEST_1") + excluded = ["results_path_folder", "sample_size", "weights_info"] + for attr in dir(default_config): + if not attr.startswith("__") and attr not in excluded: + assert getattr(default_config, attr) == getattr( + worker.config, attr + ) + widget.unsupervised_images_filewidget.text_field.setText( + str(im_path.parent) + ) + widget.data = widget.create_dataset_dict_no_labs() + worker = widget._create_worker(additional_results_description="TEST_2") + dataloader, eval_dataloader, data_shape = worker._get_data() + assert eval_dataloader is None + assert data_shape == (6, 6, 6) + + widget.images_filepaths = [str(im_path)] + widget.labels_filepaths = [str(im_path)] + # widget.unsupervised_eval_data = widget.create_train_dataset_dict() + worker = widget._create_worker(additional_results_description="TEST_3") + dataloader, eval_dataloader, data_shape = worker._get_data() + assert widget.unsupervised_eval_data is not None + assert eval_dataloader is not None + assert widget.unsupervised_eval_data[0]["image"] is not None + assert widget.unsupervised_eval_data[0]["label"] is not None + assert data_shape == (6, 6, 6) def test_update_loss_plot(make_napari_viewer_proxy): @@ -86,8 +117,8 @@ def 
test_training(make_napari_viewer_proxy, qtbot): widget.log = LogFixture() viewer.window.add_dock_widget(widget) - widget.images_filepath = None - widget.labels_filepaths = None + widget.images_filepath = [] + widget.labels_filepaths = [] assert not widget.check_ready() diff --git a/napari_cellseg3d/_tests/test_unsup_training.py b/napari_cellseg3d/_tests/test_unsup_training.py deleted file mode 100644 index acdf2c01..00000000 --- a/napari_cellseg3d/_tests/test_unsup_training.py +++ /dev/null @@ -1,45 +0,0 @@ -from pathlib import Path - -from napari_cellseg3d import config -from napari_cellseg3d.code_plugins.plugin_model_training import ( - Trainer, -) - -im_path = Path(__file__).resolve().parent / "res/test.tif" - - -def test_unsupervised_worker(make_napari_viewer_proxy): - unsup_viewer = make_napari_viewer_proxy() - widget = Trainer(viewer=unsup_viewer) - widget.device_choice.setCurrentIndex(0) - widget.model_choice.setCurrentText("WNet") - widget._toggle_unsupervised_mode(enabled=True) - - default_config = config.WNetTrainingWorkerConfig() - worker = widget._create_worker(additional_results_description="TEST_1") - excluded = ["results_path_folder", "sample_size", "weights_info"] - for attr in dir(default_config): - if not attr.startswith("__") and attr not in excluded: - assert getattr(default_config, attr) == getattr( - worker.config, attr - ) - - widget.unsupervised_images_filewidget.text_field.setText( - str(im_path.parent) - ) - widget.data = widget.create_dataset_dict_no_labs() - worker = widget._create_worker(additional_results_description="TEST_2") - dataloader, eval_dataloader, data_shape = worker._get_data() - assert eval_dataloader is None - assert data_shape == (6, 6, 6) - - widget.images_filepaths = [str(im_path)] - widget.labels_filepaths = [str(im_path)] - # widget.unsupervised_eval_data = widget.create_train_dataset_dict() - worker = widget._create_worker(additional_results_description="TEST_3") - dataloader, eval_dataloader, data_shape = 
worker._get_data() - assert widget.unsupervised_eval_data is not None - assert eval_dataloader is not None - assert widget.unsupervised_eval_data[0]["image"] is not None - assert widget.unsupervised_eval_data[0]["label"] is not None - assert data_shape == (6, 6, 6) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 811cbf7c..c4211ee3 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -431,7 +431,11 @@ def check_ready(self): * False and displays a warning if not """ - if self.images_filepaths == [] and self.labels_filepaths != []: + if ( + self.images_filepaths == [] + or self.labels_filepaths == [] + or len(self.images_filepaths) != len(self.labels_filepaths) + ): logger.warning("Image and label paths are not correctly set") return False return True From c1aecb88efb97740bcfc1598f6dfb75d4861581f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 14:35:02 +0200 Subject: [PATCH 53/70] Test unsupervised training and raise coverage --- napari_cellseg3d/_tests/fixtures.py | 39 ++++++++ ...ning_plugin.py => test_plugin_training.py} | 51 ---------- napari_cellseg3d/_tests/test_training.py | 94 +++++++++++++++++++ .../code_models/worker_training.py | 23 +++-- 4 files changed, 149 insertions(+), 58 deletions(-) rename napari_cellseg3d/_tests/{test_training_plugin.py => test_plugin_training.py} (70%) create mode 100644 napari_cellseg3d/_tests/test_training.py diff --git a/napari_cellseg3d/_tests/fixtures.py b/napari_cellseg3d/_tests/fixtures.py index b3044799..001b1d64 100644 --- a/napari_cellseg3d/_tests/fixtures.py +++ b/napari_cellseg3d/_tests/fixtures.py @@ -1,3 +1,4 @@ +import torch from qtpy.QtWidgets import QTextEdit from napari_cellseg3d.utils import LOGGER as logger @@ -17,3 +18,41 @@ def warn(self, warning): def error(self, e): raise (e) + + +class WNetFixture(torch.nn.Module): + def 
__init__(self): + super().__init__() + self.mock_conv = torch.nn.Conv3d(1, 1, 1) + self.mock_conv.requires_grad_(False) + + def forward_encoder(self, x): + return x + + def forward_decoder(self, x): + return x + + def forward(self, x): + return self.forward_encoder(x), self.forward_decoder(x) + + +class OptimizerFixture: + def __call__(self, x): + return x + + def zero_grad(self): + pass + + def step(self): + pass + + +class LossFixture: + def __call__(self, x): + return x + + def backward(self, x): + pass + + def item(self): + return 0 diff --git a/napari_cellseg3d/_tests/test_training_plugin.py b/napari_cellseg3d/_tests/test_plugin_training.py similarity index 70% rename from napari_cellseg3d/_tests/test_training_plugin.py rename to napari_cellseg3d/_tests/test_plugin_training.py index 09bf3e9d..3e6dfe8e 100644 --- a/napari_cellseg3d/_tests/test_training_plugin.py +++ b/napari_cellseg3d/_tests/test_plugin_training.py @@ -1,9 +1,6 @@ from pathlib import Path from napari_cellseg3d import config -from napari_cellseg3d._tests.fixtures import LogFixture -from napari_cellseg3d.code_models.models.model_test import TestModel -from napari_cellseg3d.code_models.workers_utils import TrainingReport from napari_cellseg3d.code_plugins.plugin_model_training import ( Trainer, ) @@ -109,51 +106,3 @@ def test_check_matching_losses(): worker = plugin._create_supervised_worker_from_config(config) assert plugin.loss_list == list(worker.loss_dict.keys()) - - -def test_training(make_napari_viewer_proxy, qtbot): - viewer = make_napari_viewer_proxy() - widget = Trainer(viewer) - widget.log = LogFixture() - viewer.window.add_dock_widget(widget) - - widget.images_filepath = [] - widget.labels_filepaths = [] - - assert not widget.check_ready() - - widget.images_filepaths = [im_path_str] - widget.labels_filepaths = [im_path_str] - widget.epoch_choice.setValue(1) - widget.val_interval_choice.setValue(1) - - assert widget.check_ready() - - MODEL_LIST["test"] = TestModel - 
widget.model_choice.addItem("test") - widget.model_choice.setCurrentText("test") - widget.unsupervised_mode = False - worker_config = widget._set_worker_config() - assert worker_config.model_info.name == "test" - worker = widget._create_supervised_worker_from_config(worker_config) - worker.config.train_data_dict = [ - {"image": im_path_str, "label": im_path_str} - ] - worker.config.val_data_dict = [ - {"image": im_path_str, "label": im_path_str} - ] - worker.config.max_epochs = 1 - worker.config.validation_interval = 2 - worker.log_parameters() - res = next(worker.train()) - - assert isinstance(res, TrainingReport) - assert res.epoch == 0 - - widget.worker = worker - res.show_plot = True - res.loss_1_values = {"loss": [1, 1, 1, 1]} - res.loss_2_values = [1, 1, 1, 1] - widget.on_yield(res) - assert widget.loss_1_values["loss"] == [1, 1, 1, 1] - assert widget.loss_2_values == [1, 1, 1, 1] diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py new file mode 100644 index 00000000..14d4b1da --- /dev/null +++ b/napari_cellseg3d/_tests/test_training.py @@ -0,0 +1,94 @@ +from pathlib import Path + +from napari_cellseg3d._tests.fixtures import ( + LogFixture, + LossFixture, + OptimizerFixture, + WNetFixture, +) +from napari_cellseg3d.code_models.models.model_test import TestModel +from napari_cellseg3d.code_models.workers_utils import TrainingReport +from napari_cellseg3d.code_plugins.plugin_model_training import ( + Trainer, +) +from napari_cellseg3d.config import MODEL_LIST + +im_path = Path(__file__).resolve().parent / "res/test.tif" +im_path_str = str(im_path) + + +def test_supervised_training(make_napari_viewer_proxy): + viewer = make_napari_viewer_proxy() + widget = Trainer(viewer) + widget.log = LogFixture() + + widget.images_filepath = [] + widget.labels_filepaths = [] + + assert not widget.check_ready() + + widget.images_filepaths = [im_path_str] + widget.labels_filepaths = [im_path_str] + widget.epoch_choice.setValue(1) + 
widget.val_interval_choice.setValue(1) + + assert widget.check_ready() + + MODEL_LIST["test"] = TestModel + widget.model_choice.addItem("test") + widget.model_choice.setCurrentText("test") + widget.unsupervised_mode = False + worker_config = widget._set_worker_config() + assert worker_config.model_info.name == "test" + worker = widget._create_supervised_worker_from_config(worker_config) + worker.config.train_data_dict = [ + {"image": im_path_str, "label": im_path_str} + ] + worker.config.val_data_dict = [ + {"image": im_path_str, "label": im_path_str} + ] + worker.config.max_epochs = 1 + worker.config.validation_interval = 2 + worker.log_parameters() + res = next(worker.train()) + + assert isinstance(res, TrainingReport) + assert res.epoch == 0 + + widget.worker = worker + res.show_plot = True + res.loss_1_values = {"loss": [1, 1, 1, 1]} + res.loss_2_values = [1, 1, 1, 1] + widget.on_yield(res) + assert widget.loss_1_values["loss"] == [1, 1, 1, 1] + assert widget.loss_2_values == [1, 1, 1, 1] + + +def test_unsupervised_training(make_napari_viewer_proxy): + viewer = make_napari_viewer_proxy() + widget = Trainer(viewer) + widget.log = LogFixture() + widget.worker = None + widget._toggle_unsupervised_mode(enabled=True) + widget.model_choice.setCurrentText("WNet") + + widget.patch_choice.setChecked(True) + [w.setValue(4) for w in widget.patch_size_widgets] + + widget.unsupervised_images_filewidget.text_field.setText( + str(im_path.parent) + ) + # widget.start() + widget.data = widget.create_dataset_dict_no_labs() + widget.worker = widget._create_worker( + additional_results_description="wnet_test" + ) + assert widget.worker.config.train_data_dict is not None + res = next( + widget.worker.train( + provided_model=WNetFixture(), + provided_optimizer=OptimizerFixture(), + provided_loss=LossFixture(), + ) + ) + assert isinstance(res, TrainingReport) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 
86c0bb78..7d5d2c92 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -362,7 +362,9 @@ def log_parameters(self): for k, v in d.items() ] - def train(self): + def train( + self, provided_model=None, provided_optimizer=None, provided_loss=None + ): try: if self.config is None: self.config = config.WNetTrainingWorkerConfig() @@ -395,11 +397,15 @@ def train(self): ################################################### self.log("- Getting the model") # Initialize the model - model = WNet( - in_channels=self.config.in_channels, - out_channels=self.config.out_channels, - num_classes=self.config.num_classes, - dropout=self.config.dropout, + model = ( + WNet( + in_channels=self.config.in_channels, + out_channels=self.config.out_channels, + num_classes=self.config.num_classes, + dropout=self.config.dropout, + ) + if provided_model is None + else provided_model ) model.to(device) @@ -458,7 +464,8 @@ def train(self): optimizer = torch.optim.Adam( model.parameters(), lr=self.config.learning_rate ) - + if provided_optimizer is not None: + optimizer = provided_optimizer self.log("- Getting the loss functions") # Initialize the Ncuts loss function criterionE = SoftNCutsLoss( @@ -538,6 +545,8 @@ def train(self): beta = self.config.rec_loss_weight loss = alpha * Ncuts + beta * reconstruction_loss + if provided_loss is not None: + loss = provided_loss epoch_loss += loss.item() # if WANDB_INSTALLED: # wandb.log({"Weighted sum of losses": loss.item()}) From d35da411c038ed9050db398d7be6d52da6ab1f27 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 15:11:10 +0200 Subject: [PATCH 54/70] WNet eval test --- napari_cellseg3d/_tests/test_training.py | 12 + .../code_models/worker_training.py | 429 +++++++++--------- 2 files changed, 223 insertions(+), 218 deletions(-) diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index 14d4b1da..1ae0c2d3 100644 --- 
a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -92,3 +92,15 @@ def test_unsupervised_training(make_napari_viewer_proxy): ) ) assert isinstance(res, TrainingReport) + assert not res.show_plot + widget.worker.config.eval_volume_dict = [ + {"image": im_path_str, "label": im_path_str} + ] + widget.worker._get_data() + eval_res = widget.worker._eval( + model=WNetFixture(), + epoch=-10, + ) + assert isinstance(eval_res, TrainingReport) + assert eval_res.show_plot + assert eval_res.epoch == -10 diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 7d5d2c92..8522b183 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -153,6 +153,21 @@ def __init__( super().__init__() self.config = worker_config + self.dice_metric = DiceMetric( + include_background=True, reduction="mean", get_not_nans=False + ) + self.normalize_function = utils.remap_image + self.start_time = time.time() + self.ncuts_losses = [] + self.rec_losses = [] + self.total_losses = [] + self.best_dice = -1 + self.dice_values = [] + + self.dataloader: DataLoader = None + self.eval_dataloader: DataLoader = None + self.data_shape = None + def get_patch_dataset(self, train_transforms): """Creates a Dataset from the original data using the tifffile library @@ -288,13 +303,15 @@ def _get_data(self): if self.config.sampling: logger.debug("Loading patch dataset") - (data_shape, dataset) = self.get_patch_dataset(train_transforms) + (self.data_shape, dataset) = self.get_patch_dataset( + train_transforms + ) else: logger.debug("Loading volume dataset") - (data_shape, dataset) = self.get_dataset(train_transforms) + (self.data_shape, dataset) = self.get_dataset(train_transforms) - logger.debug(f"Data shape : {data_shape}") - dataloader = DataLoader( + logger.debug(f"Data shape : {self.data_shape}") + self.dataloader = DataLoader( dataset, 
batch_size=self.config.batch_size, shuffle=True, @@ -305,7 +322,7 @@ def _get_data(self): if self.config.eval_volume_dict is not None: eval_dataset = self.get_dataset_eval(self.config.eval_volume_dict) - eval_dataloader = DataLoader( + self.eval_dataloader = DataLoader( eval_dataset, batch_size=self.config.batch_size, shuffle=False, @@ -313,8 +330,8 @@ def _get_data(self): collate_fn=pad_list_data_collate, ) else: - eval_dataloader = None - return dataloader, eval_dataloader, data_shape + self.eval_dataloader = None + return self.dataloader, self.eval_dataloader, self.data_shape def log_parameters(self): self.log("*" * 20) @@ -380,18 +397,14 @@ def train( set_determinism(seed=self.config.deterministic_config.seed) torch.use_deterministic_algorithms(True, warn_only=True) - normalize_function = utils.remap_image device = self.config.device self.log_parameters() self.log("Initializing training...") self.log("- Getting the data") - dataloader, eval_dataloader, data_shape = self._get_data() + self._get_data() - dice_metric = DiceMetric( - include_background=True, reduction="mean", get_not_nans=False - ) ################################################### # Training the model # ################################################### @@ -469,7 +482,7 @@ def train( self.log("- Getting the loss functions") # Initialize the Ncuts loss function criterionE = SoftNCutsLoss( - data_shape=data_shape, + data_shape=self.data_shape, device=device, intensity_sigma=self.config.intensity_sigma, spatial_sigma=self.config.spatial_sigma, @@ -491,13 +504,6 @@ def train( self.log("Training the model") self.log("*" * 20) - startTime = time.time() - ncuts_losses = [] - rec_losses = [] - total_losses = [] - best_dice = -1 - dice_values = [] - # Train the model for epoch in range(self.config.max_epochs): self.log(f"Epoch {epoch + 1} of {self.config.max_epochs}") @@ -506,13 +512,13 @@ def train( epoch_rec_loss = 0 epoch_loss = 0 - for _i, batch in enumerate(dataloader): + for _i, batch in 
enumerate(self.dataloader): # raise NotImplementedError("testing") image_batch = batch["image"].to(device) # Normalize the image for i in range(image_batch.shape[0]): for j in range(image_batch.shape[1]): - image_batch[i, j] = normalize_function( + image_batch[i, j] = self.normalize_function( image_batch[i, j] ) @@ -554,10 +560,10 @@ def train( optimizer.step() if self._abort_requested: - dataloader = None - del dataloader - eval_dataloader = None - del eval_dataloader + self.dataloader = None + del self.dataloader + self.eval_dataloader = None + del self.eval_dataloader model = None del model optimizer = None @@ -572,11 +578,13 @@ def train( show_plot=False, weights=model.state_dict() ) - ncuts_losses.append(epoch_ncuts_loss / len(dataloader)) - rec_losses.append(epoch_rec_loss / len(dataloader)) - total_losses.append(epoch_loss / len(dataloader)) + self.ncuts_losses.append( + epoch_ncuts_loss / len(self.dataloader) + ) + self.rec_losses.append(epoch_rec_loss / len(self.dataloader)) + self.total_losses.append(epoch_loss / len(self.dataloader)) - if eval_dataloader is None: + if self.eval_dataloader is None: try: enc_out = enc[0].detach().cpu().numpy() dec_out = dec[0].detach().cpu().numpy() @@ -606,8 +614,8 @@ def train( yield TrainingReport( show_plot=True, epoch=epoch, - loss_1_values={"SoftNCuts": ncuts_losses}, - loss_2_values=rec_losses, + loss_1_values={"SoftNCuts": self.ncuts_losses}, + loss_2_values=self.rec_losses, weights=model.state_dict(), images_dict=images_dict, ) @@ -615,207 +623,55 @@ def train( pass # if WANDB_INSTALLED: - # wandb.log({"Ncuts loss_epoch": ncuts_losses[-1]}) - # wandb.log({"Reconstruction loss_epoch": rec_losses[-1]}) - # wandb.log({"Sum of losses_epoch": total_losses[-1]}) + # wandb.log({"Ncuts loss_epoch": self.ncuts_losses[-1]}) + # wandb.log({"Reconstruction loss_epoch": self.rec_losses[-1]}) + # wandb.log({"Sum of losses_epoch": self.total_losses[-1]}) # wandb.log({"epoch": epoch}) # wandb.log({"learning_rate model": 
optimizerW.param_groups[0]["lr"]}) # wandb.log({"learning_rate encoder": optimizerE.param_groups[0]["lr"]}) # wandb.log({"learning_rate model": optimizer.param_groups[0]["lr"]}) - self.log(f"Ncuts loss: {ncuts_losses[-1]:.5f}") - self.log(f"Reconstruction loss: {rec_losses[-1]:.5f}") - self.log(f"Weighted sum of losses: {total_losses[-1]:.5f}") + self.log(f"Ncuts loss: {self.ncuts_losses[-1]:.5f}") + self.log(f"Reconstruction loss: {self.rec_losses[-1]:.5f}") + self.log( + f"Weighted sum of losses: {self.total_losses[-1]:.5f}" + ) if epoch > 0: self.log( - f"Ncuts loss difference: {ncuts_losses[-1] - ncuts_losses[-2]:.5f}" + f"Ncuts loss difference: {self.ncuts_losses[-1] - self.ncuts_losses[-2]:.5f}" ) self.log( - f"Reconstruction loss difference: {rec_losses[-1] - rec_losses[-2]:.5f}" + f"Reconstruction loss difference: {self.rec_losses[-1] - self.rec_losses[-2]:.5f}" ) self.log( - f"Weighted sum of losses difference: {total_losses[-1] - total_losses[-2]:.5f}" + f"Weighted sum of losses difference: {self.total_losses[-1] - self.total_losses[-2]:.5f}" ) if ( - eval_dataloader is not None + self.eval_dataloader is not None and (epoch + 1) % self.config.validation_interval == 0 ): model.eval() self.log("Validating...") - with torch.no_grad(): - for _k, val_data in enumerate(eval_dataloader): - val_inputs, val_labels = ( - val_data["image"].to(device), - val_data["label"].to(device), - ) - - # normalize val_inputs across channels - for i in range(val_inputs.shape[0]): - for j in range(val_inputs.shape[1]): - val_inputs[i][j] = normalize_function( - val_inputs[i][j] - ) - logger.debug( - f"Val inputs shape: {val_inputs.shape}" - ) - val_outputs = sliding_window_inference( - val_inputs, - roi_size=[64, 64, 64], - sw_batch_size=1, - predictor=model.forward_encoder, - overlap=0.1, - mode="gaussian", - sigma_scale=0.01, - progress=True, - ) - val_decoder_outputs = sliding_window_inference( - val_outputs, - roi_size=[64, 64, 64], - sw_batch_size=1, - 
predictor=model.forward_decoder, - overlap=0.1, - mode="gaussian", - sigma_scale=0.01, - progress=True, - ) - val_outputs = AsDiscrete(threshold=0.5)( - val_outputs - ) - logger.debug( - f"Val outputs shape: {val_outputs.shape}" - ) - logger.debug( - f"Val labels shape: {val_labels.shape}" - ) - logger.debug( - f"Val decoder outputs shape: {val_decoder_outputs.shape}" - ) - - # dices = [] - # Find in which channel the labels are (avoid background) - # for channel in range(val_outputs.shape[1]): - # dices.append( - # utils.dice_coeff( - # y_pred=val_outputs[ - # 0, channel : (channel + 1), :, :, : - # ], - # y_true=val_labels[0], - # ) - # ) - # logger.debug(f"DICE COEFF: {dices}") - # max_dice_channel = torch.argmax( - # torch.Tensor(dices) - # ) - # logger.debug( - # f"MAX DICE CHANNEL: {max_dice_channel}" - # ) - dice_metric( - y_pred=val_outputs, - # [ - # :, - # max_dice_channel : (max_dice_channel + 1), - # :, - # :, - # :, - # ], - y=val_labels, - ) - - # aggregate the final mean dice result - metric = dice_metric.aggregate().item() - dice_values.append(metric) - self.log(f"Validation Dice score: {metric:.3f}") - if best_dice < metric <= 1: - best_dice = metric - # save the best model - save_best_path = self.config.results_path_folder - # save_best_path.mkdir(parents=True, exist_ok=True) - save_best_name = "wnet" - save_path = ( - str(Path(save_best_path) / save_best_name) - + "_best_metric.pth" - ) - self.log(f"Saving new best model to {save_path}") - torch.save(model.state_dict(), save_path) - - # if WANDB_INSTALLED: - # log validation dice score for each validation round - # wandb.log({"val/dice_metric": metric}) - - dec_out_val = ( - val_decoder_outputs[0] - .detach() - .cpu() - .numpy() - .copy() - ) - enc_out_val = ( - val_outputs[0].detach().cpu().numpy().copy() - ) - lab_out_val = ( - val_labels[0].detach().cpu().numpy().copy() - ) - val_in = val_inputs[0].detach().cpu().numpy().copy() - - display_dict = { - "Reconstruction": { - "data": 
np.squeeze(dec_out_val), - "cmap": "gist_earth", - }, - "Segmentation": { - "data": np.squeeze(enc_out_val), - "cmap": "turbo", - }, - "Inputs": { - "data": np.squeeze(val_in), - "cmap": "inferno", - }, - "Labels": { - "data": np.squeeze(lab_out_val), - "cmap": "bop blue", - }, - } - val_decoder_outputs = None - del val_decoder_outputs - val_outputs = None - del val_outputs - val_labels = None - del val_labels - val_inputs = None - del val_inputs + yield self._eval(model, epoch) # validation - yield TrainingReport( - epoch=epoch, - loss_1_values={ - "SoftNCuts": ncuts_losses, - "Dice metric": dice_values, - }, - loss_2_values=rec_losses, - weights=model.state_dict(), - images_dict=display_dict, - ) - - # reset the status for next validation round - dice_metric.reset() - - if self._abort_requested: - dataloader = None - del dataloader - eval_dataloader = None - del eval_dataloader - model = None - del model - optimizer = None - del optimizer - criterionE = None - del criterionE - criterionW = None - del criterionW - torch.cuda.empty_cache() + if self._abort_requested: + self.dataloader = None + del self.dataloader + self.eval_dataloader = None + del self.eval_dataloader + model = None + del model + optimizer = None + del optimizer + criterionE = None + del criterionE + criterionW = None + del criterionW + torch.cuda.empty_cache() eta = ( - (time.time() - startTime) + (time.time() - self.start_time) * (self.config.max_epochs / (epoch + 1) - 1) / 60 ) @@ -830,12 +686,12 @@ def train( ) self.log("Training finished") - if best_dice > -1: - self.log(f"Best dice metric : {best_dice}") + if self.best_dice > -1: + self.log(f"Best dice metric : {self.best_dice}") # if WANDB_INSTALLED and self.config.eval_volume_directory is not None: # wandb.log( # { - # "best_dice_metric": best_dice, + # "self.best_dice_metric": self.best_dice, # "best_metric_epoch": best_dice_epoch, # } # ) @@ -859,11 +715,11 @@ def train( # model_artifact.add_file(self.config.save_model_path) # 
wandb.log_artifact(model_artifact) - # return ncuts_losses, rec_losses, model + # return self.ncuts_losses, self.rec_losses, model dataloader = None del dataloader - eval_dataloader = None - del eval_dataloader + self.eval_dataloader = None + del self.eval_dataloader model = None del model optimizer = None @@ -880,6 +736,143 @@ def train( self.quit() raise e + def _eval(self, model, epoch) -> TrainingReport: + with torch.no_grad(): + device = self.config.device + for _k, val_data in enumerate(self.eval_dataloader): + val_inputs, val_labels = ( + val_data["image"].to(device), + val_data["label"].to(device), + ) + + # normalize val_inputs across channels + for i in range(val_inputs.shape[0]): + for j in range(val_inputs.shape[1]): + val_inputs[i][j] = self.normalize_function( + val_inputs[i][j] + ) + logger.debug(f"Val inputs shape: {val_inputs.shape}") + val_outputs = sliding_window_inference( + val_inputs, + roi_size=[64, 64, 64], + sw_batch_size=1, + predictor=model.forward_encoder, + overlap=0.1, + mode="gaussian", + sigma_scale=0.01, + progress=True, + ) + val_decoder_outputs = sliding_window_inference( + val_outputs, + roi_size=[64, 64, 64], + sw_batch_size=1, + predictor=model.forward_decoder, + overlap=0.1, + mode="gaussian", + sigma_scale=0.01, + progress=True, + ) + val_outputs = AsDiscrete(threshold=0.5)(val_outputs) + logger.debug(f"Val outputs shape: {val_outputs.shape}") + logger.debug(f"Val labels shape: {val_labels.shape}") + logger.debug( + f"Val decoder outputs shape: {val_decoder_outputs.shape}" + ) + + # dices = [] + # Find in which channel the labels are (avoid background) + # for channel in range(val_outputs.shape[1]): + # dices.append( + # utils.dice_coeff( + # y_pred=val_outputs[ + # 0, channel : (channel + 1), :, :, : + # ], + # y_true=val_labels[0], + # ) + # ) + # logger.debug(f"DICE COEFF: {dices}") + # max_dice_channel = torch.argmax( + # torch.Tensor(dices) + # ) + # logger.debug( + # f"MAX DICE CHANNEL: {max_dice_channel}" + # ) + 
self.dice_metric( + y_pred=val_outputs, + # [ + # :, + # max_dice_channel : (max_dice_channel + 1), + # :, + # :, + # :, + # ], + y=val_labels, + ) + + # aggregate the final mean dice result + metric = self.dice_metric.aggregate().item() + self.dice_values.append(metric) + self.log(f"Validation Dice score: {metric:.3f}") + if self.best_dice < metric <= 1: + self.best_dice = metric + # save the best model + save_best_path = self.config.results_path_folder + # save_best_path.mkdir(parents=True, exist_ok=True) + save_best_name = "wnet" + save_path = ( + str(Path(save_best_path) / save_best_name) + + "_best_metric.pth" + ) + self.log(f"Saving new best model to {save_path}") + torch.save(model.state_dict(), save_path) + + # if WANDB_INSTALLED: + # log validation dice score for each validation round + # wandb.log({"val/dice_metric": metric}) + self.dice_metric.reset() + dec_out_val = val_decoder_outputs[0].detach().cpu().numpy().copy() + enc_out_val = val_outputs[0].detach().cpu().numpy().copy() + lab_out_val = val_labels[0].detach().cpu().numpy().copy() + val_in = val_inputs[0].detach().cpu().numpy().copy() + + display_dict = { + "Reconstruction": { + "data": np.squeeze(dec_out_val), + "cmap": "gist_earth", + }, + "Segmentation": { + "data": np.squeeze(enc_out_val), + "cmap": "turbo", + }, + "Inputs": { + "data": np.squeeze(val_in), + "cmap": "inferno", + }, + "Labels": { + "data": np.squeeze(lab_out_val), + "cmap": "bop blue", + }, + } + val_decoder_outputs = None + del val_decoder_outputs + val_outputs = None + del val_outputs + val_labels = None + del val_labels + val_inputs = None + del val_inputs + + return TrainingReport( + epoch=epoch, + loss_1_values={ + "SoftNCuts": self.ncuts_losses, + "Dice metric": self.dice_values, + }, + loss_2_values=self.rec_losses, + weights=model.state_dict(), + images_dict=display_dict, + ) + class SupervisedTrainingWorker(TrainingWorkerBase): """A custom worker to run supervised training jobs in. 
From fb1b130629106abb5d829d33d4414f26e75bb9d2 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 15:19:47 +0200 Subject: [PATCH 55/70] Fix order for model deletion --- napari_cellseg3d/_tests/test_training.py | 18 ++++++++++++++++++ .../code_models/worker_training.py | 8 ++++---- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index 1ae0c2d3..14afd94e 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -1,5 +1,7 @@ from pathlib import Path +import pytest + from napari_cellseg3d._tests.fixtures import ( LogFixture, LossFixture, @@ -93,6 +95,22 @@ def test_unsupervised_training(make_napari_viewer_proxy): ) assert isinstance(res, TrainingReport) assert not res.show_plot + widget.worker._abort_requested = True + res = next( + widget.worker.train( + provided_model=WNetFixture(), + provided_optimizer=OptimizerFixture(), + provided_loss=LossFixture(), + ) + ) + assert isinstance(res, TrainingReport) + assert not res.show_plot + with pytest.raises( + AttributeError, + match="'WNetTrainingWorker' object has no attribute 'model'", + ): + assert widget.worker.model is None + widget.worker.config.eval_volume_dict = [ {"image": im_path_str, "label": im_path_str} ] diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 8522b183..895b261b 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -559,6 +559,10 @@ def train( loss.backward(loss) optimizer.step() + yield TrainingReport( + show_plot=False, weights=model.state_dict() + ) + if self._abort_requested: self.dataloader = None del self.dataloader @@ -574,10 +578,6 @@ def train( del criterionW torch.cuda.empty_cache() - yield TrainingReport( - show_plot=False, weights=model.state_dict() - ) - self.ncuts_losses.append( epoch_ncuts_loss / 
len(self.dataloader) ) From e4b10a34daf9b1924d5e7fa2a66e9e603f8938d6 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 16:28:04 +0200 Subject: [PATCH 56/70] Extend supervised train tests --- napari_cellseg3d/_tests/fixtures.py | 31 ++++++++++--- napari_cellseg3d/_tests/test_training.py | 37 ++++++++++------ .../code_models/worker_training.py | 43 +++++++++++++------ 3 files changed, 78 insertions(+), 33 deletions(-) diff --git a/napari_cellseg3d/_tests/fixtures.py b/napari_cellseg3d/_tests/fixtures.py index 001b1d64..4dba351f 100644 --- a/napari_cellseg3d/_tests/fixtures.py +++ b/napari_cellseg3d/_tests/fixtures.py @@ -36,23 +36,42 @@ def forward(self, x): return self.forward_encoder(x), self.forward_decoder(x) -class OptimizerFixture: - def __call__(self, x): +class ModelFixture(torch.nn.Module): + def __init__(self): + super().__init__() + self.mock_conv = torch.nn.Conv3d(1, 1, 1) + self.mock_conv.requires_grad_(False) + + def forward(self, x): return x + +class OptimizerFixture: + def __init__(self): + self.param_groups = [] + self.param_groups.append({"lr": 0}) + def zero_grad(self): pass - def step(self): + def step(self, *args): + pass + + +class SchedulerFixture: + def step(self, *args): pass class LossFixture: - def __call__(self, x): - return x + def __call__(self, *args): + return self - def backward(self, x): + def backward(self, *args): pass def item(self): return 0 + + def detach(self): + return self diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index 14afd94e..dc9d17ba 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -5,7 +5,9 @@ from napari_cellseg3d._tests.fixtures import ( LogFixture, LossFixture, + ModelFixture, OptimizerFixture, + SchedulerFixture, WNetFixture, ) from napari_cellseg3d.code_models.models.model_test import TestModel @@ -33,6 +35,7 @@ def test_supervised_training(make_napari_viewer_proxy): widget.labels_filepaths = 
[im_path_str] widget.epoch_choice.setValue(1) widget.val_interval_choice.setValue(1) + widget.device_choice.setCurrentIndex(0) assert widget.check_ready() @@ -49,13 +52,19 @@ def test_supervised_training(make_napari_viewer_proxy): worker.config.val_data_dict = [ {"image": im_path_str, "label": im_path_str} ] - worker.config.max_epochs = 1 + worker.config.max_epochs = 2 worker.config.validation_interval = 2 - worker.log_parameters() - res = next(worker.train()) - assert isinstance(res, TrainingReport) - assert res.epoch == 0 + worker.log_parameters() + for res_i in worker.train( + provided_model=ModelFixture(), + provided_optimizer=OptimizerFixture(), + provided_loss=LossFixture(), + provided_scheduler=SchedulerFixture(), + ): + assert isinstance(res_i, TrainingReport) + res = res_i + assert res.epoch == 1 widget.worker = worker res.show_plot = True @@ -86,15 +95,15 @@ def test_unsupervised_training(make_napari_viewer_proxy): additional_results_description="wnet_test" ) assert widget.worker.config.train_data_dict is not None - res = next( - widget.worker.train( - provided_model=WNetFixture(), - provided_optimizer=OptimizerFixture(), - provided_loss=LossFixture(), - ) - ) - assert isinstance(res, TrainingReport) - assert not res.show_plot + widget.worker.config.max_epochs = 1 + for res_i in widget.worker.train( + provided_model=WNetFixture(), + provided_optimizer=OptimizerFixture(), + provided_loss=LossFixture(), + ): + assert isinstance(res_i, TrainingReport) + res = res_i + assert res.epoch == 0 widget.worker._abort_requested = True res = next( widget.worker.train( diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 895b261b..0d0c0659 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -999,7 +999,13 @@ def log_parameters(self): # self.log("\n") # self.log("-" * 20) - def train(self): + def train( + self, + provided_model=None, + 
provided_optimizer=None, + provided_loss=None, + provided_scheduler=None, + ): """Trains the PyTorch model for the given number of epochs, with the selected model and data, using the chosen batch size, validation interval, loss function, and number of samples. Will perform validation once every :py:obj:`val_interval` and save results if the mean dice is better @@ -1070,13 +1076,16 @@ def train(self): self.config.train_data_dict[0] ) check = data_check["image"].shape - do_sampling = self.config.sampling - size = self.config.sample_size if do_sampling else check - PADDING = utils.get_padding_dim(size) - model = model_class(input_img_size=PADDING, use_checkpoint=True) + + model = ( + model_class(input_img_size=PADDING, use_checkpoint=True) + if provided_model is None + else provided_model + ) + device = torch.device(self.config.device) model = model.to(device) @@ -1276,8 +1285,10 @@ def get_patch_loader_func(num_samples): logger.info("\nDone") logger.debug("Optimizer") - optimizer = torch.optim.Adam( - model.parameters(), self.config.learning_rate + optimizer = ( + torch.optim.Adam(model.parameters(), self.config.learning_rate) + if provided_optimizer is None + else provided_optimizer ) factor = self.config.scheduler_factor @@ -1286,12 +1297,16 @@ def get_patch_loader_func(num_samples): self.log("Setting it to 0.5") factor = 0.5 - scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( - optimizer=optimizer, - mode="min", - factor=factor, - patience=self.config.scheduler_patience, - verbose=VERBOSE_SCHEDULER, + scheduler = ( + torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer=optimizer, + mode="min", + factor=factor, + patience=self.config.scheduler_patience, + verbose=VERBOSE_SCHEDULER, + ) + if provided_scheduler is None + else provided_scheduler ) dice_metric = DiceMetric( include_background=True, reduction="mean", ignore_empty=False @@ -1342,6 +1357,8 @@ def get_patch_loader_func(num_samples): # device = torch.device(self.config.device) 
self.set_loss_from_config() + if provided_loss is not None: + self.loss_function = provided_loss # if model_name == "test": # self.quit() From 0c3450aa939948f5563f9906140dae8376969f84 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 17:11:06 +0200 Subject: [PATCH 57/70] Started docs update --- docs/res/code/model_framework.rst | 2 +- docs/res/code/plugin_model_training.rst | 4 +-- docs/res/code/workers.rst | 34 +++++++++++++++--- docs/res/guides/detailed_walkthrough.rst | 17 +++++---- docs/res/guides/training_module_guide.rst | 7 ++-- docs/res/guides/training_wnet.rst | 36 +++++++++++++++---- napari_cellseg3d/_tests/test_training.py | 2 +- .../code_models/worker_training.py | 8 ++--- 8 files changed, 80 insertions(+), 30 deletions(-) diff --git a/docs/res/code/model_framework.rst b/docs/res/code/model_framework.rst index a3483f5a..63eef232 100644 --- a/docs/res/code/model_framework.rst +++ b/docs/res/code/model_framework.rst @@ -12,7 +12,7 @@ Class : ModelFramework Methods ********************** .. autoclass:: napari_cellseg3d.code_models.model_framework::ModelFramework - :members: __init__, send_log, save_log, save_log_to_path, display_status_report, create_train_dataset_dict, get_model, get_available_models, get_device, empty_cuda_cache + :members: __init__, send_log, save_log, save_log_to_path, display_status_report, create_train_dataset_dict, get_available_models, get_device, empty_cuda_cache :noindex: diff --git a/docs/res/code/plugin_model_training.rst b/docs/res/code/plugin_model_training.rst index dc1271fc..6a2a39b8 100644 --- a/docs/res/code/plugin_model_training.rst +++ b/docs/res/code/plugin_model_training.rst @@ -11,7 +11,7 @@ Class : Trainer Methods ********************** .. 
autoclass:: napari_cellseg3d.code_plugins.plugin_model_training::Trainer - :members: __init__, get_loss, check_ready, send_log, start, on_start, on_finish, on_error, on_yield, plot_loss, update_loss_plot + :members: __init__, check_ready, send_log, start, on_start, on_finish, on_error, on_yield, update_loss_plot :noindex: @@ -19,4 +19,4 @@ Methods Attributes ********************* .. autoclass:: napari_cellseg3d.code_plugins.plugin_model_training::Trainer - :members: _viewer, worker, loss_dict, canvas, train_loss_plot, dice_metric_plot + :members: _viewer, worker, canvas diff --git a/docs/res/code/workers.rst b/docs/res/code/workers.rst index 1f5167ad..5964e004 100644 --- a/docs/res/code/workers.rst +++ b/docs/res/code/workers.rst @@ -10,7 +10,7 @@ Class : LogSignal Attributes ************************ -.. autoclass:: napari_cellseg3d.code_models.workers::LogSignal +.. autoclass:: napari_cellseg3d.code_models.workers_utils::LogSignal :members: log_signal :noindex: @@ -24,14 +24,14 @@ Class : InferenceWorker Methods ************************ -.. autoclass:: napari_cellseg3d.code_models.workers::InferenceWorker +.. autoclass:: napari_cellseg3d.code_models.worker_inference::InferenceWorker :members: __init__, log, create_inference_dict, inference :noindex: .. _here: https://napari-staging-site.github.io/guides/stable/threading.html -Class : TrainingWorker +Class : TrainingWorkerBase ------------------------------------------- .. important:: @@ -39,6 +39,32 @@ Class : TrainingWorker Methods ************************ -.. autoclass:: napari_cellseg3d.code_models.workers::TrainingWorker +.. autoclass:: napari_cellseg3d.code_models.worker_training::TrainingWorkerBase :members: __init__, log, train :noindex: + + +Class : WNetTrainingWorker +------------------------------------------- + +.. important:: + Inherits from :py:class:`TrainingWorkerBase` + +Methods +************************ +.. 
autoclass:: napari_cellseg3d.code_models.worker_training::WNetTrainingWorker + :members: __init__, train, eval, get_patch_dataset, get_dataset_eval, get_dataset + :noindex: + + +Class : SupervisedTrainingWorker +------------------------------------------- + +.. important:: + Inherits from :py:class:`TrainingWorkerBase` + +Methods +************************ +.. autoclass:: napari_cellseg3d.code_models.worker_training::SupervisedTrainingWorker + :members: __init__, train + :noindex: diff --git a/docs/res/guides/detailed_walkthrough.rst b/docs/res/guides/detailed_walkthrough.rst index 56ef54ed..4fd04510 100644 --- a/docs/res/guides/detailed_walkthrough.rst +++ b/docs/res/guides/detailed_walkthrough.rst @@ -120,9 +120,9 @@ Finally, the last tab lets you choose : * SegResNet is a lightweight model (low memory requirements) from MONAI originally designed for 3D fMRI data. * VNet is a larger (than SegResNet) CNN from MONAI designed for medical image segmentation. - * TRAILMAP is our PyTorch implementation of a 3D CNN model trained for axonal detection in cleared tissue. * TRAILMAP_MS is our implementation in PyTorch additionally trained on mouse cortical neural nuclei from mesoSPIM data. - * Note, the code is very modular, so it is relatively straightforward to use (and contribute) your model as well. + * SwinUNetR is a MONAI implementation of the SwinUNetR model. It is costly in compute and memory, but can achieve high performance. + * WNet is our reimplementation of an unsupervised model, which can be used to produce segmentation without labels. * The loss : for object detection in 3D volumes you'll likely want to use the Dice or Dice-focal Loss. @@ -239,13 +239,12 @@ Scoring, review, analysis ---------------------------- -.. Using the metrics utility module, you can compare the model's predictions to any ground truth -labels you might have. -Simply provide your prediction and ground truth labels, and compute the results. 
-A Dice metric of 1 indicates perfect matching, whereas a score of 0 indicates complete mismatch. -Select which score **you consider as sub-optimal**, and all results below this will be **shown in napari**. -If at any time the **orientation of your prediction labels changed compared to the ground truth**, check the -"Find best orientation" option to compensate for it. +.. Using the metrics utility module, you can compare the model's predictions to any ground truth labels you might have. + Simply provide your prediction and ground truth labels, and compute the results. + A Dice metric of 1 indicates perfect matching, whereas a score of 0 indicates complete mismatch. + Select which score **you consider as sub-optimal**, and all results below this will be **shown in napari**. + If at any time the **orientation of your prediction labels changed compared to the ground truth**, check the + "Find best orientation" option to compensate for it. Labels review diff --git a/docs/res/guides/training_module_guide.rst b/docs/res/guides/training_module_guide.rst index 0a577b86..1a424e98 100644 --- a/docs/res/guides/training_module_guide.rst +++ b/docs/res/guides/training_module_guide.rst @@ -4,7 +4,7 @@ Training module guide - Unsupervised models ============================================== .. important:: - The WNet training is for now only available in the provided jupyter notebook, in the ``notebooks`` folder. + The WNet training is for now available as part of the plugin in the Training module. Please see the :ref:`training_wnet` section for more information. 
Training module guide - Supervised models @@ -25,14 +25,15 @@ Model Link to original paper ============== ================================================================================================ VNet `Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation`_ SegResNet `3D MRI brain tumor segmentation using autoencoder regularization`_ -TRAILMAP_MS A PyTorch implementation of the `TRAILMAP project on GitHub`_ pretrained with MesoSpim data -TRAILMAP An implementation of the `TRAILMAP project on GitHub`_ using a `3DUNet for PyTorch`_ +TRAILMAP_MS An implementation of the `TRAILMAP project on GitHub`_ using `3DUNet for PyTorch`_ +SwinUNetR `Swin UNETR, Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images`_ ============== ================================================================================================ .. _Fully Convolutional Neural Networks for Volumetric Medical Image Segmentation: https://arxiv.org/pdf/1606.04797.pdf .. _3D MRI brain tumor segmentation using autoencoder regularization: https://arxiv.org/pdf/1810.11654.pdf .. _TRAILMAP project on GitHub: https://github.com/AlbertPun/TRAILMAP .. _3DUnet for Pytorch: https://github.com/wolny/pytorch-3dunet +.. _Swin UNETR, Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images: https://arxiv.org/abs/2201.01266 .. important:: | The machine learning models used by this program require all images of a dataset to be of the same size. diff --git a/docs/res/guides/training_wnet.rst b/docs/res/guides/training_wnet.rst index ecd20542..974a90e9 100644 --- a/docs/res/guides/training_wnet.rst +++ b/docs/res/guides/training_wnet.rst @@ -15,21 +15,45 @@ the model was trained on; you can retrain from our pretrained model to your set The model has two losses, the SoftNCut loss which clusters pixels according to brightness, and a reconstruction loss, either Mean Square Error (MSE) or Binary Cross Entropy (BCE). 
Unlike the original paper, these losses are added in a weighted sum and the backward pass is performed for the whole model at once. -The SoftNcuts is bounded between 0 and 1; the MSE may take large values. +The SoftNcuts is bounded between 0 and 1; the MSE may take large positive values. -For good performance, one should wait for the SoftNCut to reach a plateau, the reconstruction loss must also diminish but it's generally less critical. +For good performance, one should wait for the SoftNCut to reach a plateau; the reconstruction loss must also diminish but it's generally less critical. +Parameters +------------------------------- + +When using the WNet training module, additional options will be provided in the Advanced tab of the training module: + +- Number of classes : number of classes to segment (default 2). Additional classes will result in a more progressive segmentation according to brightness; can be useful if you have "halos" around your objects or artifacts with a significantly different brightness. +- Reconstruction loss : either MSE or BCE (default MSE). MSE is more sensitive to outliers, but can be more precise; BCE is more robust to outliers but can be less precise. + +- NCuts parameters: + - Intensity sigma : standard deviation of the feature similarity term (brightness here, default 1) + - Spatial sigma : standard deviation of the spatial proximity term (default 4) + - Radius : radius of the loss computation in pixels (default 2) + +.. note:: + Intensity sigma depends on pixel values in the image. The default of 1 is tailored to images being mapped between 0 and 100, which is done automatically by the plugin. +.. note:: + Raising the radius might improve performance in some cases, but will also greatly increase computation time. + +- Weights for the sum of losses : + - NCuts weight : weight of the NCuts loss (default 0.5) + - Reconstruction weight : weight of the reconstruction loss (default 0.5*1e-2) + +.. 
note:: + The weight of the reconstruction loss should be adjusted according to its empirical value; ideally the reconstruction loss should be of the same order of magnitude as the NCuts loss after being multiplied by its weight. Common issues troubleshooting ------------------------------ -If you do not find a satisfactory answer here, please `open an issue`_ ! +If you do not find a satisfactory answer here, please do not hesitate to `open an issue`_ on GitHub. -- **The NCuts loss explodes after a few epochs** : Lower the learning rate +- **The NCuts loss explodes after a few epochs** : Lower the learning rate, first by a factor of two, then ten. - **The NCuts loss does not converge and is unstable** : The normalization step might not be adapted to your images. Disable normalization and change intensity_sigma according to the distribution of values in your image; for reference, by default images are remapped to values between 0 and 100, and intensity_sigma=1. + The normalization step might not be adapted to your images. Disable normalization and change intensity_sigma according to the distribution of values in your image. For reference, by default images are remapped to values between 0 and 100, and intensity_sigma=1. -- **Reconstruction (decoder) performance is poor** : switch to BCE and set the scaling factor of the reconstruction loss ot 0.5, OR adjust the weight of the MSE loss to make it closer to 1. +- **Reconstruction (decoder) performance is poor** : switch to BCE and set the scaling factor of the reconstruction loss to 0.5, OR adjust the weight of the MSE loss to make it closer to 1 in the weighted sum. ..
_WNet, A Deep Model for Fully Unsupervised Image Segmentation: https://arxiv.org/abs/1711.08506 diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index dc9d17ba..2fe49a76 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -124,7 +124,7 @@ def test_unsupervised_training(make_napari_viewer_proxy): {"image": im_path_str, "label": im_path_str} ] widget.worker._get_data() - eval_res = widget.worker._eval( + eval_res = widget.worker.eval( model=WNetFixture(), epoch=-10, ) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 0d0c0659..ffdae104 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -108,10 +108,10 @@ def set_download_log(self, widget): self.downloader.log_widget = widget def log(self, text): - """Sends a signal that ``text`` should be logged + """Sends a Qt signal that the provided text should be logged Goes in a Log object, defined in :py:mod:`napari_cellseg3d.interface Sends a signal to the main thread to log the text. - Signal is defined in napari_cellseg3d.workers_utils.LogSignal + Signal is defined in napari_cellseg3d.workers_utils.LogSignal. 
Args: text (str): text to logged @@ -653,7 +653,7 @@ def train( ): model.eval() self.log("Validating...") - yield self._eval(model, epoch) # validation + yield self.eval(model, epoch) # validation if self._abort_requested: self.dataloader = None @@ -736,7 +736,7 @@ def train( self.quit() raise e - def _eval(self, model, epoch) -> TrainingReport: + def eval(self, model, epoch) -> TrainingReport: with torch.no_grad(): device = self.config.device for _k, val_data in enumerate(self.eval_dataloader): From eaabb1198abce1b52abd1ff3ae61a36d328dd991 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 2 Aug 2023 17:31:08 +0200 Subject: [PATCH 58/70] Update plugin_model_training.py --- napari_cellseg3d/code_plugins/plugin_model_training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index c4211ee3..4f980b8a 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1608,14 +1608,14 @@ def __init__(self, parent): text_label="Reconstruction loss", ) self.ncuts_weight_choice = ui.DoubleIncrementCounter( - lower=0.1, + lower=0.01, upper=1.0, default=self.default_config.n_cuts_weight, parent=parent, text_label="NCuts weight", ) self.reconstruction_weight_choice = ui.DoubleIncrementCounter( - lower=0.1, + lower=0.01, upper=1.0, default=0.5, parent=parent, From 7e397f932e99988a52467e03d0864f3cc89ffd55 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Wed, 9 Aug 2023 16:54:27 +0200 Subject: [PATCH 59/70] Fixed filepaths --- napari_cellseg3d/code_plugins/plugin_convert.py | 12 +++++------- napari_cellseg3d/code_plugins/plugin_crf.py | 2 +- napari_cellseg3d/code_plugins/plugin_crop.py | 2 +- napari_cellseg3d/code_plugins/plugin_review.py | 2 +- napari_cellseg3d/config.py | 6 +++--- 5 files changed, 11 insertions(+), 13 deletions(-) diff --git 
a/napari_cellseg3d/code_plugins/plugin_convert.py b/napari_cellseg3d/code_plugins/plugin_convert.py index aa70bc73..18af29c5 100644 --- a/napari_cellseg3d/code_plugins/plugin_convert.py +++ b/napari_cellseg3d/code_plugins/plugin_convert.py @@ -46,7 +46,7 @@ def __init__(self, viewer: "napari.Viewer.viewer", parent=None): self.aniso_widgets = ui.AnisotropyWidgets(self, always_visible=True) self.start_btn = ui.Button("Start", self._start) - self.results_path = str(Path.home() / Path("cellseg3d/anisotropy")) + self.results_path = str(Path.home() / "cellseg3d" / "anisotropy") self.results_filewidget.text_field.setText(str(self.results_path)) self.results_filewidget.check_ready() @@ -145,7 +145,7 @@ def __init__(self, viewer: "napari.viewer.Viewer", parent=None): text_label="Remove all smaller than (pxs):", ) - self.results_path = Path.home() / Path("cellseg3d/small_removed") + self.results_path = Path.home() / "cellseg3d" / "small_removed" self.results_filewidget.text_field.setText(str(self.results_path)) self.results_filewidget.check_ready() @@ -233,9 +233,7 @@ def __init__(self, viewer: "napari.viewer.Viewer", parent=None): self.start_btn = ui.Button("Start", self._start) - self.results_path = str( - Path.home() / Path("cellseg3d/instance_labels") - ) + self.results_path = str(Path.home() / "cellseg3d" / "instance_labels") self.results_filewidget.text_field.setText(self.results_path) self.results_filewidget.check_ready() @@ -326,7 +324,7 @@ def __init__(self, viewer: "napari.viewer.Viewer", parent=None): self.start_btn = ui.Button("Start", self._start) - self.results_path = Path.home() / Path("cellseg3d/instance") + self.results_path = Path.home() / "cellseg3d" / "instance" self.results_filewidget.text_field.setText(str(self.results_path)) self.results_filewidget.check_ready() @@ -417,7 +415,7 @@ def __init__(self, viewer: "napari.viewer.Viewer", parent=None): text_label="Remove all smaller than (value):", ) - self.results_path = str(Path.home() / 
Path("cellseg3d/threshold")) + self.results_path = str(Path.home() / "cellseg3d" / "threshold") self.results_filewidget.text_field.setText(self.results_path) self.results_filewidget.check_ready() diff --git a/napari_cellseg3d/code_plugins/plugin_crf.py b/napari_cellseg3d/code_plugins/plugin_crf.py index 76194e87..dcc0af57 100644 --- a/napari_cellseg3d/code_plugins/plugin_crf.py +++ b/napari_cellseg3d/code_plugins/plugin_crf.py @@ -138,7 +138,7 @@ def __init__(self, viewer, parent=None): self.result_name = None self.crf_results = [] - self.results_path = Path.home() / Path("cellseg3d/crf") + self.results_path = Path.home() / "cellseg3d" / "crf" self.results_filewidget.text_field.setText(str(self.results_path)) self.results_filewidget.check_ready() diff --git a/napari_cellseg3d/code_plugins/plugin_crop.py b/napari_cellseg3d/code_plugins/plugin_crop.py index c6e822d4..37b26b13 100644 --- a/napari_cellseg3d/code_plugins/plugin_crop.py +++ b/napari_cellseg3d/code_plugins/plugin_crop.py @@ -38,7 +38,7 @@ def __init__(self, viewer: "napari.viewer.Viewer", parent=None): super().__init__(viewer) self.docked_widgets = [] - self.results_path = Path.home() / Path("cellseg3d/cropped") + self.results_path = Path.home() / "cellseg3d" / "cropped" self.btn_start = ui.Button("Start", self._start) diff --git a/napari_cellseg3d/code_plugins/plugin_review.py b/napari_cellseg3d/code_plugins/plugin_review.py index d3216436..712b3193 100644 --- a/napari_cellseg3d/code_plugins/plugin_review.py +++ b/napari_cellseg3d/code_plugins/plugin_review.py @@ -144,7 +144,7 @@ def _build(self): # self._show_io_element(self.results_filewidget) self.results_filewidget.text_field.setText( - str(Path.home() / Path("cellseg3d/review")) + str(Path.home() / "cellseg3d" / "review") ) csv_param_w.setLayout(csv_param_l) diff --git a/napari_cellseg3d/config.py b/napari_cellseg3d/config.py index 6c8db79b..449b58b5 100644 --- a/napari_cellseg3d/config.py +++ b/napari_cellseg3d/config.py @@ -45,7 +45,7 @@ class 
ReviewConfig: image: np.array = None labels: np.array = None - csv_path: str = Path.home() / Path("cellseg3d/review") + csv_path: str = Path.home() / "cellseg3d" / "review" model_name: str = "" new_csv: bool = True filetype: str = ".tif" @@ -210,7 +210,7 @@ class InferenceWorkerConfig: device: str = "cpu" model_info: ModelInfo = ModelInfo() weights_config: WeightsInfo = WeightsInfo() - results_path: str = str(Path.home() / Path("cellseg3d/inference")) + results_path: str = str(Path.home() / "cellseg3d/inference") filetype: str = ".tif" keep_on_cpu: bool = False compute_stats: bool = False @@ -258,7 +258,7 @@ class TrainingWorkerConfig: scheduler_patience: int = 10 weights_info: WeightsInfo = WeightsInfo() # data params - results_path_folder: str = str(Path.home() / Path("cellseg3d/training")) + results_path_folder: str = str(Path.home() / "cellseg3d" / "training") sampling: bool = False num_samples: int = 2 sample_size: List[int] = None From 4e454c0406c10842660b9e05300004cc6d97c4b8 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 15 Aug 2023 16:17:31 +0200 Subject: [PATCH 60/70] Fix paths in test (use pathlib) --- napari_cellseg3d/_tests/test_utils.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/_tests/test_utils.py b/napari_cellseg3d/_tests/test_utils.py index a5ac7fdb..71362e57 100644 --- a/napari_cellseg3d/_tests/test_utils.py +++ b/napari_cellseg3d/_tests/test_utils.py @@ -209,17 +209,29 @@ def test_parse_default_path(): user_path = Path().home() assert utils.parse_default_path([None]) == str(user_path) - test_path = "C:/test/test/test/test" + test_path = Path("C:") / "test" / "test" / "test" / "test" path = [test_path, None, None] assert utils.parse_default_path(path, check_existence=False) == test_path - test_path = "C:/test/does/not/exist" + test_path = Path("C:") / "test" / "does" / "not" / "exist" path = [test_path, None, None] assert utils.parse_default_path(path, check_existence=True) == str( 
Path.home() ) - long_path = "D:/very/long/path/what/a/bore/ifonlytherewas/something/tohelpmenotsearchit/allthetime" + long_path = Path("D:") + long_path = ( + long_path + / "very" + / "long" + / "path" + / "what" + / "a" + / "bore" + / "ifonlytherewassomething" + / "tohelpmenotsearchit" + / "allthetime" + ) path = [test_path, None, None, long_path, ""] assert utils.parse_default_path(path, check_existence=False) == long_path From c72c5cc5ca3de9a3a769614ae451d7e625502040 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 15 Aug 2023 16:17:45 +0200 Subject: [PATCH 61/70] Updated workers config --- napari_cellseg3d/code_models/worker_inference.py | 1 - napari_cellseg3d/code_models/worker_training.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_inference.py b/napari_cellseg3d/code_models/worker_inference.py index 3fb5bc95..8dcd084b 100644 --- a/napari_cellseg3d/code_models/worker_inference.py +++ b/napari_cellseg3d/code_models/worker_inference.py @@ -181,7 +181,6 @@ def log_parameters(self): def load_folder(self): images_dict = self.create_inference_dict(self.config.images_filepaths) - # TODO : better solution than loading first image always ? 
data_check = LoadImaged(keys=["image"])(images_dict[0]) check = data_check["image"].shape pad = utils.get_padding_dim(check) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index ffdae104..39b730c5 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -1273,14 +1273,14 @@ def get_patch_loader_func(num_samples): train_dataset, batch_size=self.config.batch_size, shuffle=True, - num_workers=2, + num_workers=self.config.num_workers, collate_fn=pad_list_data_collate, ) validation_loader = DataLoader( validation_dataset, batch_size=self.config.batch_size, - num_workers=2, + num_workers=self.config.num_workers, ) logger.info("\nDone") From d533a3b951fc003447e5df9e5b7a732458927e2a Mon Sep 17 00:00:00 2001 From: C-Achard Date: Tue, 19 Sep 2023 13:02:43 +0200 Subject: [PATCH 62/70] Fixed parse_default_path test --- napari_cellseg3d/_tests/test_utils.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/napari_cellseg3d/_tests/test_utils.py b/napari_cellseg3d/_tests/test_utils.py index 71362e57..5d4677ac 100644 --- a/napari_cellseg3d/_tests/test_utils.py +++ b/napari_cellseg3d/_tests/test_utils.py @@ -206,20 +206,22 @@ def test_load_images(): def test_parse_default_path(): - user_path = Path().home() + user_path = Path.home() assert utils.parse_default_path([None]) == str(user_path) - test_path = Path("C:") / "test" / "test" / "test" / "test" + test_path = (Path.home() / "test" / "test" / "test" / "test").as_posix() path = [test_path, None, None] - assert utils.parse_default_path(path, check_existence=False) == test_path + assert utils.parse_default_path(path, check_existence=False) == str( + test_path + ) - test_path = Path("C:") / "test" / "does" / "not" / "exist" + test_path = (Path.home() / "test" / "does" / "not" / "exist").as_posix() path = [test_path, None, None] assert utils.parse_default_path(path, 
check_existence=True) == str( Path.home() ) - long_path = Path("D:") + long_path = Path("D:/") long_path = ( long_path / "very" @@ -233,7 +235,9 @@ def test_parse_default_path(): / "allthetime" ) path = [test_path, None, None, long_path, ""] - assert utils.parse_default_path(path, check_existence=False) == long_path + assert utils.parse_default_path(path, check_existence=False) == str( + long_path.as_posix() + ) def test_thread_test(make_napari_viewer_proxy): From ef9c18c5daa3aa4735c9ab38e40ab05cd33b5bbe Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 15:03:55 +0200 Subject: [PATCH 63/70] Ignore wandb results in gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 7dbc9185..6ee49040 100644 --- a/.gitignore +++ b/.gitignore @@ -103,6 +103,7 @@ venv/ /docs/res/logo/old_logo/ /reqs/ /loss_plots/ +/wandb/ notebooks/csv_cell_plot.html notebooks/full_plot.html *.csv From c43c9955b3865d57ac12d7a6d11fd59d0396f814 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 15:05:07 +0200 Subject: [PATCH 64/70] Enable GH Actions tests on branch temporarily --- .github/workflows/test_and_deploy.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/test_and_deploy.yml b/.github/workflows/test_and_deploy.yml index fafb1719..b6c9d848 100644 --- a/.github/workflows/test_and_deploy.yml +++ b/.github/workflows/test_and_deploy.yml @@ -7,6 +7,7 @@ on: push: branches: - main + - cy/wnet-train tags: - "v*" # Push events to matching v*, i.e. 
v1.0, v20.15.10 pull_request: From f12577a3655c74b375eecbea26f36780b160a66a Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 15:41:36 +0200 Subject: [PATCH 65/70] Fixed deletion of Qt imports in interface --- napari_cellseg3d/interface.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index c3ecd50f..3c6699c4 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -7,7 +7,7 @@ # Qt # from qtpy.QtCore import QtWarningMsg from qtpy import QtCore -from qtpy.QtCore import QObject, Q +from qtpy.QtCore import QObject, Qt, QtWarningMsg, QUrl from qtpy.QtGui import QCursor, QDesktopServices, QTextCursor from qtpy.QtWidgets import ( QAbstractSpinBox, From b4b86f8574a6ee5a8e4df98a0db6851a52348a74 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 15:46:19 +0200 Subject: [PATCH 66/70] Reverted include_background=True in Dice --- napari_cellseg3d/code_models/worker_training.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 39b730c5..21df039d 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -154,7 +154,7 @@ def __init__( self.config = worker_config self.dice_metric = DiceMetric( - include_background=True, reduction="mean", get_not_nans=False + include_background=False, reduction="mean", get_not_nans=False ) self.normalize_function = utils.remap_image self.start_time = time.time() @@ -1309,7 +1309,7 @@ def get_patch_loader_func(num_samples): else provided_scheduler ) dice_metric = DiceMetric( - include_background=True, reduction="mean", ignore_empty=False + include_background=False, reduction="mean", ignore_empty=False ) best_metric = -1 From 6605081c20743e7e8efcca62a5d2377b6b2a2dc1 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 16:37:22 
+0200 Subject: [PATCH 67/70] Reintroduced best Dice channel seeking + refacto --- .../code_models/worker_training.py | 38 ++++++------------- napari_cellseg3d/utils.py | 23 +++++++++++ 2 files changed, 34 insertions(+), 27 deletions(-) diff --git a/napari_cellseg3d/code_models/worker_training.py b/napari_cellseg3d/code_models/worker_training.py index 21df039d..1c26c5c7 100644 --- a/napari_cellseg3d/code_models/worker_training.py +++ b/napari_cellseg3d/code_models/worker_training.py @@ -779,33 +779,17 @@ def eval(self, model, epoch) -> TrainingReport: f"Val decoder outputs shape: {val_decoder_outputs.shape}" ) - # dices = [] - # Find in which channel the labels are (avoid background) - # for channel in range(val_outputs.shape[1]): - # dices.append( - # utils.dice_coeff( - # y_pred=val_outputs[ - # 0, channel : (channel + 1), :, :, : - # ], - # y_true=val_labels[0], - # ) - # ) - # logger.debug(f"DICE COEFF: {dices}") - # max_dice_channel = torch.argmax( - # torch.Tensor(dices) - # ) - # logger.debug( - # f"MAX DICE CHANNEL: {max_dice_channel}" - # ) + max_dice_channel = utils.seek_best_dice_coeff_channel( + y_pred=val_outputs, y_true=val_labels + ) self.dice_metric( - y_pred=val_outputs, - # [ - # :, - # max_dice_channel : (max_dice_channel + 1), - # :, - # :, - # :, - # ], + y_pred=val_outputs[ + :, + max_dice_channel : (max_dice_channel + 1), + :, + :, + :, + ], y=val_labels, ) @@ -1282,7 +1266,7 @@ def get_patch_loader_func(num_samples): batch_size=self.config.batch_size, num_workers=self.config.num_workers, ) - logger.info("\nDone") + logger.debug("\nDone") logger.debug("Optimizer") optimizer = ( diff --git a/napari_cellseg3d/utils.py b/napari_cellseg3d/utils.py index 15eae20c..dabcda1f 100644 --- a/napari_cellseg3d/utils.py +++ b/napari_cellseg3d/utils.py @@ -229,6 +229,29 @@ def dice_coeff( ) +def seek_best_dice_coeff_channel(y_pred, y_true) -> torch.Tensor: + """Compute Dice-Sorensen coefficient between unsupervised model output and ground truth labels; + 
returns the channel with the highest dice coefficient. + Args: + y_true: Ground truth label + y_pred: Prediction label + Returns: best Dice coefficient channel + """ + dices = [] + # Find in which channel the labels are (to avoid background) + for channel in range(y_pred.shape[1]): + dices.append( + dice_coeff( + y_pred=y_pred[0, channel : (channel + 1), :, :, :], + y_true=y_true[0], + ) + ) + LOGGER.debug(f"DICE COEFF: {dices}") + max_dice_channel = torch.argmax(torch.Tensor(dices)) + LOGGER.debug(f"MAX DICE CHANNEL: {max_dice_channel}") + return max_dice_channel + + def correct_rotation(image): """Rotates the exes 0 and 2 in [DHW] section of image array""" extra_dims = len(image.shape) - 3 From f6711c39c3ec4b3913893d2bde37c4aecd1692b3 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 16:38:47 +0200 Subject: [PATCH 68/70] Improve filepath messages --- .../code_models/model_framework.py | 14 ++++++++++---- napari_cellseg3d/code_plugins/plugin_base.py | 19 ++++++++++++------- napari_cellseg3d/interface.py | 2 +- 3 files changed, 23 insertions(+), 12 deletions(-) diff --git a/napari_cellseg3d/code_models/model_framework.py b/napari_cellseg3d/code_models/model_framework.py index 9bcd67a6..d4e7af06 100644 --- a/napari_cellseg3d/code_models/model_framework.py +++ b/napari_cellseg3d/code_models/model_framework.py @@ -245,16 +245,22 @@ def _toggle_weights_path(self): self.custom_weights_choice, self.weights_filewidget ) - def create_dataset_dict_no_labs(self): - """Creates unsupervised data dictionary for MONAI transforms and training.""" + def get_unsupervised_image_filepaths(self): volume_directory = Path( self.unsupervised_images_filewidget.text_field.text() ) + if not volume_directory.exists(): raise ValueError(f"Data folder {volume_directory} does not exist") - images_filepaths = sorted(Path.glob(volume_directory, "*.tif")) + return sorted(Path.glob(volume_directory, "*.tif")) + + def create_dataset_dict_no_labs(self): + """Creates unsupervised data 
dictionary for MONAI transforms and training.""" + images_filepaths = self.get_unsupervised_image_filepaths() if len(images_filepaths) == 0: - raise ValueError(f"Data folder {volume_directory} is empty") + raise ValueError( + f"Data folder {self.unsupervised_images_filewidget.text_field.text()} is empty" + ) logger.info("Images :") for file in images_filepaths: diff --git a/napari_cellseg3d/code_plugins/plugin_base.py b/napari_cellseg3d/code_plugins/plugin_base.py index 2fbbe8d3..90c61adf 100644 --- a/napari_cellseg3d/code_plugins/plugin_base.py +++ b/napari_cellseg3d/code_plugins/plugin_base.py @@ -389,7 +389,7 @@ def __init__( # Validation images widget self.unsupervised_images_filewidget = ui.FilePathWidget( description="Training directory", - file_function=self.load_validation_images_dataset, + file_function=self.load_unsup_images_dataset, parent=self, ) self.unsupervised_images_filewidget.setVisible(False) @@ -421,19 +421,23 @@ def load_dataset_paths(self): def load_image_dataset(self): """Show file dialog to set :py:attr:`~images_filepaths`""" filenames = self.load_dataset_paths() - logger.debug(f"image filenames : {filenames}") if filenames: + logger.info("Images loaded :") + for f in filenames: + logger.info(f"{str(Path(f).name)}") self.images_filepaths = [str(path) for path in sorted(filenames)] path = str(Path(filenames[0]).parent) self.image_filewidget.text_field.setText(path) self.image_filewidget.check_ready() self._update_default_paths(path) - def load_validation_images_dataset(self): + def load_unsup_images_dataset(self): """Show file dialog to set :py:attr:`~val_images_filepaths`""" filenames = self.load_dataset_paths() - logger.debug(f"val filenames : {filenames}") if filenames: + logger.info("Images loaded (unsupervised training) :") + for f in filenames: + logger.info(f"{str(Path(f).name)}") self.validation_filepaths = [ str(path) for path in sorted(filenames) ] @@ -445,8 +449,10 @@ def load_validation_images_dataset(self): def 
load_label_dataset(self): """Show file dialog to set :py:attr:`~labels_filepaths`""" filenames = self.load_dataset_paths() - logger.debug(f"labels filenames : {filenames}") if filenames: + logger.info("Labels loaded :") + for f in filenames: + logger.info(f"{str(Path(f).name)}") self.labels_filepaths = [str(path) for path in sorted(filenames)] path = str(Path(filenames[0]).parent) self.labels_filewidget.text_field.setText(path) @@ -477,13 +483,13 @@ def extract_dataset_paths(paths): return None return str(Path(paths[0]).parent) - def _check_all_filepaths(self): self.image_filewidget.check_ready() self.labels_filewidget.check_ready() self.results_filewidget.check_ready() self.unsupervised_images_filewidget.check_ready() + class BasePluginUtils(BasePluginFolder): """Small subclass used to have centralized widgets layer and result path selection in utilities""" @@ -516,4 +522,3 @@ def _update_default_paths(self, path=None): logger.debug(f"Trying to update default with {default_path}") if default_path is not None: self.utils_default_paths.append(default_path) - diff --git a/napari_cellseg3d/interface.py b/napari_cellseg3d/interface.py index 3c6699c4..fa7dedbf 100644 --- a/napari_cellseg3d/interface.py +++ b/napari_cellseg3d/interface.py @@ -1258,7 +1258,7 @@ def open_folder_dialog( ): default_path = utils.parse_default_path(possible_paths) - logger.info(f"Default : {default_path}") + logger.debug(f"Default : {default_path}") return QFileDialog.getExistingDirectory( widget, "Open directory", default_path # + "/.." 
) From cdc7dde35f995d62a478f7452a3221bb9e46184f Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 16:41:13 +0200 Subject: [PATCH 69/70] Fix unsup image loading when not validating --- .../code_plugins/plugin_model_training.py | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/napari_cellseg3d/code_plugins/plugin_model_training.py b/napari_cellseg3d/code_plugins/plugin_model_training.py index 4f980b8a..53ae5ce4 100644 --- a/napari_cellseg3d/code_plugins/plugin_model_training.py +++ b/napari_cellseg3d/code_plugins/plugin_model_training.py @@ -1,4 +1,5 @@ import shutil +import warnings from functools import partial from pathlib import Path from typing import TYPE_CHECKING, List @@ -431,13 +432,18 @@ def check_ready(self): * False and displays a warning if not """ - if ( - self.images_filepaths == [] - or self.labels_filepaths == [] - or len(self.images_filepaths) != len(self.labels_filepaths) - ): - logger.warning("Image and label paths are not correctly set") - return False + if not self.unsupervised_mode: + if ( + self.images_filepaths == [] + or self.labels_filepaths == [] + or len(self.images_filepaths) != len(self.labels_filepaths) + ): + logger.warning("Image and label paths are not correctly set") + return False + else: + if self.get_unsupervised_image_filepaths() == []: + logger.warning("Image paths are not correctly set") + return False return True def _toggle_unsupervised_mode(self, enabled=False): @@ -940,8 +946,9 @@ def start(self): if not self.check_ready(): # issues a warning if not ready err = "Aborting, please set all required paths" - self.log.print_and_log(err) + # self.log.print_and_log(err) logger.warning(err) + warnings.warn(err, stacklevel=1) return if self.worker is not None: From 328ef81ae9da5a615afd9b7dc39c35bf00d4f134 Mon Sep 17 00:00:00 2001 From: C-Achard Date: Thu, 21 Sep 2023 17:00:19 +0200 Subject: [PATCH 70/70] Fix training tests --- napari_cellseg3d/_tests/test_training.py | 2 ++ 1 file 
changed, 2 insertions(+) diff --git a/napari_cellseg3d/_tests/test_training.py b/napari_cellseg3d/_tests/test_training.py index 2fe49a76..e764ff37 100644 --- a/napari_cellseg3d/_tests/test_training.py +++ b/napari_cellseg3d/_tests/test_training.py @@ -25,10 +25,12 @@ def test_supervised_training(make_napari_viewer_proxy): viewer = make_napari_viewer_proxy() widget = Trainer(viewer) widget.log = LogFixture() + widget.model_choice.setCurrentIndex(0) widget.images_filepath = [] widget.labels_filepaths = [] + assert not widget.unsupervised_mode assert not widget.check_ready() widget.images_filepaths = [im_path_str]