Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
72 changes: 34 additions & 38 deletions examples/experimental/FL Training Plan/Create Plan.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@
},
{
"data": {
"text/plain": "<torch._C.Generator at 0x22bc39b3cb0>"
"text/plain": "<torch._C.Generator at 0x22f01652890>"
},
"metadata": {},
"output_type": "execute_result",
Expand Down Expand Up @@ -352,7 +352,7 @@
"W2\n",
"b2\n",
"\n",
"Operations:\n",
"Actions:\n",
"var_2 = W1.t()\n",
"var_4 = X.__matmul__(var_2)\n",
"var_6 = var_4.__add__(b1)\n",
Expand All @@ -364,7 +364,7 @@
"var_14 = torch.log(var_13)\n",
"var_16 = y.__mul__(var_14)\n",
"var_17 = var_16.mean()\n",
"upd_W1 = var_17.__neg__()\n",
"loss = var_17.__neg__()\n",
"var_19 = var_13.__sub__(y)\n",
"var_21 = batch_size.__mul__(10)\n",
"var_22 = var_19.__truediv__(var_21)\n",
Expand All @@ -379,27 +379,27 @@
"var_31 = var_30.__matmul__(var_7)\n",
"var_32 = var_22.sum(0)\n",
"var_34 = var_28.__mul__(lr)\n",
"upd_W2 = W1.__sub__(var_34)\n",
"upd_W1 = W1.__sub__(var_34)\n",
"var_36 = var_29.__mul__(lr)\n",
"upd_b2 = b1.__sub__(var_36)\n",
"upd_b1 = b1.__sub__(var_36)\n",
"var_38 = var_31.__mul__(lr)\n",
"loss = W2.__sub__(var_38)\n",
"upd_W2 = W2.__sub__(var_38)\n",
"var_40 = var_32.__mul__(lr)\n",
"acc = b2.__sub__(var_40)\n",
"upd_b2 = b2.__sub__(var_40)\n",
"var_42 = torch.argmax(var_13, {'dim': 1})\n",
"var_43 = torch.argmax(y, {'dim': 1})\n",
"var_44 = var_42.eq(var_43)\n",
"var_45 = var_44.float()\n",
"var_46 = var_45.sum()\n",
"upd_b1 = var_46.__truediv__(batch_size)\n",
"acc = var_46.__truediv__(batch_size)\n",
"\n",
"Outputs:\n",
"loss\n",
"acc\n",
"upd_W1\n",
"upd_b1\n",
"upd_W2\n",
"upd_b2\n",
"loss\n",
"acc\n"
"upd_b2\n"
],
"output_type": "stream"
}
Expand All @@ -413,8 +413,8 @@
"]\n",
"\n",
"output_names = [\n",
" \"upd_W1\", \"upd_b1\", \"upd_W2\", \"upd_b2\",\n",
" \"loss\", \"acc\"\n",
" \"loss\", \"acc\",\n",
" \"upd_W1\", \"upd_b1\", \"upd_W2\", \"upd_b2\"\n",
"]\n",
"\n",
"def placeholderToStr(ph: PlaceHolder):\n",
Expand Down Expand Up @@ -449,19 +449,19 @@
"for inp in sorted(training_plan.find_placeholders(\"input\"), key=tag_sort(\"input\")):\n",
" print(argToStr(inp))\n",
"\n",
"print(\"\\nOperations:\")\n",
"for op in training_plan.operations:\n",
" expr = [argToStr(op.return_ids), ' = ']\n",
" if op.cmd_owner is None:\n",
" expr += [op.cmd_name, '(']\n",
"print(\"\\nActions:\")\n",
"for action in training_plan.actions:\n",
" expr = [argToStr(action.return_ids), ' = ']\n",
" if action.target is None:\n",
" expr += [action.name, '(']\n",
" else:\n",
" expr += [argToStr(op.cmd_owner), '.', op.cmd_name, '(']\n",
" expr += [argToStr(action.target), '.', action.name, '(']\n",
"\n",
" if len(op.cmd_args):\n",
" expr += argToStr(op.cmd_args)\n",
" if len(action.args):\n",
" expr += argToStr(action.args)\n",
"\n",
" if op.cmd_kwargs:\n",
" expr += ', ', str(op.cmd_kwargs)\n",
" if action.kwargs:\n",
" expr += ', ', str(action.kwargs)\n",
" \n",
" expr += [')']\n",
" print(\"\".join(expr))\n",
Expand Down Expand Up @@ -563,8 +563,16 @@
"source": [
"## Step 4: Serialize!\n",
"\n",
"Note that we don't serialize full Model, only weights.\n",
"State is suitable protobuf class to wrap list of Model params tensors. "
"Now it's time to serialize the model params and plans to protobuf and save them for later use:\n",
" * In the \"Execute Plan\" notebook, we load and execute these plans & model from Python.\n",
" * In the \"Host Plan\" notebook, we send these plans & model to PyGrid so they can be executed by another worker (e.g. syft.js).\n",
"\n",
"**NOTE:**\n",
" * We don't serialize the full Model, only its weights. How the Model is serialized is TBD.\n",
" State is a suitable protobuf class for wrapping the list of Model params tensors.\n",
" * A Plan containing a list of actions is serialized to syft_proto.execution.v1.Plan, \n",
" while a torchscript Plan is serialized to syft_proto.types.torch.v1.ScriptFunction. \n",
" In the future these will converge to syft_proto.execution.v1.Plan, see https://github.com/OpenMined/PySyft/issues/2994#issuecomment-595333791"
],
"metadata": {
"collapsed": false,
Expand Down Expand Up @@ -597,7 +605,7 @@
" state_placeholders=[PlaceHolder().instantiate(param) for param in model_params]\n",
")\n",
"\n",
"serializeToBinPb(hook.local_worker, model_params_state, \"model_params.pb\")"
"serializeToBinPb(hook.local_worker, model_params_state, \"model_params.pb\")\n"
],
"metadata": {
"collapsed": false,
Expand All @@ -606,18 +614,6 @@
"is_executing": false
}
}
},
{
"cell_type": "markdown",
"source": [
"In next notebook, we load and execute this plan."
],
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
}
}
}
],
"metadata": {
Expand Down
12 changes: 7 additions & 5 deletions examples/experimental/FL Training Plan/Execute Plan.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@
" \n",
"# Federated Learning Training Plan: Execute Plan\n",
"\n",
"Here we load and execute Plan and Model params created earlier in \"Create Plan\" notebook. "
"Here we load and execute the Plan and Model params created earlier in the \"Create Plan\" notebook. \n",
"\n",
"This represents a PySyft (Python) worker."
],
"metadata": {
"collapsed": false,
Expand Down Expand Up @@ -131,7 +133,7 @@
{
"name": "stdout",
"text": [
"Loaded plan (# of ops): 39\n",
"Loaded plan (# of actions): 39\n",
"Loaded tracescript plan code: def forward(self,\n",
" argument_1: Tensor,\n",
" argument_2: Tensor,\n",
Expand Down Expand Up @@ -177,7 +179,7 @@
"# unwrap tensors from State\n",
"model_params = model_params_state.tensors()\n",
"\n",
"print(\"Loaded plan (# of ops):\", len(training_plan_ops.operations))\n",
"print(\"Loaded plan (# of actions):\", len(training_plan_ops.actions))\n",
"print(\"Loaded tracescript plan code:\", training_plan_ts.code)\n",
"print(\"Loaded params count:\", len(model_params))"
],
Expand Down Expand Up @@ -258,7 +260,7 @@
{
"name": "stdout",
"text": [
"Epoch 1, avg loss: 0.207546, avg training accuracy: 0.443713\n"
"Epoch 1, avg loss: 0.207544, avg training accuracy: 0.441848\n"
],
"output_type": "stream"
}
Expand All @@ -282,7 +284,7 @@
{
"name": "stdout",
"text": [
"Epoch 1, avg loss: 0.157928, avg training accuracy: 0.732259\n"
"Epoch 1, avg loss: 0.157933, avg training accuracy: 0.731726\n"
],
"output_type": "stream"
}
Expand Down
Loading