Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
99 changes: 99 additions & 0 deletions examples/experimental/NumpyTensor.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Setting up Sandbox...\n",
"Done!\n"
]
}
],
"source": [
"import syft as sy\n",
"import torch as th\n",
"import numpy as np\n",
"\n",
"sy.create_sandbox(globals(), False, False)"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from syft.frameworks.torch.tensors.interpreters.numpy import NumpyTensor"
]
},
{
"cell_type": "code",
"execution_count": 46,
"metadata": {},
"outputs": [],
"source": [
"x = NumpyTensor(numpy_tensor=np.array([[1,2,3,4]])).wrap()\n",
"y = x.dot(x.transpose())\n",
"assert (y.child.child == np.array([[30]])).all()"
]
},
{
"cell_type": "code",
"execution_count": 38,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 42,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 42,
"metadata": {},
"output_type": "execute_result"
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
1 change: 1 addition & 0 deletions syft/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,7 @@
from syft.frameworks.torch.tensors.interpreters.crt_precision import CRTPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.autograd import AutogradTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
from syft.frameworks.torch.tensors.interpreters.numpy import create_numpy_tensor as NumpyTensor
from syft.frameworks.torch.tensors.interpreters.private import PrivateTensor
from syft.frameworks.torch.tensors.interpreters.large_precision import LargePrecisionTensor
from syft.frameworks.torch.tensors.interpreters.promise import PromiseTensor
Expand Down
4 changes: 4 additions & 0 deletions syft/frameworks/torch/hook/hook.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@
from syft.frameworks.torch.tensors.interpreters.autograd import AutogradTensor
from syft.frameworks.torch.tensors.interpreters.native import TorchTensor
from syft.frameworks.torch.tensors.interpreters.promise import PromiseTensor
from syft.frameworks.torch.tensors.interpreters.hook import HookedTensor
from syft.frameworks.torch.tensors.interpreters.paillier import PaillierTensor
from syft.frameworks.torch.tensors.decorators.logging import LoggingTensor
from syft.frameworks.torch.tensors.interpreters.precision import FixedPrecisionTensor
Expand Down Expand Up @@ -176,6 +177,9 @@ def __init__(
# Add all hooked tensor methods to LargePrecisionTensor tensor
self._hook_syft_tensor_methods(LargePrecisionTensor)

# Add all hooked tensor methods to NumpyTensor tensor
self._hook_syft_tensor_methods(HookedTensor)

# Add all built-in 'str' methods to String
self._hook_string_methods(owner=self.local_worker)

Expand Down
27 changes: 27 additions & 0 deletions syft/frameworks/torch/tensors/interpreters/hook.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
from syft.generic.frameworks.hook import hook_args
from syft.generic.tensor import AbstractTensor


class HookedTensor(AbstractTensor):
    """HookedTensor is an abstraction which should not be used directly on its own. Its purpose
    is only to allow other tensors to extend it so that they automatically have all of the Torch
    methods hooked without having to add them to the hook.py file.
    """

    def __init__(self, owner=None, id=None, tags=None, description=None, verbose=False):
        """Initializes a HookedTensor.

        Args:
            owner (BaseWorker): An optional BaseWorker object to specify the worker on which
                the tensor is located.
            id (str or int): An optional string or integer id of the HookedTensor.
            tags (list): list of tags for searching.
            description (str): a description of this tensor.
            verbose (bool): whether to enable verbose output; stored on the instance for
                subclasses to consult.
        """
        super().__init__(id=id, owner=owner, tags=tags, description=description)
        self.verbose = verbose


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(HookedTensor)
13 changes: 13 additions & 0 deletions syft/frameworks/torch/tensors/interpreters/native.py
Original file line number Diff line number Diff line change
Expand Up @@ -959,3 +959,16 @@ def decrypt(self, private_key):
"""

return self.child.decrypt(private_key)

def numpy_tensor(self):
    """Cast this data tensor to one with numpy as the underlying representation.

    The resulting tensor chain is Wrapper > NumpyTensor > np.ndarray.

    Returns:
        A wrapped NumpyTensor whose child holds the numpy equivalent of this tensor.

    Raises:
        Exception: if called on a wrapper tensor rather than a data tensor.
    """

    if self.is_wrapper:
        # A wrapper's .child can be an arbitrary syft tensor, which .numpy()
        # cannot convert — only a raw data tensor can be cast directly.
        # NOTE: the original raised Exception with TWO string arguments (a stray
        # comma split the message), so str(e) rendered as a tuple; the message
        # is now a single concatenated string.
        raise Exception(
            "Can only cast a data tensor to NumpyTensor. You called this "
            "on a wrapper. Add NumpyTensor to the chain by hand if you want "
            "this functionality."
        )
    return syft.NumpyTensor(self.numpy())
52 changes: 52 additions & 0 deletions syft/frameworks/torch/tensors/interpreters/numpy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
import numpy as np

from syft.generic.frameworks.hook import hook_args
from syft.generic.frameworks.overload import overloaded
from syft.frameworks.torch.tensors.interpreters.hook import HookedTensor


class NumpyTensor(HookedTensor):
    """NumpyTensor is a tensor which seeks to wrap the Numpy API with the PyTorch tensor API.
    This is useful because Numpy can offer a wide range of existing functionality ranging from
    large precision, custom scalar types, and polynomial arithmetic.
    """

    def __init__(
        self, numpy_tensor=None, owner=None, id=None, tags=None, description=None, verbose=False
    ):
        """Initializes a NumpyTensor.

        Args:
            numpy_tensor (np.array or list): The numpy array which this tensor should wrap.
                A plain Python list is converted to an np.array automatically.
            owner (BaseWorker): An optional BaseWorker object to specify the worker on which
                the tensor is located.
            id (str or int): An optional string or integer id of the NumpyTensor.
            tags (list): list of tags for searching.
            description (str): a description of this tensor.
            verbose (bool): whether to enable verbose output.
        """
        super().__init__(id=id, owner=owner, tags=tags, description=description)
        self.verbose = verbose

        # Convenience coercion so callers may pass a plain list.
        if isinstance(numpy_tensor, list):
            numpy_tensor = np.array(numpy_tensor)

        # The wrapped np.ndarray sits at .child, mirroring the usual syft chain.
        self.child = numpy_tensor

    @overloaded.method
    def mm(self, _self, other):
        # Torch's matrix multiply maps onto numpy's dot product. _self and other
        # are the unwrapped children (np.ndarray) supplied by the hook machinery.
        return _self.dot(other)

    @overloaded.method
    def transpose(self, _self, *dims):
        # TODO: the semantics of the .transpose() dimensions are a bit different
        # for Numpy than they are for PyTorch. Fix this.
        # Related: https://github.com/pytorch/pytorch/issues/7609
        return _self.transpose(*reversed(dims))


def create_numpy_tensor(numpy_tensor):
    """Build a NumpyTensor around ``numpy_tensor`` and wrap it in a Torch wrapper.

    The resulting chain is Wrapper > NumpyTensor > np.ndarray. This factory is
    what syft/__init__.py re-exports under the public name ``NumpyTensor``.
    """
    return NumpyTensor(numpy_tensor).wrap()


### Register the tensor with hook_args.py ###
hook_args.default_register_tensor(NumpyTensor)
2 changes: 1 addition & 1 deletion syft/grid/private_grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ def _query_encrypted_model_hosts(self, id: str) -> Tuple["NodeClient"]:
obj = obj.child

# Get a list of mpc nodes.
nodes = map(lambda x: hook.local_worker._known_workers.get(x), obj.child.keys(),)
nodes = map(lambda x: hook.local_worker._known_workers.get(x), obj.child.keys())

mpc_nodes.update(set(nodes))

Expand Down
5 changes: 1 addition & 4 deletions syft/workers/node_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -264,10 +264,7 @@ def delete_model(self, model_id: str) -> bool:
Returns:
result (bool) : If succeeded, return True.
"""
message = {
REQUEST_MSG.TYPE_FIELD: REQUEST_MSG.DELETE_MODEL,
"model_id": model_id,
}
message = {REQUEST_MSG.TYPE_FIELD: REQUEST_MSG.DELETE_MODEL, "model_id": model_id}
response = self._forward_json_to_websocket_server_worker(message)
return self._return_bool_result(response)

Expand Down
90 changes: 90 additions & 0 deletions test/torch/tensors/test_numpy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,90 @@
import pytest
import torch as th
import numpy as np
import syft as sy


def test_numpy_add():
    """
    Test basic NumpyTensor addition
    """

    # Pass an explicit np.array for consistency with the other tests in this
    # module (the constructor also accepts a plain list), and compare against a
    # 2-D expected value so the assertion does not rely on broadcasting.
    x = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    y = x + x
    assert (y.child.child == np.array([[2, 4, 6, 8]])).all()


def test_numpy_subtract():
    """
    Test basic NumpyTensor subtraction
    """

    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    result = tensor - tensor
    expected = np.array([0, 0, 0, 0])
    assert (result.child.child == expected).all()


def test_numpy_multiply():
    """
    Test basic NumpyTensor multiplication
    """

    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    result = tensor * tensor
    expected = np.array([1, 4, 9, 16])
    assert (result.child.child == expected).all()


def test_numpy_divide():
    """
    Test basic NumpyTensor division
    """

    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    result = tensor / tensor
    expected = np.array([1, 1, 1, 1])
    assert (result.child.child == expected).all()


def test_numpy_dot():
    """
    Test basic NumpyTensor dot product
    """
    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    transposed = tensor.transpose()
    result = tensor.dot(transposed)
    assert (result.child.child == np.array([[30]])).all()


def test_numpy_mm():
    """
    Test basic NumpyTensor matrix multiply
    """
    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    transposed = tensor.transpose()
    result = tensor.mm(transposed)
    assert (result.child.child == np.array([[30]])).all()


def test_numpy_mm2():
    """
    Test @ based NumpyTensor matrix multiply
    """
    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    transposed = tensor.transpose()
    result = tensor @ transposed
    assert (result.child.child == np.array([[30]])).all()


def test_numpy_transpose():
    """
    Test basic NumpyTensor transpose
    """
    tensor = sy.NumpyTensor(numpy_tensor=np.array([[1, 2, 3, 4]]))
    result = tensor.transpose(0, 1)
    expected = np.array([[1], [2], [3], [4]])
    assert (result.child.child == expected).all()


def test_numpy_casting():
    """
    This tests the ability to cast a data tensor to a tensor chain
    with an underlying Numpy representation.
    """

    source = th.tensor([1, 2, 23, 4])
    result = source.numpy_tensor()
    assert (result.child.child == np.array([1, 2, 23, 4])).all()