Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file modified demonstrations/adjoint_diff/scaling.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
23 changes: 14 additions & 9 deletions demonstrations/ensemble_multi_qpu.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@

from collections import Counter

import dask
import matplotlib.pyplot as plt
import numpy as np
import pennylane as qml
Expand Down Expand Up @@ -229,14 +230,14 @@ def circuit1(params, x=None):


##############################################################################
# We finally combine the two devices into a :class:`~.pennylane.QNodeCollection` that uses the
# We finally combine the two devices into a :class:`~.pennylane.QNode` list that uses the
# PyTorch interface:


qnodes = qml.QNodeCollection(
[qml.QNode(circuit0, dev0, interface="torch"),
qml.QNode(circuit1, dev1, interface="torch")]
)
# Build one torch-interface QNode per device so both QPUs evaluate the same
# inputs; the list is iterated jointly later (e.g. ``for q in qnodes``).
# NOTE(review): replaces the removed ``qml.QNodeCollection`` — a plain list is
# the supported pattern in newer PennyLane releases.
qnodes = [
    qml.QNode(circuit0, dev0, interface="torch"),
    qml.QNode(circuit1, dev1, interface="torch"),
]

##############################################################################
# Postprocessing into a prediction
Expand All @@ -245,19 +246,23 @@ def circuit1(params, x=None):
# The ``predict_point`` function below allows us to find the ensemble prediction, as well as keeping
# track of the individual predictions from each QPU.
#
# We include a ``parallel`` keyword argument for evaluating the :class:`~.pennylane.QNodeCollection`
# We include a ``parallel`` keyword argument for evaluating the :class:`~.pennylane.QNode` list
# in a parallel asynchronous manner. This feature requires the ``dask`` library, which can be
# installed using ``pip install "dask[delayed]"``. When ``parallel=True``, we are able to make
# predictions faster because we do not need to wait for one QPU to output before running on the
# other.


def decision(softmax):
    """Return the class choice for a softmax vector.

    The index of the largest entry is taken as the predicted class and
    returned as a plain Python ``int``.
    """
    winner = torch.argmax(softmax)
    return winner.item()


def predict_point(params, x_point=None, parallel=True):
results = qnodes(params, x=x_point, parallel=parallel)
if parallel:
results = tuple(dask.delayed(q)(params, x=x_point) for q in qnodes)
results = torch.tensor(dask.compute(*results, scheduler="threads"))
else:
results = tuple(q(params, x=x_point) for q in qnodes)
results = torch.tensor(results)
softmax = torch.nn.functional.softmax(results, dim=1)
choice = torch.where(softmax == torch.max(softmax))[0][0]
chosen_softmax = softmax[choice]
Expand Down Expand Up @@ -364,7 +369,7 @@ def accuracy(predictions, actuals):
#
# Training accuracy (ensemble): 0.824
# Training accuracy (QPU0): 0.648
# Training accuracy (QPU1): 0.28
# Training accuracy (QPU1): 0.296

##############################################################################

Expand Down
Binary file modified demonstrations/ensemble_multi_qpu/ensemble_multi_qpu_001.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified demonstrations/ensemble_multi_qpu/ensemble_multi_qpu_002.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified demonstrations/ensemble_multi_qpu/ensemble_multi_qpu_003.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file modified demonstrations/ensemble_multi_qpu/ensemble_multi_qpu_004.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion demonstrations/tutorial_quantum_transfer_learning.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,7 +381,7 @@ def forward(self, input_features):
q_out = torch.Tensor(0, n_qubits)
q_out = q_out.to(device)
for elem in q_in:
q_out_elem = torch.Tensor(quantum_net(elem, self.q_params)).float().unsqueeze(0)
q_out_elem = torch.hstack(quantum_net(elem, self.q_params)).float().unsqueeze(0)
q_out = torch.cat((q_out, q_out_elem))

# return the two-dimensional prediction from the postprocessing layer
Expand Down
Binary file modified demonstrations/vqe_parallel.npz
Binary file not shown.