
Commit

feat: drafted a compatible PSR and the custom pytorch autograd
BrunoLiegiBastonLiegi committed Sep 4, 2024
1 parent 51037fd commit a09f9b2
Showing 3 changed files with 68 additions and 3 deletions.
6 changes: 6 additions & 0 deletions src/qiboml/models/abstract.py
@@ -43,3 +43,9 @@ def parameters(self, params: ndarray):
    @property
    def circuit(self) -> Circuit:
        return self._circuit


def _run_layers(x: ndarray, layers: list[QuantumCircuitLayer]):
    """Propagate ``x`` through each layer's ``forward`` in sequence."""
    for layer in layers:
        x = layer.forward(x)
    return x
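As an aside (not part of the commit), the only contract ``_run_layers`` relies on is that every layer exposes ``forward(x)`` returning the transformed data, so layers compose by folding ``x`` through them. A minimal stand-alone sketch with made-up ``DoubleLayer``/``AddOneLayer`` stand-ins:

import numpy as np

class DoubleLayer:
    def forward(self, x):
        return 2 * x

class AddOneLayer:
    def forward(self, x):
        return x + 1

def _run_layers(x, layers):
    for layer in layers:
        x = layer.forward(x)
    return x

print(_run_layers(np.array([1.0, 2.0]), [DoubleLayer(), AddOneLayer()]))  # -> [3. 5.]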
23 changes: 20 additions & 3 deletions src/qiboml/models/pytorch.py
@@ -8,13 +8,14 @@
from qibo.config import raise_error

import qiboml.models.encoding_decoding as ed
from qiboml.models.abstract import QuantumCircuitLayer
from qiboml.models.abstract import QuantumCircuitLayer, _run_layers


@dataclass
class QuantumModel(torch.nn.Module):

    layers: list[QuantumCircuitLayer]
    differentiation: str = "psr"

    def __post_init__(self):
        super().__init__()
@@ -50,8 +51,10 @@ def forward(self, x: torch.Tensor):
        if self.backend.name != "pytorch":
            x = x.detach().numpy()
            x = self.backend.cast(x, dtype=x.dtype)
        if torch.is_grad_enabled():
            # hand the differentiation engine to the custom autograd Function
            x = QuantumModelAutoGrad.apply(x, self.layers, self.differentiation)
        else:
            x = _run_layers(x, self.layers)
        if self.backend.name != "pytorch":
            x = torch.as_tensor(np.array(x))
        return x
@@ -67,3 +70,17 @@ def backend(self) -> Backend:
    @property
    def output_shape(self):
        return self.layers[-1].output_shape


class QuantumModelAutoGrad(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x: torch.Tensor, layers: list[QuantumCircuitLayer], differentiation):
        ctx.save_for_backward(x)
        ctx.layers = layers
        ctx.differentiation = differentiation
        return _run_layers(x, layers)

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        (x,) = ctx.saved_tensors
        # `differentiation` is expected to expose an `evaluate(x, layers)` method (e.g. PSR);
        # layers and differentiation are non-tensor inputs, hence no gradient for them
        return grad_output * ctx.differentiation.evaluate(x, ctx.layers), None, None
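For reference (not part of the commit), the autograd pattern above can be exercised in isolation: forward runs an opaque computation, and backward multiplies the incoming gradient by one supplied externally, which is the role PSR.evaluate plays for the circuit. A self-contained toy, with the hypothetical name ``OpaqueSine`` and the analytic cosine standing in for the external gradient rule:

import torch

class OpaqueSine(torch.autograd.Function):

    @staticmethod
    def forward(ctx, x: torch.Tensor):
        ctx.save_for_backward(x)
        # imagine this being computed outside of torch (e.g. by a quantum backend)
        return torch.sin(x.detach())

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        (x,) = ctx.saved_tensors
        # gradient injected by hand, playing the role of PSR.evaluate
        return grad_output * torch.cos(x)

x = torch.tensor(0.3, requires_grad=True)
OpaqueSine.apply(x).backward()
print(x.grad, torch.cos(torch.tensor(0.3)))  # the two values coincide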
42 changes: 42 additions & 0 deletions src/qiboml/operations/differentiation.py
@@ -1,8 +1,50 @@
import numpy as np
from qibo import parameter
from qibo.backends import construct_backend
from qibo.config import raise_error
from qibo.hamiltonians.abstract import AbstractHamiltonian

from qiboml import ndarray
from qiboml.models.abstract import QuantumCircuitLayer, _run_layers


class PSR:

    def __init__(self):
        # defaults for the standard two-term shift rule (gates generated by
        # rotations with eigenvalues ±1/2): shift by pi/2, rescale the difference by 1/2
        self.epsilon = np.pi / 2
        self.scale_factor = 0.5

    def evaluate(self, x: ndarray, layers: list[QuantumCircuitLayer]):
        """Shift-rule gradients of the output w.r.t. each trainable layer's parameters."""
        gradients = []
        for layer in layers:
            if len(layer.parameters) == 0:
                continue
            parameters_bkup = layer.parameters.copy()
            gradients.append(
                [
                    self._evaluate_parameter(x, layers, layer, i, parameters_bkup)
                    for i in range(len(layer.parameters))
                ]
            )
        return gradients

    def _evaluate_parameter(self, x, layers, layer, index, parameters_bkup):
        outputs = []
        for shift in self._shift_parameters(layer.parameters, index, self.epsilon):
            layer.parameters = shift
            outputs.append(_run_layers(x, layers))
        layer.parameters = parameters_bkup
        return (outputs[0] - outputs[1]) * self.scale_factor

    @staticmethod
    def _shift_parameters(parameters: ndarray, index: int, epsilon: float):
        forward = parameters.copy()
        backward = parameters.copy()
        forward[index] += epsilon
        backward[index] -= epsilon
        return forward, backward


def parameter_shift(
    hamiltonian,
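As a sanity check of the shift rule itself (again not part of the commit): for |0⟩ evolved by RX(θ), the expectation of Z is cos θ, so the exact gradient is −sin θ, and the two-term rule with shift π/2 and prefactor 1/2 reproduces it with plain numpy. The helper names ``expectation_z`` and ``psr_gradient`` are illustrative only:

import numpy as np

def expectation_z(theta):
    # <0| RX(theta)^dagger Z RX(theta) |0> = cos(theta)
    return np.cos(theta)

def psr_gradient(theta, epsilon=np.pi / 2, scale_factor=0.5):
    # same structure as PSR._evaluate_parameter: two shifted evaluations, rescaled
    return scale_factor * (expectation_z(theta + epsilon) - expectation_z(theta - epsilon))

theta = 0.7
print(psr_gradient(theta), -np.sin(theta))  # both ≈ -0.644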
