
Commit e57a0ec
fix: small fix + found a problem
BrunoLiegiBastonLiegi committed Oct 23, 2024
1 parent d01ff34 commit e57a0ec
Showing 2 changed files with 16 additions and 8 deletions.
src/qiboml/models/pytorch.py: 2 changes (1 addition, 1 deletion)
@@ -64,7 +64,7 @@ def forward(self, x: torch.Tensor):
         self.circuit.set_parameters(list(self.parameters())[0])
         x = self.encoding(x) + self.circuit
         x = self.decoding(x)
-
+        print(f"QuantumModel x: {x}")
         return x
 
     @property
tests/test_models_interfaces.py: 22 changes (15 additions, 7 deletions)
@@ -45,10 +45,11 @@ def build_linear_layer(frontend, input_dim, output_dim):
         raise_error(RuntimeError, f"Unknown frontend {frontend}.")
 
 
-def build_sequential_model(frontend, layers):
+def build_sequential_model(frontend, layers, binary=False):
     if frontend.__name__ == "qiboml.models.pytorch":
         activation = frontend.torch.nn.Threshold(1, 0)
-        return frontend.torch.nn.Sequential(*(layers[:1] + [activation] + layers[1:]))
+        layers = layers[:1] + [activation] + layers[1:] if binary else layers
+        return frontend.torch.nn.Sequential(*layers)
     elif frontend.__name__ == "qiboml.models.keras":
         return frontend.keras.Sequential(layers)
     else:
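Note: the new binary flag makes the thresholding activation opt-in, so only the binary-target tests get the extra layer. A standalone sketch of the resulting torch-frontend behavior (layer sizes are illustrative, not from the repo):

    import torch

    layers = [torch.nn.Linear(4, 8), torch.nn.Linear(8, 1)]
    binary = True

    # Threshold(1, 0) replaces values <= 1 with 0 and passes larger
    # values through, squashing most of the first layer's output
    activation = torch.nn.Threshold(1, 0)
    if binary:
        layers = layers[:1] + [activation] + layers[1:]
    model = torch.nn.Sequential(*layers)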
@@ -78,7 +79,8 @@ def train_model(frontend, model, data, target):
             ep += 1
             avg_grad = 0.0
             avg_loss = 0.0
-            for x, y in zip(data, target):
+            permutation = frontend.torch.randint(0, len(data), (len(data),))
+            for x, y in zip(data[permutation], target[permutation]):
                 optimizer.zero_grad()
                 loss = loss_f(model(x), y)
                 loss.backward()
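A caveat on the shuffling added here: torch.randint draws indices with replacement, so despite its name, permutation is a bootstrap resample rather than a permutation; within an epoch some samples can appear twice and others not at all. If an exact reshuffle is intended, torch.randperm is the usual choice. A minimal sketch of the difference:

    import torch

    data = torch.arange(10)

    # As in the diff: len(data) indices drawn with replacement,
    # so duplicates (and omissions) are possible
    resampled = data[torch.randint(0, len(data), (len(data),))]

    # A true shuffle: every index appears exactly once
    shuffled = data[torch.randperm(len(data))]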
@@ -106,13 +108,15 @@ def train_model(frontend, model, data, target):
 def eval_model(frontend, model, data, target=None):
     loss = None
     outputs = []
+
     if frontend.__name__ == "qiboml.models.pytorch":
         loss_f = torch.nn.MSELoss()
         with torch.no_grad():
             for x in data:
                 outputs.append(model(x))
         shape = model(data[0]).shape
         outputs = frontend.torch.vstack(outputs).reshape((data.shape[0],) + shape)
+
     elif frontend.__name__ == "qiboml.models.keras":
         loss_f = frontend.keras.losses.MeanSquaredError(
             reduction="sum_over_batch_size",
@@ -129,7 +133,7 @@ def random_parameters(frontend, model):
     if frontend.__name__ == "qiboml.models.pytorch":
         new_params = {}
         for k, v in model.state_dict().items():
-            new_params.update({k: v + frontend.torch.randn(v.shape) / 10})
+            new_params.update({k: v + frontend.torch.randn(v.shape) / 2})
     elif frontend.__name__ == "qiboml.models.keras":
         new_params = [frontend.tf.random.uniform(model.get_weights()[0].shape)]
     return new_params
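The only change here is the noise scale: random target parameters now sit farther from the initial ones (Gaussian noise with standard deviation 1/2 instead of 1/10), presumably so the target outputs are easier to tell apart from the initial ones. The same perturbation in plain torch:

    import torch

    model = torch.nn.Linear(3, 2)

    # Offset every tensor in the state dict by Gaussian noise;
    # the divisor sets how far the new parameters stray
    new_params = {k: v + torch.randn(v.shape) / 2 for k, v in model.state_dict().items()}
    model.load_state_dict(new_params)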
@@ -154,6 +158,11 @@ def prepare_targets(frontend, model, data):
     init_params = get_parameters(frontend, model)
     set_parameters(frontend, model, target_params)
     target, _ = eval_model(frontend, model, data)
+    # if len(target.unique(dim=0)) == 1:
+    #     breakpoint()
+    #     print(model)
+    #     print(f"Target: {target[0]}")
+    #     assert False
     set_parameters(frontend, model, init_params)
     return target
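The commented-out block above is a debugging probe and appears to be the "found a problem" half of the commit message: for some models every evaluated target comes out identical, which makes the downstream training test vacuous. The check it sketches, in isolation (the tensor contents are illustrative):

    import torch

    target = torch.zeros(200, 1)  # degenerate target: every row identical

    # unique(dim=0) collapses duplicate rows; a single surviving row
    # means the regression target carries no information
    if len(target.unique(dim=0)) == 1:
        raise AssertionError("all targets are identical")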

@@ -204,6 +213,7 @@ def test_encoding(backend, frontend, layer):
             q_model,
             build_linear_layer(frontend, 2**nqubits, 1),
         ],
+        binary=binary,
     )
     target = prepare_targets(frontend, model, data)
     backprop_test(frontend, model, data, target)
@@ -243,9 +253,7 @@ def test_decoding(backend, frontend, layer, analytic):
     if not decoding_layer.analytic:
         pytest.skip("PSR differentiation is not working yet.")
 
-    q_model = frontend.QuantumModel(
-        encoding_layer, training_layer, decoding_layer, differentiation="Jax"
-    )
+    q_model = frontend.QuantumModel(encoding_layer, training_layer, decoding_layer)
 
     data = random_tensor(frontend, (200, dim))
     target = prepare_targets(frontend, q_model, data)
