Skip to content

Commit

Permalink
Multiple changes to pass cli tests (CBLOF, skip solver, xfail, device management) (#21)
Browse files Browse the repository at this point in the history

- `test_parameters` added in datasets
- Device management for DIF Solver
- Removed GPU condition for skipping solver
  • Loading branch information
Jad-yehya authored Nov 18, 2024
1 parent 02343e7 commit d2f32d1
Show file tree
Hide file tree
Showing 8 changed files with 44 additions and 15 deletions.
4 changes: 4 additions & 0 deletions datasets/msl.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,10 @@ class Dataset(BaseDataset):
"debug": [False],
}

# Parameter grid used by benchopt's test suite (overrides `parameters`
# when running CLI tests); debug=True presumably loads a reduced subset
# of the MSL data so tests stay fast — TODO confirm against get_data.
test_parameters = {
    "debug": [True],
}

def get_data(self):
path = config.get_data_path(key="MSL")
# Check if the data is already here
Expand Down
4 changes: 4 additions & 0 deletions datasets/psm.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ class Dataset(BaseDataset):
"debug": [False],
}

# Parameter grid used by benchopt's test suite (overrides `parameters`
# when running CLI tests); debug=True presumably loads a reduced subset
# of the PSM data so tests stay fast — TODO confirm against get_data.
test_parameters = {
    "debug": [True],
}

def get_data(self):
# Check if the data is already here
path = config.get_data_path(key="PSM")
Expand Down
7 changes: 7 additions & 0 deletions datasets/simulated.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,13 @@ class Dataset(BaseDataset):
"n_anomaly": [90],
}

# Parameter grid used by benchopt's test suite (overrides `parameters`
# when running CLI tests); values appear to mirror the defaults above,
# keeping the simulated data small enough for CI.
test_parameters = {
    "n_samples": [500],
    "n_features": [5],
    "noise": [0.1],
    "n_anomaly": [90],
}

def get_data(self):
X_train, _ = make_regression(
n_samples=self.n_samples,
Expand Down
6 changes: 6 additions & 0 deletions datasets/smap.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,12 @@ class Dataset(BaseDataset):
"validation_size": [0.2],
}

# Parameter grid used by benchopt's test suite (overrides `parameters`
# when running CLI tests); debug=True presumably loads a trimmed SMAP
# dataset and n_splits is lowered so tests run quickly — TODO confirm.
test_parameters = {
    "debug": [True],
    "n_splits": [2],
    "validation_size": [0.2],
}

def get_data(self):
path = config.get_data_path(key="SMAP")

Expand Down
6 changes: 5 additions & 1 deletion solvers/cblof.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ class Solver(BaseSolver):
parameters = {
"contamination": [5e-4, 0.01, 0.02, 0.03, 0.04],
"window": [True],
"n_clusters": [10],
"window_size": [20],
"stride": [1],
}
Expand All @@ -26,7 +27,10 @@ class Solver(BaseSolver):
def set_objective(self, X_train, y_test, X_test):
    """Store the data splits and build the CBLOF detector.

    Parameters
    ----------
    X_train : array-like
        Training data (assumed anomaly-free; not verified here).
    y_test : array-like
        Ground-truth anomaly labels for the test split.
    X_test : array-like
        Test data on which anomalies will be detected.
    """
    self.X_train = X_train
    self.X_test = X_test
    self.y_test = y_test
    # Both hyper-parameters come from the solver's parameter grid.
    self.clf = CBLOF(contamination=self.contamination,
                     n_clusters=self.n_clusters)

def run(self, _):

Expand Down
12 changes: 8 additions & 4 deletions solvers/dif.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@
from benchopt import safe_import_context

with safe_import_context() as import_ctx:
from benchopt.utils.sys_info import get_cuda_version
from pyod.models.dif import DIF
import numpy as np

Expand All @@ -25,7 +26,10 @@ class Solver(BaseSolver):
def set_objective(self, X_train, y_test, X_test):
    """Store the data splits and build the DIF detector.

    The detector is placed on CUDA only when a CUDA toolkit is
    detected; otherwise DIF is constructed with its default device
    so the solver still runs on CPU-only machines.
    """
    self.X_train = X_train
    self.X_test = X_test
    self.y_test = y_test
    kwargs = {"contamination": self.contamination}
    if get_cuda_version() is not None:
        kwargs["device"] = "cuda"
    self.clf = DIF(**kwargs)

def run(self, _):

Expand Down Expand Up @@ -72,9 +76,9 @@ def run(self, _):

def skip(self, X_train, X_test, y_test):
    """Decide whether this solver should be skipped for the given data.

    Returns
    -------
    (bool, str or None)
        ``(True, reason)`` when the training set has fewer samples than
        one window, ``(False, None)`` otherwise.

    The former CUDA-availability check was removed on purpose (DIF now
    falls back to CPU in ``set_objective``), so the commented-out dead
    code that documented it is deleted here rather than kept around.
    """
    if X_train.shape[0] < self.window_size:
        return True, "Not enough samples to create a window"
    return False, None
Expand Down
8 changes: 4 additions & 4 deletions solvers/lstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,10 +147,10 @@ def run(self, _):
)

def skip(self, X_train, X_test, y_test):
    """Decide whether this solver should be skipped for the given data.

    Returns
    -------
    (bool, str or None)
        ``(True, reason)`` when the training set has fewer samples than
        one window, ``(False, None)`` otherwise.

    The commented-out CUDA-availability check is deleted: it was
    deliberately disabled so the LSTM solver also runs on CPU, and dead
    code should not linger in the body.
    """
    if X_train.shape[0] < self.window_size:
        return True, "Not enough samples to create a window."
    return False, None

Expand Down
12 changes: 6 additions & 6 deletions test_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,10 @@ def check_test_solver_install(solver_class):
if get_cuda_version() is None:
pytest.xfail("Deep IsolationForest needs a working GPU hardware.")

if solver_class.name.lower() == "lstm":
if get_cuda_version() is None:
pytest.xfail("LSTM needs a working GPU hardware.")
# if solver_class.name.lower() == "lstm":
# if get_cuda_version() is None:
# pytest.xfail("LSTM needs a working GPU hardware.")

if solver_class.name.lower() == "transformer":
if get_cuda_version() is None:
pytest.xfail("Transformer needs a working GPU hardware.")
# if solver_class.name.lower() == "transformer":
# if get_cuda_version() is None:
# pytest.xfail("Transformer needs a working GPU hardware.")

0 comments on commit d2f32d1

Please sign in to comment.