diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..390ee12 --- /dev/null +++ b/404.html @@ -0,0 +1,503 @@ + + + + + + + + + + + + + + + + + + + + + SGP-Tools + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + + + + + + + +
+
+
+ + + + +
+
+ +

404 - Not found

+ +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/API-reference.html b/API-reference.html new file mode 100644 index 0000000..0950185 --- /dev/null +++ b/API-reference.html @@ -0,0 +1,14019 @@ + + + + + + + + + + + + + + + + + + + + + + + + + API reference - SGP-Tools + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + + Skip to content + + +
+
+ +
+ + + + + + + + + + + +
+ + + + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + + + + + + + + + +
+
+
+ + + + +
+
+ + + + + + + +

API reference

+ +

Image title +Image title

+ + +
+ + + + +
+ +

Sensor placement and informative path planning methods in this package:

+
    +
  • continuous_sgp: Provides an SGP-based sensor placement approach that is optimized using gradient descent
  • +
  • greedy_sgp: Provides an SGP-based sensor placement approach that is optimized using a greedy algorithm
  • +
  • cma_es: Provides a genetic algorithm (CMA-ES) based approach that maximizes mutual-information to get sensor placements
  • +
  • greedy_mi: Provides a greedy algorithm based approach that maximizes mutual-information to get sensor placements
  • +
  • bo: Provides a Bayesian optimization based approach that maximizes mutual-information to get sensor placements
  • +
+ + + +
+ + + + + + + + + + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ continuous_sgp(num_inducing, X_train, noise_variance, kernel, transform=None, Xu_init=None, Xu_time=None, orientation=False, **kwargs) + +

+ + +
+ +

Get sensor placement solutions using the Continuous-SGP method

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_inducing + int + +
+

Number of inducing points

+
+
+ required +
X_train + ndarray + +
+

(n, d); Unlabeled random sampled training points

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ None +
Xu_init + ndarray + +
+

(m, d); Initial inducing points

+
+
+ None +
Xu_time + ndarray + +
+

(t, d); Temporal inducing points used in spatio-temporal models

+
+
+ None +
orientation + bool + +
+

If True, an additional dimension is added to the + inducing points to represent the FoV orientation

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
sgpr + AugmentedSGPR + +
+

Optimized sparse Gaussian process model

+
+
loss + ndarray + +
+

Loss values computed during training

+
+
+ +
+ Source code in sgptools/models/continuous_sgp.py +
23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
def continuous_sgp(num_inducing, X_train, noise_variance, kernel, 
+                   transform=None,
+                   Xu_init=None, 
+                   Xu_time=None, 
+                   orientation=False,
+                   **kwargs):
+    """Get sensor placement solutions using the Continuous-SGP method
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]
+        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [[Jakkala and Akella, 2024](https://www.itskalvik.com/publication/sgp-ipp/)]
+
+    Args:
+        num_inducing (int): Number of inducing points
+        X_train (ndarray): (n, d); Unlabeled random sampled training points
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        transform (Transform): Transform object
+        Xu_init (ndarray): (m, d); Initial inducing points
+        Xu_time (ndarray): (t, d); Temporal inducing points used in spatio-temporal models
+        orientation (bool): If True, an additional dimension is added to the 
+                            inducing points to represent the FoV orientation
+
+    Returns:
+        sgpr (AugmentedSGPR): Optimized sparse Gaussian process model
+        loss (ndarray): Loss values computed during training
+    """
+    # Generate init inducing points
+    if Xu_init is None:
+        Xu_init = get_inducing_pts(X_train, num_inducing, 
+                                   orientation=orientation)
+
+    # Fit sparse GP
+    sgpr = AugmentedSGPR((X_train, np.zeros((len(X_train), 1)).astype(X_train.dtype)),
+                         noise_variance=noise_variance,
+                         kernel=kernel, 
+                         inducing_variable=Xu_init,
+                         inducing_variable_time=Xu_time,
+                         transform=transform)
+
+    # Train the model
+    loss = optimize_model(sgpr,
+                          kernel_grad=False, 
+                          **kwargs)
+
+    return sgpr, loss
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ GreedySGP + + +

+ + +
+ + +

Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations. +Used by get_greedy_sgp_sol function to compute the solution sensor placements using the Greedy-SGP method.

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_inducing + int + +
+

Number of inducing points

+
+
+ required +
S + ndarray + +
+

(n, d); Candidate sensor placement locations

+
+
+ required +
V + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

Data noise variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
Xu_fixed + ndarray + +
+

(m, d); Inducing points that are not optimized and are always + added to the inducing points set during loss function computation

+
+
+ None +
transform + Transform + +
+

Transform object

+
+
+ None +
+ +
+ Source code in sgptools/models/greedy_sgp.py +
20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
class GreedySGP:
+    """Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations.
+    Used by `get_greedy_sgp_sol` function to compute the solution sensor placements using the Greedy-SGP method.
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]
+
+    Args:
+        num_inducing (int): Number of inducing points
+        S (ndarray): (n, d); Candidate sensor placement locations
+        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): Data noise variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always 
+                                    added to the inducing points set during loss function computation
+        transform (Transform): Transform object
+    """
+    def __init__(self, num_inducing, S, V, noise_variance, kernel, 
+                 Xu_fixed=None, 
+                 transform=None):
+        self.gp = AugmentedSGPR((V, np.zeros((len(V), 1))),
+                                noise_variance=noise_variance,
+                                kernel=kernel, 
+                                inducing_variable=S[:num_inducing],
+                                transform=transform)
+        self.locs = S
+        self.Xu_fixed = Xu_fixed
+        self.num_inducing = num_inducing
+        self.inducing_dim = S.shape[1]
+
+    def bound(self, x):
+        """Computes the SGP's optimization bound using the inducing points `x` 
+
+        Args:
+            x (ndarray): (n, d); Inducing points
+
+        Returns:
+            elbo (float): Evidence lower bound/SGP's optimization bound value
+        """
+        x = np.array(x).reshape(-1).astype(int)
+        Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)
+        Xu *= self.locs[x][0]
+        Xu[-len(x):] = self.locs[x]
+
+        if self.Xu_fixed is not None:
+            Xu[:len(self.Xu_fixed)] = self.Xu_fixed
+
+        self.gp.inducing_variable.Z.assign(Xu)
+        return self.gp.elbo().numpy()
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ bound(x) + +

+ + +
+ +

Computes the SGP's optimization bound using the inducing points x

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
x + ndarray + +
+

(n, d); Inducing points

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
elbo + float + +
+

Evidence lower bound/SGP's optimization bound value

+
+
+ +
+ Source code in sgptools/models/greedy_sgp.py +
50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
def bound(self, x):
+    """Computes the SGP's optimization bound using the inducing points `x` 
+
+    Args:
+        x (ndarray): (n, d); Inducing points
+
+    Returns:
+        elbo (float): Evidence lower bound/SGP's optimization bound value
+    """
+    x = np.array(x).reshape(-1).astype(int)
+    Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)
+    Xu *= self.locs[x][0]
+    Xu[-len(x):] = self.locs[x]
+
+    if self.Xu_fixed is not None:
+        Xu[:len(self.Xu_fixed)] = self.Xu_fixed
+
+    self.gp.inducing_variable.Z.assign(Xu)
+    return self.gp.elbo().numpy()
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None) + +

+ + +
+ +

Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to +select sensor placements from a given discrete set of candidate locations.

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ required +
candidates + ndarray + +
+

(n, d); Candidate sensor placement locations

+
+
+ required +
X_train + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/greedy_sgp.py +
71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
+95
+96
+97
def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, 
+                       transform=None):
+    """Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to 
+    select sensor placements from a given discrete set of candidate locations.
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        candidates (ndarray): (n, d); Candidate sensor placement locations
+        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        transform (Transform): Transform object
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    sgp_model = GreedySGP(num_sensors, candidates, X_train, 
+                          noise_variance, kernel, transform=transform)
+    model = CustomSelection(num_sensors,
+                            sgp_model.bound,
+                            optimizer='naive',
+                            verbose=False)
+    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))
+    return candidates[sol.reshape(-1)]
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ GreedyMI + + +

+ + +
+ + +

Helper class to compute mutual information using a Gaussian process for a given set of sensor locations. +Used by get_greedy_mi_sol function to compute the solution sensor placements using the Greedy-MI method.

+ + +
+ Refer to the following papers for more details +
    +
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • +
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
S + ndarray + +
+

(n, d); Candidate sensor placement locations

+
+
+ required +
V + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ None +
+ +
+ Source code in sgptools/models/greedy_mi.py +
20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
class GreedyMI:
+    """Helper class to compute mutual information using a Gaussian process for a given set of sensor locations.
+    Used by `get_greedy_mi_sol` function to compute the solution sensor placements using the Greedy-MI method.
+
+    Refer to the following papers for more details:
+        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
+        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]
+
+    Args:
+        S (ndarray): (n, d); Candidate sensor placement locations
+        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        transform (Transform): Transform object
+    """
+    def __init__(self, S, V, noise_variance, kernel, transform=None):
+        self.S = S
+        self.V = V
+        self.kernel = kernel
+        self.input_dim = S.shape[1]
+        self.noise_variance = noise_variance
+        self.transform = transform
+
+    def mutual_info(self, x):
+        x = np.array(x).reshape(-1).astype(int)
+        A = self.S[x[:-1]].reshape(-1, self.input_dim)
+        y = self.S[x[-1]].reshape(-1, self.input_dim)
+
+        if len(A) == 0:
+            sigma_a = 1.0
+        else:
+            if self.transform is not None:
+                A = self.transform.expand(A)
+            a_gp = AugmentedGPR(data=(A, np.zeros((len(A), 1))),
+                                kernel=self.kernel,
+                                noise_variance=self.noise_variance,
+                                transform=self.transform)
+            _, sigma_a = a_gp.predict_f(y, aggregate_train=True)
+
+        # Remove locations in A to build A bar
+        V_ = self.V.copy()
+        V_rows = V_.view([('', V_.dtype)] * V_.shape[1])
+        if self.transform is not None:
+            A_ = self.transform.expand(self.S[x]).numpy()
+        else:
+            A_ = self.S[x]
+        A_rows = A_.view([('', V_.dtype)] * A_.shape[1])
+        V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])
+
+        self.v_gp = AugmentedGPR(data=(V_, np.zeros((len(V_), 1))), 
+                                 kernel=self.kernel,
+                                 noise_variance=self.noise_variance,
+                                 transform=self.transform)
+        _, sigma_v = self.v_gp.predict_f(y)
+
+        return (sigma_a/sigma_v).numpy().squeeze()
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + +
+ + +

+ get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None, optimizer='naive') + +

+ + +
+ +

Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). +Uses a greedy algorithm to select sensor placements from a given discrete set of candidate locations.

+ + +
+ Refer to the following papers for more details +
    +
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • +
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ required +
candidates + ndarray + +
+

(n, d); Candidate sensor placement locations

+
+
+ required +
X_train + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ None +
optimizer + str + +
+

Name of an optimizer available in the apricot library

+
+
+ 'naive' +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/greedy_mi.py +
 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
def get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, 
+                      transform=None, optimizer='naive'):
+    """Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). 
+    Uses a greedy algorithm to select sensor placements from a given discrete set of candidate locations.
+
+    Refer to the following papers for more details:
+        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
+        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        candidates (ndarray): (n, d); Candidate sensor placement locations
+        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        transform (Transform): Transform object
+        optimizer (str): Name of an optimizer available in the apricot library
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    mi_model = GreedyMI(candidates, X_train, noise_variance, kernel, transform)
+    model = CustomSelection(num_sensors,
+                            mi_model.mutual_info,
+                            optimizer=optimizer,
+                            verbose=False)
+    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))
+    return candidates[sol.reshape(-1)]
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ BayesianOpt + + +

+ + +
+ + +

Class for optimizing sensor placements using Bayesian Optimization

+ + +
+ Refer to the following papers for more details +
    +
  • UAV route planning for active disease classification [Vivaldini et al., 2019]
  • +
  • Occupancy map building through Bayesian exploration [Francis et al., 2019]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X_train + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ +
+ Source code in sgptools/models/bo.py +
21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
class BayesianOpt:
+    """Class for optimizing sensor placements using Bayesian Optimization
+
+    Refer to the following papers for more details:
+        - UAV route planning for active disease classification [Vivaldini et al., 2019]
+        - Occupancy map building through Bayesian exploration [Francis et al., 2019]
+
+    Args:
+        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+    """
+    def __init__(self, X_train, noise_variance, kernel):
+        self.X_train = X_train
+        self.noise_variance = noise_variance
+        self.kernel = kernel
+        self.num_dim = X_train.shape[-1]
+
+        # use the boundaries of the region as the search space
+        self.pbounds_dim = []
+        for i in range(self.num_dim):
+            self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))
+
+    def objective(self, **kwargs):
+        """Computes the objective function (mutual information) for the sensor placement problem
+        """
+        X = []
+        for i in range(len(kwargs)):
+            X.append(kwargs['x{}'.format(i)])
+        X = np.array(X).reshape(-1, self.num_dim)
+        return -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+
+    def optimize(self, 
+                 num_sensors=10, 
+                 max_steps=100,  
+                 X_init=None,
+                 init_points=10):
+        """Optimizes the sensor placements using Bayesian Optimization without any constraints
+
+        Args:
+            num_sensors (int): Number of sensor locations to optimize
+            max_steps (int): Maximum number of optimization steps 
+            X_init (ndarray): (m, d); Initial inducing points
+            init_points (int): How many steps of random exploration you want to perform. 
+                               Random exploration can help by diversifying the exploration space. 
+
+        Returns:
+            Xu (ndarray): (m, d); Solution sensor placement locations
+        """
+        if X_init is None:
+            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+        X_init = X_init.reshape(-1)
+
+        pbounds = {}
+        for i in range(self.num_dim*num_sensors):
+            pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]
+
+        optimizer = BayesianOptimization(
+            f=self.objective,
+            pbounds=pbounds,
+            verbose=0,
+            random_state=1,
+            allow_duplicate_points=True
+        )
+
+        optimizer.maximize(
+            init_points=init_points,
+            n_iter=max_steps,
+        )
+
+        sol = []
+        for i in range(self.num_dim*num_sensors):
+            sol.append(optimizer.max['params']['x{}'.format(i)])
+        return np.array(sol).reshape(-1, self.num_dim)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ objective(**kwargs) + +

+ + +
+ +

Computes the objective function (mutual information) for the sensor placement problem

+ +
+ Source code in sgptools/models/bo.py +
44
+45
+46
+47
+48
+49
+50
+51
def objective(self, **kwargs):
+    """Computes the objective function (mutual information) for the sensor placement problem
+    """
+    X = []
+    for i in range(len(kwargs)):
+        X.append(kwargs['x{}'.format(i)])
+    X = np.array(X).reshape(-1, self.num_dim)
+    return -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+
+
+
+ +
+ +
+ + +

+ optimize(num_sensors=10, max_steps=100, X_init=None, init_points=10) + +

+ + +
+ +

Optimizes the sensor placements using Bayesian Optimization without any constraints

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ 10 +
max_steps + int + +
+

Maximum number of optimization steps

+
+
+ 100 +
X_init + ndarray + +
+

(m, d); Initial inducing points

+
+
+ None +
init_points + int + +
+

How many steps of random exploration you want to perform. + Random exploration can help by diversifying the exploration space.

+
+
+ 10 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/bo.py +
53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
+94
def optimize(self, 
+             num_sensors=10, 
+             max_steps=100,  
+             X_init=None,
+             init_points=10):
+    """Optimizes the sensor placements using Bayesian Optimization without any constraints
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        max_steps (int): Maximum number of optimization steps 
+        X_init (ndarray): (m, d); Initial inducing points
+        init_points (int): How many steps of random exploration you want to perform. 
+                           Random exploration can help by diversifying the exploration space. 
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    if X_init is None:
+        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+    X_init = X_init.reshape(-1)
+
+    pbounds = {}
+    for i in range(self.num_dim*num_sensors):
+        pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]
+
+    optimizer = BayesianOptimization(
+        f=self.objective,
+        pbounds=pbounds,
+        verbose=0,
+        random_state=1,
+        allow_duplicate_points=True
+    )
+
+    optimizer.maximize(
+        init_points=init_points,
+        n_iter=max_steps,
+    )
+
+    sol = []
+    for i in range(self.num_dim*num_sensors):
+        sol.append(optimizer.max['params']['x{}'.format(i)])
+    return np.array(sol).reshape(-1, self.num_dim)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + +
+ + + +

+ CMA_ES + + +

+ + +
+ + +

Class for optimizing sensor placements using CMA-ES (a genetic algorithm)

+ + +
+ Refer to the following paper for more details +
    +
  • Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X_train + ndarray + +
+

(n, d); Locations in the environment used to approximate the monitoring regions

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
distance_budget + float + +
+

Distance budget for when treating the inducing points + as waypoints of a path

+
+
+ None +
num_robots + int + +
+

Number of robots, used when modeling + multi-robot IPP with a distance budget

+
+
+ 1 +
transform + Transform + +
+

Transform object

+
+
+ None +
+ +
+ Source code in sgptools/models/cma_es.py +
 22
+ 23
+ 24
+ 25
+ 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
class CMA_ES:
+    """Class for optimizing sensor placements using CMA-ES (a genetic algorithm)
+
+    Refer to the following paper for more details:
+        - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]
+
+    Args:
+        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        distance_budget (float): Distance budget for when treating the inducing points 
+                                 as waypoints of a path
+        num_robots (int): Number of robots, used when modeling 
+                          multi-robot IPP with a distance budget
+        transform (Transform): Transform object
+    """
+    def __init__(self, X_train, noise_variance, kernel,
+                 distance_budget=None,
+                 num_robots=1,
+                 transform=None):
+        self.boundaries = geometry.MultiPoint([[p[0], p[1]] for p in X_train]).convex_hull
+        self.X_train = X_train
+        self.noise_variance = noise_variance
+        self.kernel = kernel
+        self.num_dim = X_train.shape[-1]
+        self.distance_budget = distance_budget
+        self.num_robots = num_robots
+        self.transform = transform
+
+    def update(self, noise_variance, kernel):
+        """Update GP noise variance and kernel function parameters
+
+        Args:
+            noise_variance (float): data variance
+            kernel (gpflow.kernels.Kernel): gpflow kernel function
+        """
+        self.noise_variance = noise_variance
+        self.kernel = kernel
+
+    def constraint(self, X):
+        """Constraint function for the optimization problem (constraint to limit the boundary of the region)
+        Does not work well with CMA-ES as it is a step function and is not continuous
+
+        Args:
+            X (ndarray): (n, d); Current sensor placement locations
+        """
+        X = np.array(X).reshape(-1, self.num_dim)
+        lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]
+        lagrangian = np.logical_not(lagrangian).astype(float)
+        return lagrangian
+
+    def distance_constraint(self, X):
+        """Constraint function for the optimization problem (constraint to limit the total travel distance)
+        Does not work well with CMA-ES as it is a step function and is not continuous
+
+        Args:
+            X (ndarray): (n, d); Current sensor placement locations
+        """
+        X = np.array(X).reshape(self.num_robots, -1, self.num_dim)
+        dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)
+        lagrangian = dists - self.distance_budget
+        lagrangian_mask = np.logical_not(lagrangian <= 0)
+        lagrangian[lagrangian_mask] = 0
+        lagrangian = np.sum(lagrangian)
+        return lagrangian
+
+    def objective(self, X):
+        """Objective function (GP-based Mutual Information)
+
+        Args:
+            X (ndarray): (n, d); Initial sensor placement locations
+        """
+        # MI does not depend on waypoint order (reshape to -1, num_dim)
+        X = np.array(X).reshape(-1, self.num_dim)
+        if self.transform is not None:
+            X = self.transform.expand(X, 
+                                      expand_sensor_model=False).numpy()
+
+        try:
+            mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+        except:
+        mi = 0.0 # if the Cholesky decomposition fails
+        return mi
+
+    def optimize(self, 
+                 num_sensors=10, 
+                 max_steps=5000, 
+                 tol=1e-11, 
+                 X_init=None):
+        """Optimizes the SP objective function using CMA-ES without any constraints
+
+        Args:
+            num_sensors (int): Number of sensor locations to optimize
+            max_steps (int): Maximum number of optimization steps
+            tol (float): Convergence tolerance to decide when to stop optimization
+            X_init (ndarray): (m, d); Initial inducing points
+
+        Returns:
+            Xu (ndarray): (m, d); Solution sensor placement locations
+        """
+        sigma0 = 1.0
+
+        if X_init is None:
+            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+        X_init = X_init.reshape(-1)
+
+        xopt, _ = cma.fmin2(self.objective, X_init, sigma0, 
+                            options={'maxfevals': max_steps,
+                                     'verb_disp': 0,
+                                     'tolfun': tol,
+                                     'seed': 1234},
+                            restarts=5)
+
+        xopt = np.array(xopt).reshape(-1, self.num_dim)
+        if self.transform is not None:
+            xopt = self.transform.expand(xopt, 
+                                         expand_sensor_model=False).numpy()
+
+        return xopt.reshape(-1, self.num_dim)
+
+    def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
+        """Optimizes the SP objective function using CMA-ES with a distance budget constraint
+
+        Args:
+            num_sensors (int): Number of sensor locations to optimize
+            max_steps (int): Maximum number of optimization steps
+            tol (float): Convergence tolerance to decide when to stop optimization
+
+        Returns:
+            Xu (ndarray): (m, d); Solution sensor placement locations
+        """
+        sigma0 = 1.0
+        idx = np.random.randint(len(self.X_train), size=num_sensors)
+        x_init = self.X_train[idx].reshape(-1)
+        cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)
+        xopt, _ = cma.fmin2(cfun, x_init, sigma0, 
+                            options={'maxfevals': max_steps,
+                                     'verb_disp': 0,
+                                     'tolfun': tol,
+                                     'seed': 1234},
+                            callback=cfun.update,
+                            restarts=5)
+        return xopt.reshape(-1, self.num_dim)
+
+    def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
+        """Optimizes the SP objective function using CMA-ES with the constraints
+        to ensure that the sensors are placed within the boundaries of the region
+
+        Args:
+            num_sensors (int): Number of sensor locations to optimize
+            max_steps (int): Maximum number of optimization steps
+            tol (float): Convergence tolerance to decide when to stop optimization
+
+        Returns:
+            Xu (ndarray): (m, d); Solution sensor placement locations
+        """
+        sigma0 = 1.0
+        idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)
+        x_init = self.X_train[idx].reshape(-1)
+        cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)
+        xopt, _ = cma.fmin2(cfun, x_init, sigma0, 
+                            options={'maxfevals': max_steps,
+                                     'verb_disp': 0,
+                                     'tolfun': tol,
+                                     'seed': 1234},
+                            callback=cfun.update,
+                            restarts=5)
+        return xopt.reshape(-1, self.num_dim)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ constraint(X) + +

+ + +
+ +

Constraint function for the optimization problem (constraint to limit the boundary of the region) +Does not work well with CMA-ES as it is a step function and is not continuous

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + ndarray + +
+

(n, d); Current sensor placement locations

+
+
+ required +
+ +
+ Source code in sgptools/models/cma_es.py +
61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
def constraint(self, X):
+    """Constraint function for the optimization problem (constraint to limit the boundary of the region)
+    Does not work well with CMA-ES as it is a step function and is not continuous
+
+    Args:
+        X (ndarray): (n, d); Current sensor placement locations
+    """
+    X = np.array(X).reshape(-1, self.num_dim)
+    lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]
+    lagrangian = np.logical_not(lagrangian).astype(float)
+    return lagrangian
+
+
+
+ +
+ +
+ + +

+ coptimize(num_sensors=10, max_steps=100, tol=1e-11) + +

+ + +
+ +

Optimizes the SP objective function using CMA-ES with the constraints +to ensure that the sensors are placed within the boundaries of the region

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ 10 +
max_steps + int + +
+

Maximum number of optimization steps

+
+
+ 100 +
tol + float + +
+

Convergence tolerance to decide when to stop optimization

+
+
+ 1e-11 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/cma_es.py +
166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
+    """Optimizes the SP objective function using CMA-ES with the constraints
+    to ensure that the sensors are placed within the boundaries of the region
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        max_steps (int): Maximum number of optimization steps
+        tol (float): Convergence tolerance to decide when to stop optimization
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    sigma0 = 1.0
+    idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)
+    x_init = self.X_train[idx].reshape(-1)
+    cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)
+    xopt, _ = cma.fmin2(cfun, x_init, sigma0, 
+                        options={'maxfevals': max_steps,
+                                 'verb_disp': 0,
+                                 'tolfun': tol,
+                                 'seed': 1234},
+                        callback=cfun.update,
+                        restarts=5)
+    return xopt.reshape(-1, self.num_dim)
+
+
+
+ +
+ +
+ + +

+ distance_constraint(X) + +

+ + +
+ +

Constraint function for the optimization problem (constraint to limit the total travel distance) +Does not work well with CMA-ES as it is a step function and is not continuous

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + ndarray + +
+

(n, d); Current sensor placement locations

+
+
+ required +
+ +
+ Source code in sgptools/models/cma_es.py +
73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
def distance_constraint(self, X):
+    """Constraint function for the optimization problem (constraint to limit the total travel distance)
+    Does not work well with CMA-ES as it is a step function and is not continuous
+
+    Args:
+        X (ndarray): (n, d); Current sensor placement locations
+    """
+    X = np.array(X).reshape(self.num_robots, -1, self.num_dim)
+    dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)
+    lagrangian = dists - self.distance_budget
+    lagrangian_mask = np.logical_not(lagrangian <= 0)
+    lagrangian[lagrangian_mask] = 0
+    lagrangian = np.sum(lagrangian)
+    return lagrangian
+
+
+
+ +
+ +
+ + +

+ doptimize(num_sensors=10, max_steps=100, tol=1e-11) + +

+ + +
+ +

Optimizes the SP objective function using CMA-ES with a distance budget constraint

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ 10 +
max_steps + int + +
+

Maximum number of optimization steps

+
+
+ 100 +
tol + float + +
+

Convergence tolerance to decide when to stop optimization

+
+
+ 1e-11 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/cma_es.py +
142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):
+    """Optimizes the SP objective function using CMA-ES with a distance budget constraint
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        max_steps (int): Maximum number of optimization steps
+        tol (float): Convergence tolerance to decide when to stop optimization
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    sigma0 = 1.0
+    idx = np.random.randint(len(self.X_train), size=num_sensors)
+    x_init = self.X_train[idx].reshape(-1)
+    cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)
+    xopt, _ = cma.fmin2(cfun, x_init, sigma0, 
+                        options={'maxfevals': max_steps,
+                                 'verb_disp': 0,
+                                 'tolfun': tol,
+                                 'seed': 1234},
+                        callback=cfun.update,
+                        restarts=5)
+    return xopt.reshape(-1, self.num_dim)
+
+
+
+ +
+ +
+ + +

+ objective(X) + +

+ + +
+ +

Objective function (GP-based Mutual Information)

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + ndarray + +
+

(n, d); Initial sensor placement locations

+
+
+ required +
+ +
+ Source code in sgptools/models/cma_es.py +
 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
def objective(self, X):
+    """Objective function (GP-based Mutual Information)
+
+    Args:
+        X (ndarray): (n, d); Initial sensor placement locations
+    """
+    # MI does not depend on waypoint order (reshape to -1, num_dim)
+    X = np.array(X).reshape(-1, self.num_dim)
+    if self.transform is not None:
+        X = self.transform.expand(X, 
+                                  expand_sensor_model=False).numpy()
+
+    try:
+        mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)
+    except:
+        mi = 0.0 # if the Cholesky decomposition fails
+    return mi
+
+
+
+ +
+ +
+ + +

+ optimize(num_sensors=10, max_steps=5000, tol=1e-11, X_init=None) + +

+ + +
+ +

Optimizes the SP objective function using CMA-ES without any constraints

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_sensors + int + +
+

Number of sensor locations to optimize

+
+
+ 10 +
max_steps + int + +
+

Maximum number of optimization steps

+
+
+ 5000 +
tol + float + +
+

Convergence tolerance to decide when to stop optimization

+
+
+ 1e-11 +
X_init + ndarray + +
+

(m, d); Initial inducing points

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Solution sensor placement locations

+
+
+ +
+ Source code in sgptools/models/cma_es.py +
106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
def optimize(self, 
+             num_sensors=10, 
+             max_steps=5000, 
+             tol=1e-11, 
+             X_init=None):
+    """Optimizes the SP objective function using CMA-ES without any constraints
+
+    Args:
+        num_sensors (int): Number of sensor locations to optimize
+        max_steps (int): Maximum number of optimization steps
+        tol (float): Convergence tolerance to decide when to stop optimization
+        X_init (ndarray): (m, d); Initial inducing points
+
+    Returns:
+        Xu (ndarray): (m, d); Solution sensor placement locations
+    """
+    sigma0 = 1.0
+
+    if X_init is None:
+        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)
+    X_init = X_init.reshape(-1)
+
+    xopt, _ = cma.fmin2(self.objective, X_init, sigma0, 
+                        options={'maxfevals': max_steps,
+                                 'verb_disp': 0,
+                                 'tolfun': tol,
+                                 'seed': 1234},
+                        restarts=5)
+
+    xopt = np.array(xopt).reshape(-1, self.num_dim)
+    if self.transform is not None:
+        xopt = self.transform.expand(xopt, 
+                                     expand_sensor_model=False).numpy()
+
+    return xopt.reshape(-1, self.num_dim)
+
+
+
+ +
+ +
+ + +

+ update(noise_variance, kernel) + +

+ + +
+ +

Update GP noise variance and kernel function parameters

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ +
+ Source code in sgptools/models/cma_es.py +
51
+52
+53
+54
+55
+56
+57
+58
+59
def update(self, noise_variance, kernel):
+    """Update GP noise variance and kernel function parameters
+
+    Args:
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+    """
+    self.noise_variance = noise_variance
+    self.kernel = kernel
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +

________________________

+ + +
+ + + + +
+ +

Core modules in this package:

+
    +
  • augmented_gpr: Provides a Gaussian process model with expand and aggregate functions
  • +
  • augmented_sgpr: Provides a sparse Gaussian process model with update, expand, and aggregate functions
  • +
  • osgpr: Provides a streaming sparse Gaussian process model along with initialization function
  • +
  • transformations: Provides transforms to model complex sensor field of views and handle informative path planning
  • +
+ + + +
+ + + + + + + + + + + +
+ +
+ +

+ + +
+ + + + +
+ +

Provides a Gaussian process model with expand and aggregate functions

+ + + +
+ + + + + + + + +
+ + + +

+ AugmentedGPR + + +

+ + +
+

+ Bases: GPR

+ + +

GPR model from the GPFlow library augmented to use a transform object's +expand and aggregate functions on the data points where necessary.

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + tuple + +
+

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ required +
+ +
+ Source code in sgptools/models/core/augmented_gpr.py +
28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
+89
+90
+91
+92
+93
class AugmentedGPR(GPR):
+    """GPR model from the GPFlow library augmented to use a transform object's
+    expand and aggregate functions on the data points where necessary.  
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
+        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
+
+    Args:
+        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        noise_variance (float): data variance
+        transform (Transform): Transform object
+    """
+    def __init__(
+        self,
+        *args,
+        transform,
+        **kwargs
+    ):
+        super().__init__(
+            *args,
+            **kwargs
+        )
+        if transform is None:
+            self.transform = Transform()
+        else:
+            self.transform = transform
+
+    def predict_f(
+        self, Xnew: InputData, 
+        full_cov: bool = True, 
+        full_output_cov: bool = False,
+        aggregate_train: bool = False,
+    ) -> MeanAndVariance:
+        assert_params_false(self.predict_f, full_output_cov=full_output_cov)
+        if self.transform is not None:
+            Xnew = self.transform.expand(Xnew)
+
+        X, Y = self.data
+        err = Y - self.mean_function(X)
+
+        kmm = self.kernel(X)
+        knn = self.kernel(Xnew, full_cov=full_cov)
+        kmn = self.kernel(X, Xnew)
+        kmm_plus_s = add_likelihood_noise_cov(kmm, self.likelihood, X)
+
+        if self.transform is not None:
+            kmn = self.transform.aggregate(tf.transpose(kmn))
+            kmn = tf.transpose(kmn)
+            knn = self.transform.aggregate(knn)
+
+        if aggregate_train:
+            kmm_plus_s = self.transform.aggregate(kmm_plus_s)
+            err = self.transform.aggregate(err)
+            # reduce kmn only if it was not reduced before
+            # which can happen when train and test data are the same size
+            if kmn.shape[0] != kmn.shape[1]:
+                kmn = self.transform.aggregate(kmn)
+
+        conditional = gpflow.conditionals.base_conditional
+        f_mean_zero, f_var = conditional(
+            kmn, kmm_plus_s, knn, err, full_cov=full_cov, white=False
+        )  # [N, P], [N, P] or [P, N, N]
+        f_mean = f_mean_zero + self.mean_function(Xnew)
+        return f_mean, f_var
+
+
+ + + +
+ + + + + + + + + + + +
+ +
+ +
+ + + + +
+ +
+ +

+ + +
+ + + + +
+ +

Provides a sparse Gaussian process model with update, expand, and aggregate functions

+ + + +
+ + + + + + + + +
+ + + +

+ AugmentedSGPR + + +

+ + +
+

+ Bases: SGPR

+ + +

SGPR model from the GPFlow library augmented to use a transform object's +expand and aggregate functions on the inducing points where necessary. The object +has an additional update function to update the kernel and noise variance parameters +(currently, the online updates part works only with RBF kernels).

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + tuple + +
+

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
inducing_variable + ndarray + +
+

(m, d); Initial inducing points

+
+
+ required +
transform + Transform + +
+

Transform object

+
+
+ required +
inducing_variable_time + ndarray + +
+

(m, d); Temporal dimensions of the inducing points, + used when modeling spatio-temporal IPP

+
+
+ None +
+ +
+ Source code in sgptools/models/core/augmented_sgpr.py +
 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
class AugmentedSGPR(SGPR):
+    """SGPR model from the GPFlow library augmented to use a transform object's
+    expand and aggregate functions on the inducing points where necessary. The object
+    has an additional update function to update the kernel and noise variance parameters 
+    (currently, the online updates part works only with RBF kernels).  
+
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
+        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
+
+    Args:
+        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        noise_variance (float): data variance
+        inducing_variable (ndarray): (m, d); Initial inducing points
+        transform (Transform): Transform object
+        inducing_variable_time (ndarray): (m, d); Temporal dimensions of the inducing points, 
+                                            used when modeling spatio-temporal IPP
+    """
+    def __init__(
+        self,
+        *args,
+        transform,
+        inducing_variable_time=None,
+        **kwargs
+    ):
+        super().__init__(
+            *args,
+            **kwargs
+        )
+        if transform is None:
+            self.transform = Transform()
+        else:
+            self.transform = transform
+
+        if inducing_variable_time is not None:
+            self.inducing_variable_time = inducingpoint_wrapper(inducing_variable_time)
+            self.transform.inducing_variable_time = self.inducing_variable_time
+        else:
+            self.inducing_variable_time = None
+
+    def update(self, noise_variance, kernel):
+        """Update SGP noise variance and kernel function parameters
+
+        Args:
+            noise_variance (float): data variance
+            kernel (gpflow.kernels.Kernel): gpflow kernel function
+        """
+        self.likelihood.variance.assign(noise_variance)
+        self.kernel.lengthscales.assign(kernel.lengthscales)
+        self.kernel.variance.assign(kernel.variance)
+
+    def _common_calculation(self) -> "SGPR.CommonTensors":
+        """
+        Matrices used in log-det calculation
+        :return: A , B, LB, AAT with :math:`LLᵀ = Kᵤᵤ , A = L⁻¹K_{uf}/σ, AAT = AAᵀ,
+            B = AAT+I, LBLBᵀ = B`
+            A is M x N, B is M x M, LB is M x M, AAT is M x M
+        """
+        x, _ = self.data
+
+        iv = self.inducing_variable.Z  # [M]
+        iv = self.transform.expand(iv)
+
+        kuf = self.kernel(iv, x)
+        kuf = self.transform.aggregate(kuf)
+
+        kuu = self.kernel(iv) + 1e-6 * tf.eye(tf.shape(iv)[0], dtype=iv.dtype)
+        kuu = self.transform.aggregate(kuu)
+
+        L = tf.linalg.cholesky(kuu)
+
+        sigma_sq = self.likelihood.variance
+        sigma = tf.sqrt(sigma_sq)
+
+        # Compute intermediate matrices
+        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
+        AAT = tf.linalg.matmul(A, A, transpose_b=True)
+        B = add_noise_cov(AAT, tf.cast(1.0, AAT.dtype))
+        LB = tf.linalg.cholesky(B)
+
+        return self.CommonTensors(sigma_sq, sigma, A, B, LB, AAT, L)
+
+    def elbo(self) -> tf.Tensor:
+        """
+        Construct a tensorflow function to compute the bound on the marginal
+        likelihood. For a derivation of the terms in here, see the associated
+        SGPR notebook.
+        """
+        common = self._common_calculation()
+        output_shape = tf.shape(self.data[-1])
+        num_data = to_default_float(output_shape[0])
+        output_dim = to_default_float(output_shape[1])
+        const = -0.5 * num_data * output_dim * np.log(2 * np.pi)
+        logdet = self.logdet_term(common)
+        quad = self.quad_term(common)
+        constraints = self.transform.constraints(self.inducing_variable.Z)
+        return const + logdet + quad + constraints
+
+    def predict_f(
+        self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
+    ) -> MeanAndVariance:
+
+        # could copy into posterior into a fused version
+        """
+        Compute the mean and variance of the latent function at some new points
+        Xnew. For a derivation of the terms in here, see the associated SGPR
+        notebook.
+        """
+        X_data, Y_data = self.data
+
+        iv = self.inducing_variable.Z
+        iv = self.transform.expand(iv)
+
+        num_inducing = tf.shape(iv)[0]
+
+        err = Y_data - self.mean_function(X_data)
+        kuf = self.kernel(iv, X_data)
+        kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)
+        Kus = self.kernel(iv, Xnew)
+        sigma = tf.sqrt(self.likelihood.variance)
+        L = tf.linalg.cholesky(kuu)
+        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
+        B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(
+            num_inducing, dtype=default_float()
+        )  # cache qinv
+        LB = tf.linalg.cholesky(B)
+        Aerr = tf.linalg.matmul(A, err)
+        c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
+        tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
+        tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
+        mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
+        if full_cov:
+            var = (
+                self.kernel(Xnew)
+                + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
+                - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
+            )
+            var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]
+        else:
+            var = (
+                self.kernel(Xnew, full_cov=False)
+                + tf.reduce_sum(tf.square(tmp2), 0)
+                - tf.reduce_sum(tf.square(tmp1), 0)
+            )
+            var = tf.tile(var[:, None], [1, self.num_latent_gps])
+
+        return mean + self.mean_function(Xnew), var
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ elbo() + +

+ + +
+ +

Construct a tensorflow function to compute the bound on the marginal +likelihood. For a derivation of the terms in here, see the associated +SGPR notebook.

+ +
+ Source code in sgptools/models/core/augmented_sgpr.py +
116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
def elbo(self) -> tf.Tensor:
+    """
+    Construct a tensorflow function to compute the bound on the marginal
+    likelihood. For a derivation of the terms in here, see the associated
+    SGPR notebook.
+    """
+    common = self._common_calculation()
+    output_shape = tf.shape(self.data[-1])
+    num_data = to_default_float(output_shape[0])
+    output_dim = to_default_float(output_shape[1])
+    const = -0.5 * num_data * output_dim * np.log(2 * np.pi)
+    logdet = self.logdet_term(common)
+    quad = self.quad_term(common)
+    constraints = self.transform.constraints(self.inducing_variable.Z)
+    return const + logdet + quad + constraints
+
+
+
+ +
+ +
+ + +

+ predict_f(Xnew, full_cov=False, full_output_cov=False) + +

+ + +
+ +

Compute the mean and variance of the latent function at some new points +Xnew. For a derivation of the terms in here, see the associated SGPR +notebook.

+ +
+ Source code in sgptools/models/core/augmented_sgpr.py +
132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
def predict_f(
+    self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False
+) -> MeanAndVariance:
+
+    # could copy into posterior into a fused version
+    """
+    Compute the mean and variance of the latent function at some new points
+    Xnew. For a derivation of the terms in here, see the associated SGPR
+    notebook.
+    """
+    X_data, Y_data = self.data
+
+    iv = self.inducing_variable.Z
+    iv = self.transform.expand(iv)
+
+    num_inducing = tf.shape(iv)[0]
+
+    err = Y_data - self.mean_function(X_data)
+    kuf = self.kernel(iv, X_data)
+    kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)
+    Kus = self.kernel(iv, Xnew)
+    sigma = tf.sqrt(self.likelihood.variance)
+    L = tf.linalg.cholesky(kuu)
+    A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma
+    B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(
+        num_inducing, dtype=default_float()
+    )  # cache qinv
+    LB = tf.linalg.cholesky(B)
+    Aerr = tf.linalg.matmul(A, err)
+    c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma
+    tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)
+    tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)
+    mean = tf.linalg.matmul(tmp2, c, transpose_a=True)
+    if full_cov:
+        var = (
+            self.kernel(Xnew)
+            + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)
+            - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)
+        )
+        var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]
+    else:
+        var = (
+            self.kernel(Xnew, full_cov=False)
+            + tf.reduce_sum(tf.square(tmp2), 0)
+            - tf.reduce_sum(tf.square(tmp1), 0)
+        )
+        var = tf.tile(var[:, None], [1, self.num_latent_gps])
+
+    return mean + self.mean_function(Xnew), var
+
+
+
+ +
+ +
+ + +

+ update(noise_variance, kernel) + +

+ + +
+ +

Update SGP noise variance and kernel function parameters

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ +
+ Source code in sgptools/models/core/augmented_sgpr.py +
74
+75
+76
+77
+78
+79
+80
+81
+82
+83
def update(self, noise_variance, kernel):
+    """Update SGP noise variance and kernel function parameters
+
+    Args:
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+    """
+    self.likelihood.variance.assign(noise_variance)
+    self.kernel.lengthscales.assign(kernel.lengthscales)
+    self.kernel.variance.assign(kernel.variance)
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +

+ + +
+ + + + +
+ +

Provides a streaming sparse Gaussian process model along with initialization function

+ + + +
+ + + + + + + + +
+ + + +

+ OSGPR_VFE + + +

+ + +
+

+ Bases: GPModel, InternalDataTrainingLossMixin

+ + +

Online Sparse Variational GP regression model from streaming_sparse_gp

+ + +
+ Refer to the following paper for more details +
    +
  • Streaming Gaussian process approximations [Bui et al., 2017]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + tuple + +
+

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
mu_old + ndarray + +
+

mean of old q(u); here u are the latents corresponding to the inducing points Z_old

+
+
+ required +
Su_old + ndarray + +
+

posterior covariance of old q(u)

+
+
+ required +
Kaa_old + ndarray + +
+

prior covariance of old q(u)

+
+
+ required +
Z_old + ndarray + +
+

(m_old, d): Old initial inducing points

+
+
+ required +
Z + ndarray + +
+

(m_new, d): New initial inducing points

+
+
+ required +
mean_function + function + +
+

GP mean function

+
+
+ None +
+ +
+ Source code in sgptools/models/core/osgpr.py +
 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
class OSGPR_VFE(GPModel, InternalDataTrainingLossMixin):
+    """Online Sparse Variational GP regression model from [streaming_sparse_gp](https://github.com/thangbui/streaming_sparse_gp/tree/master)
+
+    Refer to the following paper for more details:
+        - Streaming Gaussian process approximations [Bui et al., 2017]
+
+    Args:
+        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        mu_old (ndarray): mean of old `q(u)`; here `u` are the latents corresponding to the inducing points `Z_old`
+        Su_old (ndarray): posterior covariance of old `q(u)`
+        Kaa_old (ndarray): prior covariance of old `q(u)`
+        Z_old (ndarray): (m_old, d): Old initial inducing points
+        Z (ndarray): (m_new, d): New initial inducing points
+        mean_function (function): GP mean function
+    """
+    def __init__(self, data, kernel, mu_old, Su_old, Kaa_old, Z_old, Z, mean_function=None):
+        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)
+        likelihood = gpflow.likelihoods.Gaussian()
+        num_latent_gps = GPModel.calc_num_latent_gps_from_data(data, kernel, likelihood)
+        super().__init__(kernel, likelihood, mean_function, num_latent_gps)
+
+        self.inducing_variable = InducingPoints(Z)
+        self.num_data = self.X.shape[0]
+
+        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)
+        self.M_old = Z_old.shape[0]
+        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)
+        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)
+        self.Z_old = tf.Variable(Z_old, shape=tf.TensorShape(None), trainable=False)
+
+    def update(self, data):
+        """Configure the OSGPR to adapt to a new batch of data. 
+        Note: The OSGPR needs to be trained using gradient-based approaches after update.
+
+        Args:
+            data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)
+        """
+        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)
+        self.num_data = self.X.shape[0]
+
+        self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), 
+                                 shape=tf.TensorShape(None), 
+                                 trainable=False)
+
+        # Get posterior mean and covariance for the old inducing points
+        mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)
+        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)
+        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)
+
+        # Get the prior covariance matrix for the old inducing points
+        Kaa_old = self.kernel(self.Z_old)
+        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)
+
+    def _common_terms(self):
+        Mb = self.inducing_variable.num_inducing
+        Ma = self.M_old
+        # jitter = gpflow.default_jitter()
+        jitter = gpflow.utilities.to_default_float(1e-4)
+        sigma2 = self.likelihood.variance
+        sigma = tf.sqrt(sigma2)
+
+        Saa = self.Su_old
+        ma = self.mu_old
+
+        # a is old inducing points, b is new
+        # f is training points
+        # s is test points
+        Kbf = covariances.Kuf(self.inducing_variable, self.kernel, self.X)
+        Kbb = covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter)
+        Kba = covariances.Kuf(self.inducing_variable, self.kernel, self.Z_old)
+        Kaa_cur = gpflow.utilities.add_noise_cov(self.kernel(self.Z_old), jitter)
+        Kaa = gpflow.utilities.add_noise_cov(self.Kaa_old, jitter)
+
+        err = self.Y - self.mean_function(self.X)
+
+        Sainv_ma = tf.linalg.solve(Saa, ma)
+        Sinv_y = self.Y / sigma2
+        c1 = tf.matmul(Kbf, Sinv_y)
+        c2 = tf.matmul(Kba, Sainv_ma)
+        c = c1 + c2
+
+        Lb = tf.linalg.cholesky(Kbb)
+        Lbinv_c = tf.linalg.triangular_solve(Lb, c, lower=True)
+        Lbinv_Kba = tf.linalg.triangular_solve(Lb, Kba, lower=True)
+        Lbinv_Kbf = tf.linalg.triangular_solve(Lb, Kbf, lower=True) / sigma
+        d1 = tf.matmul(Lbinv_Kbf, Lbinv_Kbf, transpose_b=True)
+
+        LSa = tf.linalg.cholesky(Saa)
+        Kab_Lbinv = tf.linalg.matrix_transpose(Lbinv_Kba)
+        LSainv_Kab_Lbinv = tf.linalg.triangular_solve(
+            LSa, Kab_Lbinv, lower=True)
+        d2 = tf.matmul(LSainv_Kab_Lbinv, LSainv_Kab_Lbinv, transpose_a=True)
+
+        La = tf.linalg.cholesky(Kaa)
+        Lainv_Kab_Lbinv = tf.linalg.triangular_solve(
+            La, Kab_Lbinv, lower=True)
+        d3 = tf.matmul(Lainv_Kab_Lbinv, Lainv_Kab_Lbinv, transpose_a=True)
+
+        D = tf.eye(Mb, dtype=gpflow.default_float()) + d1 + d2 - d3
+        D = gpflow.utilities.add_noise_cov(D, jitter)
+        LD = tf.linalg.cholesky(D)
+
+        LDinv_Lbinv_c = tf.linalg.triangular_solve(LD, Lbinv_c, lower=True)
+
+        return (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
+                Lbinv_Kba, LDinv_Lbinv_c, err, d1)
+
+    def maximum_log_likelihood_objective(self):
+        """
+        Construct a tensorflow function to compute the bound on the marginal
+        likelihood. 
+        """
+
+        Mb = self.inducing_variable.num_inducing
+        Ma = self.M_old
+        jitter = gpflow.default_jitter()
+        # jitter = gpflow.utilities.to_default_float(1e-4)
+        sigma2 = self.likelihood.variance
+        sigma = tf.sqrt(sigma2)
+        N = self.num_data
+
+        Saa = self.Su_old
+        ma = self.mu_old
+
+        # a is old inducing points, b is new
+        # f is training points
+        Kfdiag = self.kernel(self.X, full_cov=False)
+        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
+            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()
+
+        LSa = tf.linalg.cholesky(Saa)
+        Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)
+
+        # constant term
+        bound = -0.5 * N * np.log(2 * np.pi)
+        # quadratic term
+        bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2
+        # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)
+        bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))
+        bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))
+        # log det term
+        bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))
+        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))
+
+        # delta 1: trace term
+        bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2
+        bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))
+
+        # delta 2: a and b difference
+        bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))
+        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))
+
+        Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)
+        Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)
+        Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)
+
+        bound += -0.5 * tf.reduce_sum(
+            tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))
+
+        return bound
+
+    def predict_f(self, Xnew, full_cov=False):
+        """
+        Compute the mean and variance of the latent function at some new points
+        Xnew. 
+        """
+
+        # jitter = gpflow.default_jitter()
+        jitter = gpflow.utilities.to_default_float(1e-4)
+
+        # a is old inducing points, b is new
+        # f is training points
+        # s is test points
+        Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
+        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
+            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()
+
+        Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)
+        LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)
+        mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)
+
+        if full_cov:
+            Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())
+            var1 = Kss
+            var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)
+            var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)
+            var = var1 + var2 + var3
+        else:
+            var1 = self.kernel(Xnew, full_cov=False)
+            var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)
+            var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)
+            var = var1 + var2 + var3
+
+        return mean + self.mean_function(Xnew), var
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ maximum_log_likelihood_objective() + +

+ + +
+ +

Construct a tensorflow function to compute the bound on the marginal +likelihood.

+ +
+ Source code in sgptools/models/core/osgpr.py +
137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
def maximum_log_likelihood_objective(self):
+    """
+    Construct a tensorflow function to compute the bound on the marginal
+    likelihood. 
+    """
+
+    Mb = self.inducing_variable.num_inducing
+    Ma = self.M_old
+    jitter = gpflow.default_jitter()
+    # jitter = gpflow.utilities.to_default_float(1e-4)
+    sigma2 = self.likelihood.variance
+    sigma = tf.sqrt(sigma2)
+    N = self.num_data
+
+    Saa = self.Su_old
+    ma = self.mu_old
+
+    # a is old inducing points, b is new
+    # f is training points
+    Kfdiag = self.kernel(self.X, full_cov=False)
+    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
+        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()
+
+    LSa = tf.linalg.cholesky(Saa)
+    Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)
+
+    # constant term
+    bound = -0.5 * N * np.log(2 * np.pi)
+    # quadratic term
+    bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2
+    # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)
+    bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))
+    bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))
+    # log det term
+    bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))
+    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))
+
+    # delta 1: trace term
+    bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2
+    bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))
+
+    # delta 2: a and b difference
+    bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))
+    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))
+
+    Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)
+    Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)
+    Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)
+
+    bound += -0.5 * tf.reduce_sum(
+        tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))
+
+    return bound
+
+
+
+ +
+ +
+ + +

+ predict_f(Xnew, full_cov=False) + +

+ + +
+ +

Compute the mean and variance of the latent function at some new points +Xnew.

+ +
+ Source code in sgptools/models/core/osgpr.py +
191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
def predict_f(self, Xnew, full_cov=False):
+    """
+    Compute the mean and variance of the latent function at some new points
+    Xnew. 
+    """
+
+    # jitter = gpflow.default_jitter()
+    jitter = gpflow.utilities.to_default_float(1e-4)
+
+    # a is old inducing points, b is new
+    # f is training points
+    # s is test points
+    Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)
+    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,
+        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()
+
+    Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)
+    LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)
+    mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)
+
+    if full_cov:
+        Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())
+        var1 = Kss
+        var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)
+        var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)
+        var = var1 + var2 + var3
+    else:
+        var1 = self.kernel(Xnew, full_cov=False)
+        var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)
+        var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)
+        var = var1 + var2 + var3
+
+    return mean + self.mean_function(Xnew), var
+
+
+
+ +
+ +
+ + +

+ update(data) + +

+ + +
+ +

Configure the OSGPR to adapt to a new batch of data. +Note: The OSGPR needs to be trained using gradient-based approaches after update.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + tuple + +
+

(X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)

+
+
+ required +
+ +
+ Source code in sgptools/models/core/osgpr.py +
60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
def update(self, data):
+    """Configure the OSGPR to adapt to a new batch of data. 
+    Note: The OSGPR needs to be trained using gradient-based approaches after update.
+
+    Args:
+        data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)
+    """
+    self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)
+    self.num_data = self.X.shape[0]
+
+    self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), 
+                             shape=tf.TensorShape(None), 
+                             trainable=False)
+
+    # Get posterior mean and covariance for the old inducing points
+    mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)
+    self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)
+    self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)
+
+    # Get the prior covariance matrix for the old inducing points
+    Kaa_old = self.kernel(self.Z_old)
+    self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ init_osgpr(X_train, num_inducing=10, lengthscales=1.0, variance=1.0, noise_variance=0.001) + +

+ + +
+ +

Initialize a VFE OSGPR model with an RBF kernel with +unit variance and lengthscales, and 0.001 noise variance. +Used in the Online Continuous SGP approach.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X_train + ndarray + +
+

(n, d); Unlabeled random sampled training points. + They only affect the initial inducing point locations, + i.e., limits them to the bounds of the data

+
+
+ required +
num_inducing + int + +
+

Number of inducing points

+
+
+ 10 +
lengthscales + ndarray or list + +
+

Kernel lengthscale of each dimension of the data

+
+
+ 1.0 +
variance + float + +
+

Kernel variance

+
+
+ 1.0 +
noise_variance + float + +
+

Data variance

+
+
+ 0.001 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
online_param + OSGPR_VFE + +
+

Initialized online sparse Gaussian process model

+
+
+ +
+ Source code in sgptools/models/core/osgpr.py +
226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
def init_osgpr(X_train, 
+               num_inducing=10, 
+               lengthscales=1.0, 
+               variance=1.0,
+               noise_variance=0.001):
+    """Initialize a VFE OSGPR model with an RBF kernel with 
+unit variance and lengthscales, and 0.001 noise variance.
+    Used in the Online Continuous SGP approach. 
+
+    Args:
+        X_train (ndarray): (n, d); Unlabeled random sampled training points. 
+                        They only affect the initial inducing point locations, 
+                        i.e., limits them to the bounds of the data
+        num_inducing (int): Number of inducing points
+        lengthscales (ndarray or list): Kernel lengthscale of each dimension of the data
+        variance (float): Kernel variance
+        noise_variance (float): Data variance
+
+    Returns:
+        online_param (OSGPR_VFE): Initialized online sparse Gaussian process model
+    """
+
+    y_train = np.zeros((len(X_train), 1), dtype=X_train.dtype)
+    Z_init = get_inducing_pts(X_train, num_inducing)
+    init_param = gpflow.models.SGPR((X_train, y_train),
+                                    gpflow.kernels.RBF(variance=variance, 
+                                                       lengthscales=lengthscales), 
+                                    inducing_variable=Z_init, 
+                                    noise_variance=noise_variance)
+
+    # Initialize the OSGPR model using the parameters from the SGPR model
+    # The X_train and y_train here will be overwritten in the online phase 
+    X_train = np.array([[0, 0], [0, 0]])
+    y_train = np.array([0, 0]).reshape(-1, 1)
+    Zopt = init_param.inducing_variable.Z.numpy()
+    mu, Su = init_param.predict_f(Zopt, full_cov=True)
+    Kaa = init_param.kernel(Zopt)
+    online_param = OSGPR_VFE((X_train[:2], y_train[:2]),
+                             init_param.kernel,
+                             mu, Su[0], Kaa,
+                             Zopt, Zopt)
+    online_param.likelihood.variance.assign(init_param.likelihood.variance)
+
+    return online_param
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ +

Provides transforms to model complex sensor field of views and handle informative path planning

+ + + +
+ + + + + + + + +
+ + + +

+ IPPTransform + + +

+ + +
+

+ Bases: Transform

+ + +

Transform to model IPP problems

+ + +
+ Usage details +
    +
  • For point sensing, set sampling_rate = 2
  • +
  • For continuous sensing, set sampling_rate > 2 (approx the data collected along the path)
  • +
  • For multi-robot case, set num_robots > 1
  • +
  • For onlineIPP use update_fixed to freeze the visited waypoints
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
sampling_rate + int + +
+

Number of points to sample between each pair of inducing points

+
+
+ 2 +
distance_budget + float + +
+

Distance budget for the path

+
+
+ None +
num_robots + int + +
+

Number of robots

+
+
+ 1 +
Xu_fixed + ndarray + +
+

(num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized

+
+
+ None +
num_dim + int + +
+

Dimension of the data collection environment

+
+
+ 2 +
sensor_model + Transform + +
+

Transform object to expand each inducing point to p points + approximating each sensor's FoV

+
+
+ None +
+ +
+ Source code in sgptools/models/core/transformations.py +
180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
+199
+200
+201
+202
+203
+204
+205
+206
+207
+208
+209
+210
+211
+212
+213
+214
+215
+216
+217
+218
+219
+220
+221
+222
+223
+224
+225
+226
+227
+228
+229
+230
+231
+232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
+246
+247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
+284
+285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
+302
+303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
+321
+322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
class IPPTransform(Transform):
+    """Transform to model IPP problems
+
+    Usage details: 
+        * For point sensing, set `sampling_rate = 2`
+        * For continuous sensing, set `sampling_rate > 2` (approx the data collected along the path)
+        * For multi-robot case, set `num_robots > 1`
+        * For onlineIPP use `update_fixed` to freeze the visited waypoints
+
+    Args:
+        sampling_rate (int): Number of points to sample between each pair of inducing points
+        distance_budget (float): Distance budget for the path
+        num_robots (int): Number of robots
+        Xu_fixed (ndarray): (num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized
+        num_dim (int): Dimension of the data collection environment
+        sensor_model (Transform): Transform object to expand each inducing point to `p` points 
+                                  approximating each sensor's FoV
+    """
+    def __init__(self, 
+                 sampling_rate=2, 
+                 distance_budget=None, 
+                 num_robots=1,
+                 Xu_fixed=None,
+                 num_dim=2,
+                 sensor_model=None,
+                 **kwargs):
+        super().__init__(**kwargs)
+        if sampling_rate < 2:
+            raise ValueError('Sampling rate must be greater than 2.')
+
+        self.sampling_rate = sampling_rate
+        self.distance_budget = distance_budget
+        self.num_robots = num_robots
+        self.num_dim = num_dim
+        self.sensor_model = sensor_model
+
+        # Disable aggregation if aggregation size was explicitly set to 0
+        if self.aggregation_size == 0:
+            self.aggregation_size = None
+        # Set aggregation size to sampling rate if aggregation size was not set
+        # and sampling rate is enabled (greater than 2)
+        elif self.aggregation_size is None and sampling_rate > 2:
+            self.aggregation_size = sampling_rate
+
+        # Initialize variable to store visited waypoints for onlineIPP
+        if Xu_fixed is not None:
+            self.Xu_fixed = tf.Variable(Xu_fixed, 
+                                        shape=tf.TensorShape(None), 
+                                        trainable=False)
+        else:
+            self.Xu_fixed = None
+
+    def update_Xu_fixed(self, Xu_fixed):
+        """Function to update the visited waypoints
+
+        Args:
+            Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)
+        """
+        self.num_fixed = Xu_fixed.shape[1]
+        if self.Xu_fixed is not None:
+            self.Xu_fixed.assign(Xu_fixed)
+        else:
+            # ToDo: Use binary mask of fixed size to avoid retracing
+            self.Xu_fixed = tf.Variable(Xu_fixed, 
+                                        shape=tf.TensorShape(None), 
+                                        trainable=False)
+
+    def expand(self, Xu, expand_sensor_model=True):
+        """Sample points between each pair of inducing points to form the path
+
+        Args:
+            Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space
+            expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, 
+                                        used for online IPP
+
+        Returns:
+            Xu (ndarray): Expansion transformed inducing points
+        """
+        # If using single-robot offline IPP with point sensing, return inducing points as is.
+        if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:
+            return Xu
+
+        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+
+        # If using online IPP, add visited waypoints that won't be optimized anymore
+        if self.Xu_fixed is not None:
+            Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)
+
+        if not expand_sensor_model:
+            return tf.reshape(Xu, (-1, self.num_dim))
+
+        # Interpolate additional inducing points between waypoints to approximate 
+        # the continuous data sensing model
+        if self.sampling_rate > 2:
+            Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)
+            Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])
+            Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+
+        if self.sensor_model is not None:
+            Xu = self.sensor_model.expand(Xu)
+            return Xu
+
+        Xu = tf.reshape(Xu, (-1, self.num_dim))
+        return Xu
+
+    def aggregate(self, k):
+        """Applies the aggregation transform to kernel matrices. Checks `sensor_model` 
+           and uses the appropriate aggregation transform. 
+
+        Args:
+            k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
+                        `m` is the number of inducing points,
+                        `p` is the number of points each inducing point is mapped,
+                        `n` is the number of training data points.
+
+        Returns:
+            k (tensor): (m, m)/(m, n); Aggregated kernel matrix
+        """
+        if self.sensor_model is not None:
+            return self.sensor_model.aggregate(k)
+        else:
+            return super().aggregate(k)
+
+    def constraints(self, Xu):
+        """Computes the distance constraint term that is added to the SGP's optimization function.
+        Each robot can be assigned a different distance budget.
+
+        Args:
+            Xu (ndarray): Inducing points from which to compute the distance constraints
+
+        Returns:
+            loss (float): distance constraint term
+        """
+        if self.distance_budget is None:
+            return 0.
+        else:
+            Xu = self.expand(Xu, expand_sensor_model=False)
+            dist = self.distance(Xu)-self.distance_budget
+            dist = tf.reduce_sum(tf.nn.relu(dist))
+            loss = -dist*self.constraint_weight
+            return loss
+
+    def distance(self, Xu):
+        """Computes the distance incurred by sequentially visiting the inducing points
+        ToDo: Change distance from 2d to nd. Currently limited to 2d 
+            to ensure the rotation angle is not included when using
+            a square FoV sensor.
+
+        Args:
+            Xu (ndarray): Inducing points from which to compute the path lengths
+
+        Returns:
+            dist (float): path lengths
+        """
+        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+        dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
+        dist = tf.reduce_sum(dist, axis=1)
+        return dist
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ aggregate(k) + +

+ + +
+ +

Applies the aggregation transform to kernel matrices. Checks sensor_model + and uses the appropriate aggregation transform.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
k + tensor + +
+

(mp, mp)/(mp, n); Kernel matrix. + m is the number of inducing points, + p is the number of points each inducing point is mapped, + n is the number of training data points.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
k + tensor + +
+

(m, m)/(m, n); Aggregated kernel matrix

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
285
+286
+287
+288
+289
+290
+291
+292
+293
+294
+295
+296
+297
+298
+299
+300
+301
def aggregate(self, k):
+    """Applies the aggregation transform to kernel matrices. Checks `sensor_model` 
+       and uses the appropriate aggregation transform. 
+
+    Args:
+        k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
+                    `m` is the number of inducing points,
+                    `p` is the number of points each inducing point is mapped,
+                    `n` is the number of training data points.
+
+    Returns:
+        k (tensor): (m, m)/(m, n); Aggregated kernel matrix
+    """
+    if self.sensor_model is not None:
+        return self.sensor_model.aggregate(k)
+    else:
+        return super().aggregate(k)
+
+
+
+ +
+ +
+ + +

+ constraints(Xu) + +

+ + +
+ +

Computes the distance constraint term that is added to the SGP's optimization function. +Each robot can be assigned a different distance budget.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

Inducing points from which to compute the distance constraints

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
loss + float + +
+

distance constraint term

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
303
+304
+305
+306
+307
+308
+309
+310
+311
+312
+313
+314
+315
+316
+317
+318
+319
+320
def constraints(self, Xu):
+    """Computes the distance constraint term that is added to the SGP's optimization function.
+    Each robot can be assigned a different distance budget.
+
+    Args:
+        Xu (ndarray): Inducing points from which to compute the distance constraints
+
+    Returns:
+        loss (float): distance constraint term
+    """
+    if self.distance_budget is None:
+        return 0.
+    else:
+        Xu = self.expand(Xu, expand_sensor_model=False)
+        dist = self.distance(Xu)-self.distance_budget
+        dist = tf.reduce_sum(tf.nn.relu(dist))
+        loss = -dist*self.constraint_weight
+        return loss
+
+
+
+ +
+ +
+ + +

+ distance(Xu) + +

+ + +
+ +

Computes the distance incurred by sequentially visiting the inducing points +ToDo: Change distance from 2d to nd. Currently limited to 2d + to ensure the rotation angle is not included when using + a square FoV sensor.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

Inducing points from which to compute the path lengths

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
dist + float + +
+

path lengths

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
322
+323
+324
+325
+326
+327
+328
+329
+330
+331
+332
+333
+334
+335
+336
+337
def distance(self, Xu):
+    """Computes the distance incured by sequentially visiting the inducing points
+    ToDo: Change distance from 2d to nd. Currently limited to 2d 
+        to ensure the rotation angle is not included when using
+        a square FoV sensor.
+
+    Args:
+        Xu (ndarray): Inducing points from which to compute the path lengths
+
+    Returns:
+        dist (float): path lengths
+    """
+    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+    dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)
+    dist = tf.reduce_sum(dist, axis=1)
+    return dist
+
+
+
+ +
+ +
+ + +

+ expand(Xu, expand_sensor_model=True) + +

+ + +
+ +

Sample points between each pair of inducing points to form the path

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space

+
+
+ required +
expand_sensor_model + bool + +
+

Only add the fixed inducing points without other sensor/path transforms, + used for online IPP

+
+
+ True +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

Expansion transformed inducing points

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
247
+248
+249
+250
+251
+252
+253
+254
+255
+256
+257
+258
+259
+260
+261
+262
+263
+264
+265
+266
+267
+268
+269
+270
+271
+272
+273
+274
+275
+276
+277
+278
+279
+280
+281
+282
+283
def expand(self, Xu, expand_sensor_model=True):
+    """Sample points between each pair of inducing points to form the path
+
+    Args:
+        Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space
+        expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, 
+                                    used for online IPP
+
+    Returns:
+        Xu (ndarray): Expansion transformed inducing points
+    """
+    # If using single-robot offline IPP with point sensing, return inducing points as is.
+    if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:
+        return Xu
+
+    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+
+    # If using online IPP, add visited waypoints that won't be optimized anymore
+    if self.Xu_fixed is not None:
+        Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)
+
+    if not expand_sensor_model:
+        return tf.reshape(Xu, (-1, self.num_dim))
+
+    # Interpolate additional inducing points between waypoints to approximate 
+    # the continuous data sensing model
+    if self.sampling_rate > 2:
+        Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)
+        Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])
+        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))
+
+    if self.sensor_model is not None:
+        Xu = self.sensor_model.expand(Xu)
+        return Xu
+
+    Xu = tf.reshape(Xu, (-1, self.num_dim))
+    return Xu
+
+
+
+ +
+ +
+ + +

+ update_Xu_fixed(Xu_fixed) + +

+ + +
+ +

Function to update the visited waypoints

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu_fixed + ndarray + +
+

numpy array (num_robots, num_visited_waypoints, num_dim)

+
+
+ required +
+ +
+ Source code in sgptools/models/core/transformations.py +
232
+233
+234
+235
+236
+237
+238
+239
+240
+241
+242
+243
+244
+245
def update_Xu_fixed(self, Xu_fixed):
+    """Function to update the visited waypoints
+
+    Args:
+        Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)
+    """
+    self.num_fixed = Xu_fixed.shape[1]
+    if self.Xu_fixed is not None:
+        self.Xu_fixed.assign(Xu_fixed)
+    else:
+        # ToDo: Use binary mask of fixed size to avoid retracing
+        self.Xu_fixed = tf.Variable(Xu_fixed, 
+                                    shape=tf.TensorShape(None), 
+                                    trainable=False)
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ SquareHeightTransform + + +

+ + +
+

+ Bases: Transform

+ + +

Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. +ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
num_points + int + +
+

Number of points along each side of the FoV

+
+
+ required +
distance_budget + float + +
+

Distance budget for the path

+
+
+ None +
+ +
+ Source code in sgptools/models/core/transformations.py +
340
+341
+342
+343
+344
+345
+346
+347
+348
+349
+350
+351
+352
+353
+354
+355
+356
+357
+358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
+390
+391
+392
+393
+394
+395
+396
+397
+398
+399
+400
+401
+402
+403
+404
class SquareHeightTransform(Transform):
+    """Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. 
+    ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform
+
+    Args:
+        num_points (int): Number of points along each side of the FoV
+        distance_budget (float): Distance budget for the path
+    """
+    def __init__(self, num_points, distance_budget=None, **kwargs):
+        super().__init__(**kwargs)
+        self.num_points = num_points
+        self.distance_budget = distance_budget
+
+        if self.aggregation_size == 0:
+            self.aggregation_size = None
+        elif self.aggregation_size is None:
+            self.aggregation_size = num_points**2
+
+    def expand(self, Xu):     
+        """
+        Applies the expansion transform to the inducing points
+
+        Args:
+            Xu (ndarray): (m, 3); Inducing points in the 3D position space.
+                        `m` is the number of inducing points,
+                        `3` is the dimension of the space (x, y, z)
+
+        Returns:
+            Xu (ndarray): (mp, 2); Inducing points in input space.
+                        `p` is the number of points each inducing point is mapped 
+                        to in order to form the FoV.
+        """
+        x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)
+        x = tf.squeeze(x)
+        y = tf.squeeze(y)
+        h = tf.squeeze(h)
+
+        delta = h / (self.num_points - 1)
+
+        pts = []
+        for i in range(self.num_points):
+            pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], 
+                                   [x + h/2, y - (h/2) + (delta * i)], 
+                                   self.num_points, 
+                                   axis=1))
+        xy = tf.concat(pts, axis=1)
+        xy = tf.transpose(xy, [2, 1, 0])
+        xy = tf.reshape(xy, [-1, 2])
+        xy = self._reshape(xy, tf.shape(Xu)[0])
+        return xy
+
+    def _reshape(self, X, num_inducing):
+        """Reorder the inducing points to be in the correct order for aggregation with square height FoV
+
+        Args:
+            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each 
+                        inducing point is mapped to in order to form the FoV.
+
+        Returns:
+            Xu (ndarray): (mp, 2); Reorder inducing points
+        """
+        X = tf.reshape(X, (num_inducing, -1, self.num_points, self.num_points, 2))
+        X = tf.transpose(X, (0, 2, 1, 3, 4))
+        X = tf.reshape(X, (-1, 2))
+        return X
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ expand(Xu) + +

+ + +
+ +

Applies the expansion transform to the inducing points

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(m, 3); Inducing points in the 3D position space. + m is the number of inducing points, + 3 is the dimension of the space (x, y, z)

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(mp, 2); Inducing points in input space. + p is the number of points each inducing point is mapped + to in order to form the FoV.

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
358
+359
+360
+361
+362
+363
+364
+365
+366
+367
+368
+369
+370
+371
+372
+373
+374
+375
+376
+377
+378
+379
+380
+381
+382
+383
+384
+385
+386
+387
+388
+389
def expand(self, Xu):     
+    """
+    Applies the expansion transform to the inducing points
+
+    Args:
+        Xu (ndarray): (m, 3); Inducing points in the 3D position space.
+                    `m` is the number of inducing points,
+                    `3` is the dimension of the space (x, y, z)
+
+    Returns:
+        Xu (ndarray): (mp, 2); Inducing points in input space.
+                    `p` is the number of points each inducing point is mapped 
+                    to in order to form the FoV.
+    """
+    x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)
+    x = tf.squeeze(x)
+    y = tf.squeeze(y)
+    h = tf.squeeze(h)
+
+    delta = h / (self.num_points - 1)
+
+    pts = []
+    for i in range(self.num_points):
+        pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], 
+                               [x + h/2, y - (h/2) + (delta * i)], 
+                               self.num_points, 
+                               axis=1))
+    xy = tf.concat(pts, axis=1)
+    xy = tf.transpose(xy, [2, 1, 0])
+    xy = tf.reshape(xy, [-1, 2])
+    xy = self._reshape(xy, tf.shape(Xu)[0])
+    return xy
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ SquareTransform + + +

+ + +
+

+ Bases: Transform

+ + +

Non-point Transform to model a square FoV. Only works for single robot cases. +ToDo: update expand function to handle multi-robot case.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
length + float + +
+

Length of the square FoV

+
+
+ required +
num_side + int + +
+

Number of points along each side of the FoV

+
+
+ required +
+ +
+ Source code in sgptools/models/core/transformations.py +
114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
class SquareTransform(Transform):
+    """Non-point Transform to model a square FoV. Only works for single robot cases. 
+    ToDo: update expand function to handle multi-robot case.
+
+    Args:
+        length (float): Length of the square FoV
+        num_side (int): Number of points along each side of the FoV
+    """
+    def __init__(self, length, num_side, **kwargs):
+        super().__init__(**kwargs)
+        self.length = length
+        self.num_side = num_side
+        self.length_factor=length/(self.num_side)
+        self.num_length = int(length/self.length_factor)
+
+        if self.aggregation_size == 0:
+            self.aggregation_size = None
+        elif self.aggregation_size is None:
+            self.aggregation_size = num_side**2
+
+    def expand(self, Xu):
+        """Applies the expansion transformation to the inducing points
+
+        Args:
+            Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.
+                            `m` is the number of inducing points,
+                            `3` is the dimension of the space (x, y, angle in radians)
+
+        Returns:
+            Xu (ndarray): (mp, 2); Inducing points in input space.
+                        `p` is the number of points each inducing point is mapped 
+                         to in order to form the FoV.
+        """
+        x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)
+        x = tf.squeeze(x)
+        y = tf.squeeze(y)
+        theta = tf.squeeze(theta)
+
+        points = []
+        for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
+            points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), 
+                                       (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], 
+                                      [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2), 
+                                       (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], 
+                                      self.num_side, axis=1))
+        xy = tf.concat(points, axis=1)
+        xy = tf.transpose(xy, [2, 1, 0])
+        xy = tf.reshape(xy, [-1, 2])
+        xy = self._reshape(xy, tf.shape(Xu)[1])
+        return xy
+
+    def _reshape(self, X, num_inducing):
+        """Reorder the inducing points to be in the correct order for aggregation with square FoV.
+
+        Args:
+            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each 
+                        inducing point is mapped to in order to form the FoV.
+
+        Returns:
+            Xu (ndarray): (mp, 2); Reorder inducing points
+        """
+        X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))
+        X = tf.transpose(X, (0, 2, 1, 3, 4))
+        X = tf.reshape(X, (-1, 2))
+        return X
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ expand(Xu) + +

+ + +
+ +

Applies the expansion transformation to the inducing points

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(1, m, 3); Inducing points in the position and orientation space. + m is the number of inducing points, + 3 is the dimension of the space (x, y, angle in radians)

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(mp, 2); Inducing points in input space. + p is the number of points each inducing point is mapped + to in order to form the FoV.

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
def expand(self, Xu):
+    """Applies the expansion transformation to the inducing points
+
+    Args:
+        Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.
+                        `m` is the number of inducing points,
+                        `3` is the dimension of the space (x, y, angle in radians)
+
+    Returns:
+        Xu (ndarray): (mp, 2); Inducing points in input space.
+                    `p` is the number of points each inducing point is mapped 
+                     to in order to form the FoV.
+    """
+    x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)
+    x = tf.squeeze(x)
+    y = tf.squeeze(y)
+    theta = tf.squeeze(theta)
+
+    points = []
+    for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):
+        points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), 
+                                   (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], 
+                                  [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2), 
+                                   (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], 
+                                  self.num_side, axis=1))
+    xy = tf.concat(points, axis=1)
+    xy = tf.transpose(xy, [2, 1, 0])
+    xy = tf.reshape(xy, [-1, 2])
+    xy = self._reshape(xy, tf.shape(Xu)[1])
+    return xy
+
+
+
+ +
+ + + +
+ +
+ +
+ +
+ + + +

+ Transform + + +

+ + +
+ + +

Base class for transformations of the inducing points, including expansion and aggregation transforms.

+ + +
+ Refer to the following papers for more details +
    +
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • +
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
aggregation_size + int + +
+

Number of consecutive inducing points to aggregate

+
+
+ None +
constraint_weight + float + +
+

Weight term that controls the importance of the + constraint terms in the SGP's optimization objective

+
+
+ 1.0 +
+ +
+ Source code in sgptools/models/core/transformations.py +
 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
class Transform:
+    """Base class for transformations of the inducing points, including expansion and aggregation transforms.
+
+    Refer to the following papers for more details:
+        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
+        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]
+
+    Args:
+        aggregation_size (int): Number of consecutive inducing points to aggregate
+        constraint_weight (float): Weight term that controls the importance of the 
+                                   constraint terms in the SGP's optimization objective 
+    """
+    def __init__(self, 
+                 aggregation_size=None, 
+                 constraint_weight=1.0,
+                 **kwargs):
+        self.aggregation_size = aggregation_size
+        self.constraint_weight = constraint_weight
+
+    def expand(self, Xu):
+        """Applies the expansion transform to the inducing points
+
+        Args:
+            Xu (ndarray): Expansion transformed inducing points
+        """
+        return Xu
+
+    def aggregate(self, k):
+        """Applies the aggregation transform to kernel matrices
+
+        Args:
+            k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
+                        `m` is the number of inducing points,
+                        `p` is the number of points each inducing point is mapped,
+                        `n` is the number of training data points.
+
+        Returns:
+            k (tensor): (m, m)/(m, n); Aggregated kernel matrix
+        """
+        if self.aggregation_size is None:
+            return k
+
+        if k.shape[0] == k.shape[1]:
+            # Handle Kuu which is a square matrix
+            k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)
+            k = tf.nn.avg_pool(k,
+                               ksize=[1, self.aggregation_size, self.aggregation_size, 1],
+                               strides=[1, self.aggregation_size, self.aggregation_size, 1],
+                               padding='VALID')
+            k = tf.squeeze(k, axis=[0, -1])
+        else:
+            # Handle Kuf which is a rectangular matrix
+            k = tf.expand_dims(k, axis=0)
+            k = tf.nn.avg_pool(k,
+                               ksize=[1, self.aggregation_size, 1],
+                               strides=[1, self.aggregation_size, 1],
+                               padding='VALID')
+            k = tf.squeeze(k, axis=[0])
+        return k
+
+    def constraints(self, Xu):
+        """Computes the constraint terms that are added to the SGP's optimization function
+
+        Args:
+            Xu (ndarray): Inducing points from which to compute the constraints
+
+        Returns:
+            c (float): constraint terms (eg., distance constraint)
+        """
+        return 0.
+
+    def distance(self, Xu):
+        """Computes the distance incured by sequentially visiting the inducing points
+
+        Args:
+            Xu (ndarray): Inducing points from which to compute the path length
+
+        Returns:
+            dist (float): path length
+        """
+        dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))
+        return dist
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ aggregate(k) + +

+ + +
+ +

Applies the aggregation transform to kernel matrices

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
k + tensor + +
+

(mp, mp)/(mp, n); Kernel matrix. + m is the number of inducing points, + p is the number of points each inducing point is mapped, + n is the number of training data points.

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
k + tensor + +
+

(m, m)/(m, n); Aggregated kernel matrix

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
def aggregate(self, k):
+    """Applies the aggregation transform to kernel matrices
+
+    Args:
+        k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
+                    `m` is the number of inducing points,
+                    `p` is the number of points each inducing point is mapped,
+                    `n` is the number of training data points.
+
+    Returns:
+        k (tensor): (m, m)/(m, n); Aggregated kernel matrix
+    """
+    if self.aggregation_size is None:
+        return k
+
+    if k.shape[0] == k.shape[1]:
+        # Handle Kuu which is a square matrix
+        k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)
+        k = tf.nn.avg_pool(k,
+                           ksize=[1, self.aggregation_size, self.aggregation_size, 1],
+                           strides=[1, self.aggregation_size, self.aggregation_size, 1],
+                           padding='VALID')
+        k = tf.squeeze(k, axis=[0, -1])
+    else:
+        # Handle Kuf which is a rectangular matrix
+        k = tf.expand_dims(k, axis=0)
+        k = tf.nn.avg_pool(k,
+                           ksize=[1, self.aggregation_size, 1],
+                           strides=[1, self.aggregation_size, 1],
+                           padding='VALID')
+        k = tf.squeeze(k, axis=[0])
+    return k
+
+
+
+ +
+ +
+ + +

+ constraints(Xu) + +

+ + +
+ +

Computes the constraint terms that are added to the SGP's optimization function

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

Inducing points from which to compute the constraints

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
c + float + +
+

constraint terms (eg., distance constraint)

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
90
+91
+92
+93
+94
+95
+96
+97
+98
+99
def constraints(self, Xu):
+    """Computes the constraint terms that are added to the SGP's optimization function
+
+    Args:
+        Xu (ndarray): Inducing points from which to compute the constraints
+
+    Returns:
+        c (float): constraint terms (eg., distance constraint)
+    """
+    return 0.
+
+
+
+ +
+ +
+ + +

+ distance(Xu) + +

+ + +
+ +

Computes the distance incurred by sequentially visiting the inducing points

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

Inducing points from which to compute the path length

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
dist + float + +
+

path length

+
+
+ +
+ Source code in sgptools/models/core/transformations.py +
101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
def distance(self, Xu):
+    """Computes the distance incured by sequentially visiting the inducing points
+
+    Args:
+        Xu (ndarray): Inducing points from which to compute the path length
+
+    Returns:
+        dist (float): path length
+    """
+    dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))
+    return dist
+
+
+
+ +
+ +
+ + +

+ expand(Xu) + +

+ + +
+ +

Applies the expansion transform to the inducing points

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

Expansion transformed inducing points

+
+
+ required +
+ +
+ Source code in sgptools/models/core/transformations.py +
49
+50
+51
+52
+53
+54
+55
def expand(self, Xu):
+    """Applies the expansion transform to the inducing points
+
+    Args:
+        Xu (ndarray): Expansion transformed inducing points
+    """
+    return Xu
+
+
+
+ +
+ + + +
+ +
+ +
+ + + + +
+ +
+ +

________________________

+ + +
+ + + + +
+ +

General utilities to support the functionalities of this package:

+
    +
  • data: Provides utilities to preprocess datasets
  • +
  • gpflow: Provides utilities to interface with GPflow
  • +
  • metrics: Provides utilities to quantify the solution quality
  • +
  • misc: Provides miscellaneous helper functions
  • +
  • tsp: Provides utilities to run TSP/VRP solver
  • +
+ + + +
+ + + + + + + + + + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ resample_path(waypoints, num_inducing=10) + +

+ + +
+ +

Function to map path with arbitrary number of waypoints to +inducing points path with fixed number of waypoints

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
waypoints + ndarray + +
+

(num_waypoints, n_dim); waypoints of path from vrp solver

+
+
+ required +
num_inducing + int + +
+

Number of inducing points (waypoints) in the returned path

+
+
+ 10 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
points + ndarray + +
+

(num_inducing, n_dim); Resampled path

+
+
+ +
+ Source code in sgptools/utils/tsp.py +
161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
def resample_path(waypoints, num_inducing=10):
+    """Function to map path with arbitrary number of waypoints to 
+    inducing points path with fixed number of waypoints
+
+    Args:
+        waypoints (ndarray): (num_waypoints, n_dim); waypoints of path from vrp solver
+        num_inducing (int): Number of inducing points (waypoints) in the returned path
+
+    Returns:
+        points (ndarray): (num_inducing, n_dim); Resampled path
+    """
+    line = LineString(waypoints)
+    distances = np.linspace(0, line.length, num_inducing)
+    points = [line.interpolate(distance) for distance in distances]
+    points = np.array([[p.x, p.y] for p in points])
+    return points
+
+
+
+ +
+ +
+ + +

+ run_tsp(nodes, num_vehicles=1, max_dist=25, depth=1, resample=None, start_idx=None, end_idx=None) + +

+ + +
+ +

Method to run TSP/VRP with arbitrary start and end nodes, +and without any distance constraint

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
nodes + ndarray + +
+

(# nodes, n_dim); Nodes to visit

+
+
+ required +
num_vehicles + int + +
+

Number of robots/vehicles

+
+
+ 1 +
max_dist + float + +
+

Maximum distance allowed for each path when handling multi-robot case

+
+
+ 25 +
depth + int + +
+

Internal parameter used to track re-try recursion depth

+
+
+ 1 +
resample + int + +
+

Each solution path will be resampled to have + resample number of points

+
+
+ None +
start_idx + list + +
+

Optional list of start node indices from which to start the solution path

+
+
+ None +
end_idx + list + +
+

Optional list of end node indices at which to end the solution path

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
paths + ndarray + +
+

Solution paths

+
+
distances + list + +
+

List of path lengths

+
+
+ +
+ Source code in sgptools/utils/tsp.py +
 22
+ 23
+ 24
+ 25
+ 26
+ 27
+ 28
+ 29
+ 30
+ 31
+ 32
+ 33
+ 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
def run_tsp(nodes, 
+            num_vehicles=1, 
+            max_dist=25, 
+            depth=1, 
+            resample=None, 
+            start_idx=None,
+            end_idx=None):
+    """Method to run TSP/VRP with arbitrary start and end nodes, 
+    and without any distance constraint
+
+    Args:
+        nodes (ndarray): (# nodes, n_dim); Nodes to visit 
+        num_vehicles (int): Number of robots/vehicles
+        max_dist (float): Maximum distance allowed for each path when handling multi-robot case
+        depth (int): Internal parameter used to track re-try recursion depth
+        resample (int): Each solution path will be resampled to have
+                        `resample` number of points
+        start_idx (list): Optional list of start node indices from which to start the solution path 
+        end_idx (list): Optional list of end node indices at which to end the solution path 
+
+    Returns:
+        paths (ndarray): Solution paths
+        distances (list): List of path lengths
+    """
+    if depth > 5:
+        print('Warning: Max depth reached')
+        return None, None
+
+    # Add dummy 0 location to get arbitrary start and end node sols
+    if start_idx is None or end_idx is None:
+        distance_mat = np.zeros((len(nodes)+1, len(nodes)+1))
+        distance_mat[1:, 1:] = pairwise_distances(nodes, nodes)*1e4
+        trim_paths = True
+    else:
+        distance_mat = pairwise_distances(nodes, nodes)*1e4
+        trim_paths = False
+    distance_mat = distance_mat.astype(int)
+    max_dist = int(max_dist*1e4)
+
+    if start_idx is None:
+        start_idx = [0]*num_vehicles
+    elif trim_paths:
+        start_idx = [i+1 for i in start_idx]
+
+    if end_idx is None:
+        end_idx = [0]*num_vehicles
+    elif trim_paths:
+        end_idx = [i+1 for i in end_idx]
+
+    def distance_callback(from_index, to_index):
+        from_node = manager.IndexToNode(from_index)
+        to_node = manager.IndexToNode(to_index)
+        return distance_mat[from_node][to_node]
+
+    # num_locations, num vehicles, start, end
+    manager = pywrapcp.RoutingIndexManager(len(distance_mat), 
+                                           num_vehicles, 
+                                           start_idx,
+                                           end_idx)
+    routing = pywrapcp.RoutingModel(manager)
+    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
+    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
+
+    if num_vehicles > 1:
+        # Dummy distance constraint to ensure all paths have similar length
+        dimension_name = "Distance"
+        routing.AddDimension(
+            transit_callback_index,
+            0,  # no slack
+            max_dist,  # vehicle maximum travel distance
+            True,  # start cumul to zero
+            dimension_name,
+        )
+        distance_dimension = routing.GetDimensionOrDie(dimension_name)
+        distance_dimension.SetGlobalSpanCostCoefficient(100)
+
+    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
+    search_parameters.first_solution_strategy = (
+        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
+    )
+    search_parameters.local_search_metaheuristic = (
+        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
+    )
+    search_parameters.time_limit.seconds = 10
+    solution = routing.SolveWithParameters(search_parameters)
+
+    paths = None
+    if solution is not None:
+        paths, distances = get_routes(manager, routing, 
+                                      solution, num_vehicles, 
+                                      start_idx, end_idx, trim_paths)
+        for path in paths:
+            if len(path) < 2:
+                print('TSP Warning: Empty path detected')
+                return run_tsp(nodes, num_vehicles, int(np.mean(distances)*(1.5/depth)), depth+1)
+    else:
+        print('TSP Warning: No solution found')
+        return run_tsp(nodes, num_vehicles, int(max_dist*1.5), depth+1)
+
+    # Map paths from node indices to node locations
+    paths = [nodes[path] for path in paths]
+
+    # Resample each solution path to have resample number of points
+    if resample is not None:
+        paths = np.array([resample_path(path, resample) for path in paths])
+
+    # Convert distances back to floats in the original scale of the nodes
+    distances = np.array(distances)/1e4
+    return paths, distances
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ cont2disc(Xu, candidates, candidate_labels=None) + +

+ + +
+ +

Map continuous space locations to a discrete set of candidate location

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(m, 2); Continuous space points

+
+
+ required +
candidates + ndarray + +
+

(n, 2); Discrete set of candidate locations

+
+
+ required +
candidate_labels + ndarray + +
+

(n, 1); Labels corresponding to the discrete set of candidate locations

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
Xu_x + ndarray + +
+

Discrete space points' locations

+
+
Xu_y + ndarray + +
+

Labels of the discrete space points. Returned only if candidate_labels + was passed to the function

+
+
+ +
+ Source code in sgptools/utils/misc.py +
38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
def cont2disc(Xu, candidates, candidate_labels=None):
+    """Map continuous space locations to a discrete set of candidate location
+
+    Args:
+        Xu (ndarray): (m, 2); Continuous space points
+        candidates (ndarray): (n, 2); Discrete set of candidate locations
+        candidate_labels (ndarray): (n, 1); Labels corresponding to the discrete set of candidate locations
+
+    Returns:
+        Xu_x (ndarray): Discrete space points' locations 
+        Xu_y (ndarray): Labels of the discrete space points. Returned only if `candidate_labels`
+                        was passed to the function
+
+    """
+    # Sanity check to ensure that there are candidates to match
+    if len(candidates)==0:
+        return []
+    dists = pairwise_distances(candidates, Y=Xu, metric='euclidean')
+    row_ind, _ = linear_sum_assignment(dists)
+    Xu_X = candidates[row_ind].copy()
+    if candidate_labels is not None:
+        Xu_y = candidate_labels[row_ind].copy()
+        return Xu_X, Xu_y
+    else:
+        return Xu_X
+
+
+
+ +
+ +
+ + +

+ get_inducing_pts(data, num_inducing, orientation=False, random=False) + +

+ + +
+ +

Selects a subset of the data points to be used as inducing points. +The default approach uses kmeans to select the subset.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
data + ndarray + +
+

(n, 2); Data points to select the inducing points from

+
+
+ required +
num_inducing + int + +
+

Number of inducing points

+
+
+ required +
orientation + bool + +
+

If True, add an additional dimension to model the sensor + FoV rotation angle

+
+
+ False +
random + bool + +
+

If True, the subset of inducing points are selected randomly + instead of using kmeans

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
Xu + ndarray + +
+

(m, d); Inducing points in the position and orientation space. + m is the number of inducing points, + d is the dimension of the space (x, y, optional - angle in radians)

+
+
+ +
+ Source code in sgptools/utils/misc.py +
11
+12
+13
+14
+15
+16
+17
+18
+19
+20
+21
+22
+23
+24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
def get_inducing_pts(data, num_inducing, orientation=False, random=False):
+    """Selects a subset of the data points to be used as inducing points. 
+    The default approach uses kmeans to select the subset. 
+
+    Args:
+        data (ndarray): (n, 2); Data points to select the inducing points from 
+        num_inducing (int): Number of inducing points
+        orientation (bool): If True, add an additional dimension to model the sensor 
+                            FoV rotation angle
+        random (bool): If True, the subset of inducing points are selected randomly 
+                       instead of using kmeans
+
+    Returns:
+        Xu (ndarray): (m, d); Inducing points in the position and orientation space.
+                        `m` is the number of inducing points, 
+                        `d` is the dimension of the space (x, y, optional - angle in radians)
+    """
+    if random:
+        idx = np.random.randint(len(data), size=num_inducing)
+        Xu = data[idx]
+    else:
+        Xu = kmeans2(data, num_inducing, minit="points")[0]
+    if orientation:
+        thetas = np.random.uniform(0, 2 * np.pi, size=(Xu.shape[0], 1))
+        Xu = np.concatenate([Xu, thetas], axis=1)
+    return Xu
+
+
+
+ +
+ +
+ + +

+ interpolate_path(waypoints, sampling_rate=0.05) + +

+ + +
+ +

Interpolate additional points between the given waypoints to simulate continuous sensing robots

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
waypoints + (n, d) + +
+

Waypoints of the robot's path

+
+
+ required +
sampling_rate + float + +
+

Distance between each pair of interpolated points

+
+
+ 0.05 +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
path + ndarray + +
+

(p, d) Interpolated path, p depends on the sampling_rate

+
+
+ +
+ Source code in sgptools/utils/misc.py +
 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
def interpolate_path(waypoints, sampling_rate=0.05):
+    """Interpolate additional points between the given waypoints to simulate continuous sensing robots
+
+    Args:
+        waypoints (n, d): Waypoints of the robot's path
+        sampling_rate (float): Distance between each pair of interpolated points
+
+    Returns:
+        path (ndarray): (p, d) Interpolated path, `p` depends on the sampling_rate
+    """
+    interpolated_path = []
+    for i in range(2, len(waypoints)+1):
+        dist = get_distance(waypoints[i-2:i])
+        num_samples = int(dist / sampling_rate)
+        points = np.linspace(waypoints[i-1], waypoints[i-2], num_samples)
+        interpolated_path.extend(points)
+    return np.array(interpolated_path)
+
+
+
+ +
+ +
+ + +

+ plot_paths(paths, candidates=None, title=None) + +

+ + +
+ +

Function to plot the IPP solution paths

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
paths + ndarray + +
+

(r, m, 2); r paths with m waypoints each

+
+
+ required +
candidates + ndarray + +
+

(n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach

+
+
+ None +
title + str + +
+

Title of the plot

+
+
+ None +
+ +
+ Source code in sgptools/utils/misc.py +
64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
def plot_paths(paths, candidates=None, title=None):
+    """Function to plot the IPP solution paths
+
+    Args:
+        paths (ndarray): (r, m, 2); `r` paths with `m` waypoints each
+        candidates (ndarray): (n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach
+        title (str): Title of the plot
+    """
+    plt.figure()
+    for i, path in enumerate(paths):
+        plt.plot(path[:, 0], path[:, 1], 
+                    c='r', label='Path', zorder=1, marker='o')
+        plt.scatter(path[0, 0], path[0, 1], 
+                    c='g', label='Start', zorder=2, marker='o')
+        if candidates is not None:
+            plt.scatter(candidates[:, 0], candidates[:, 1], 
+                        c='k', s=1, label='Unlabeled Train-Set Points', zorder=0)
+        if i==0:
+            plt.legend(bbox_to_anchor=(1.0, 1.02))
+    if title is not None:
+        plt.title(title)
+    plt.xlabel('X')
+    plt.ylabel('Y')
+
+
+
+ +
+ +
+ + +

+ project_waypoints(waypoints, candidates) + +

+ + +
+ +

Project the waypoints back to the candidate set while retaining the +waypoint visitation order.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
waypoints + (n, d) + +
+

Waypoints of the robot's path

+
+
+ required +
candidates + ndarray + +
+

(n, 2); Discrete set of candidate locations

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
waypoints + (n, d) + +
+

Projected waypoints of the robot's path

+
+
+ +
+ Source code in sgptools/utils/misc.py +
122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
def project_waypoints(waypoints, candidates):
+    """Project the waypoints back to the candidate set while retaining the 
+    waypoint visitation order.
+
+    Args:
+        waypoints (n, d): Waypoints of the robot's path
+        candidates (ndarray): (n, 2); Discrete set of candidate locations
+
+    Returns:
+        waypoints (n, d): Projected waypoints of the robot's path
+    """
+    waypoints_disc = cont2disc(waypoints, candidates)
+    waypoints_valid = _reoder_path(waypoints, waypoints_disc)
+    return waypoints_valid
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ gaussian_entropy(K) + +

+ + +
+ +

Computes GP-based entropy from a kernel matrix

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
K + ndarray + +
+

(n, n); kernel matrix

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
entropy + float + +
+

Entropy computed from the kernel matrix

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
21
+22
+23
+24
+25
+26
+27
+28
+29
+30
def gaussian_entropy(K):
+    """Computes GP-based entropy from a kernel matrix
+
+    Args:
+        K (ndarray): (n, n); kernel matrix
+
+    Returns:
+        entropy (float): Entropy computed from the kernel matrix
+    """
+    return multivariate_normal(mean=None, cov=K, allow_singular=True).entropy()
+
+
+
+ +
+ +
+ + +

+ get_distance(X) + +

+ + +
+ +

Compute the length of a path (L2-norm)

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + ndarray + +
+

(m, d); Waypoints of a path

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
dist + float + +
+

Total path length

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
def get_distance(X):
+    """Compute the length of a path (L2-norm)
+
+    Args:
+        X (ndarray): (m, d); Waypoints of a path
+
+    Returns:
+        dist (float): Total path length
+    """
+    dist = np.linalg.norm(X[1:] - X[:-1], axis=-1)
+    dist = np.sum(dist)
+    return dist
+
+
+
+ +
+ +
+ + +

+ get_elbo(Xu, X_env, noise_variance, kernel, baseline=False) + +

+ + +
+ +

Computes the ELBO of the SGP, corrected to be positive

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(m, d); Sensing locations

+
+
+ required +
X_env + ndarray + +
+

(n, d); Data points used to approximate the bounds of the environment

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
baseline + bool + +
+

If True, the ELBO is adjusted to be positive

+
+
+ False +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
elbo + float + +
+

ELBO of the SGP

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
def get_elbo(Xu, X_env, noise_variance, kernel, baseline=False):
+    """Computes the ELBO of the SGP, corrected to be positive
+
+    Args:
+        Xu (ndarray): (m, d); Sensing locations
+        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+        baseline (bool): If True, the ELBO is adjusted to be positive
+
+    Returns:
+        elbo (float): ELBO of the SGP
+    """
+    if baseline:
+        sgpr = gpflow.models.SGPR(X_env,
+                                  noise_variance=noise_variance,
+                                  kernel=kernel,
+                                  inducing_variable=[[0, 0]])
+        baseline = sgpr.elbo().numpy()
+    else:
+        baseline = 0.0
+
+    sgpr = gpflow.models.SGPR(X_env,
+                              noise_variance=noise_variance,
+                              kernel=kernel, 
+                              inducing_variable=Xu)
+    return (sgpr.elbo() - baseline).numpy()
+
+
+
+ +
+ +
+ + +

+ get_kl(Xu, X_env, noise_variance, kernel) + +

+ + +
+ +

Computes the KL divergence between the SGP and the GP

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(m, d); Sensing locations

+
+
+ required +
X_env + ndarray + +
+

(n, d); Data points used to approximate the bounds of the environment

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
kl + float + +
+

KL divergence between the SGP and the GP

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
def get_kl(Xu, X_env, noise_variance, kernel):
+    """Computes the KL divergence between the SGP and the GP
+
+    Args:
+        Xu (ndarray): (m, d); Sensing locations
+        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+
+    Returns:
+        kl (float): KL divergence between the SGP and the GP
+    """
+    sgpr = gpflow.models.SGPR(X_env,
+                              noise_variance=noise_variance,
+                              kernel=kernel,
+                              inducing_variable=Xu)
+
+    common = sgpr._common_calculation()
+    sigma_sq = common.sigma_sq
+    AAT = common.AAT
+
+    x, _ = sgpr.data
+    kdiag = sgpr.kernel(x, full_cov=False)
+
+    # tr(K) / σ²
+    trace_k = tf.reduce_sum(kdiag / sigma_sq)
+    # tr(Q) / σ²
+    trace_q = tf.reduce_sum(tf.linalg.diag_part(AAT))
+    # tr(K - Q) / σ²
+    trace = trace_k - trace_q
+    trace = 0.5 * trace
+
+    return float(trace.numpy())
+
+
+
+ +
+ +
+ + +

+ get_mi(Xu, candidate_locs, noise_variance, kernel) + +

+ + +
+ +

Computes mutual information between the sensing locations and the candidate locations

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + ndarray + +
+

(m, d); Sensing locations

+
+
+ required +
candidate_locs + ndarray + +
+

(n, d); Candidate sensing locations

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
mi + float + +
+

Mutual information computed using a GP

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
def get_mi(Xu, candidate_locs, noise_variance, kernel):
+    """Computes mutual information between the sensing locations and the candidate locations
+
+    Args:
+        Xu (ndarray): (m, d); Sensing locations
+        candidate_locs (ndarray): (n, d); Candidate sensing locations 
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+
+    Returns:
+        mi (float): Mutual information computed using a GP
+    """
+    Xu = np.array(Xu)
+    candidate_locs = np.array(candidate_locs)
+
+    gp = gpflow.models.GPR(data=(Xu, np.zeros((len(Xu), 1))),
+                           kernel=kernel,
+                           noise_variance=noise_variance)
+    _, sigma_a = gp.predict_f(candidate_locs, full_cov=True)
+    sigma_a = sigma_a.numpy()[0]
+    cond_entropy = gaussian_entropy(sigma_a)
+
+    K = kernel(candidate_locs, full_cov=True).numpy()
+    K += noise_variance * np.eye(len(candidate_locs))
+    entropy = gaussian_entropy(K)
+
+    return float(entropy - cond_entropy)
+
+
+
+ +
+ +
+ + +

+ get_reconstruction(Xu, X_test, noise_variance, kernel) + +

+ + +
+ +

Computes the GP-based data field estimates with the solution placements as the training set

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
Xu + tuple + +
+

(ndarray (m, d); ndarray (m, 1)); Sensing locations' input + and corresponding ground truth labels

+
+
+ required +
X_test + ndarray + +
+

(n, d); Testing data input locations

+
+
+ required +
noise_variance + float + +
+

data variance

+
+
+ required +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
y_pred + ndarray + +
+

(n, 1); Predicted data field estimates

+
+
y_var + ndarray + +
+

(n, 1); Prediction variance at each location in the data field

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
def get_reconstruction(Xu, X_test, noise_variance, kernel):
+    """Computes the GP-based data field estimates with the solution placements as the training set
+
+    Args:
+        Xu (tuple): (ndarray (m, d); ndarray (m, 1)); Sensing locations' input 
+                    and corresponding ground truth labels
+        X_test (ndarray): (n, d); Testing data input locations
+        noise_variance (float): data variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+
+    Returns:
+        y_pred (ndarray): (n, 1); Predicted data field estimates
+        y_var (ndarray): (n, 1); Prediction variance at each location in the data field
+    """
+    Xu_X, Xu_y = Xu
+
+    # Get the GP predictions
+    gpr = gpflow.models.GPR((Xu_X, Xu_y),
+                            noise_variance=noise_variance,
+                            kernel=kernel)
+    y_pred, y_var = gpr.predict_f(X_test)
+    y_pred = y_pred.numpy().reshape(-1, 1)
+
+    return y_pred, y_var
+
+
+
+ +
+ +
+ + +

+ get_rmse(y_pred, y_test) + +

+ + +
+ +

Computes the root-mean-square error between y_pred and y_test

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
y_pred + ndarray + +
+

(n, 1); Predicted data field estimate

+
+
+ required +
y_test + ndarray + +
+

(n, 1); Ground truth data field

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
rmse + float + +
+

Computed RMSE

+
+
+ +
+ Source code in sgptools/utils/metrics.py +
122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
def get_rmse(y_pred, y_test):
+    """Computes the root-mean-square error between `y_pred` and `y_test`
+
+    Args:
+        y_pred (ndarray): (n, 1); Predicted data field estimate
+        y_test (ndarray): (n, 1); Ground truth data field 
+
+    Returns:
+        rmse (float): Computed RMSE
+    """
+    return np.sqrt(np.mean(np.square(y_pred - y_test)))
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ get_model_params(X_train, y_train, max_steps=1500, lr=0.01, print_params=True, lengthscales=1.0, variance=1.0, noise_variance=0.1, kernel=None, **kwargs) + +

+ + +
+ +

Train a GP on the given training set

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X_train + ndarray + +
+

(n, d); Training set inputs

+
+
+ required +
y_train + ndarray + +
+

(n, 1); Training set labels

+
+
+ required +
max_steps + int + +
+

Maximum number of optimization steps

+
+
+ 1500 +
lr + float + +
+

Optimization learning rate

+
+
+ 0.01 +
print_params + bool + +
+

If True, prints the optimized GP parameters

+
+
+ True +
lengthscales + float or list + +
+

Kernel lengthscale(s), if passed as a list, + each element corresponds to each data dimension

+
+
+ 1.0 +
variance + float + +
+

Kernel variance

+
+
+ 1.0 +
noise_variance + float + +
+

Data noise variance

+
+
+ 0.1 +
kernel + Kernel + +
+

gpflow kernel function

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + + + + + +
Name TypeDescription
loss + list + +
+

Loss values obtained during training

+
+
variance + float + +
+

Optimized data noise variance

+
+
kernel + Kernel + +
+

Optimized gpflow kernel function

+
+
+ +
+ Source code in sgptools/utils/gpflow.py +
44
+45
+46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
+63
+64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
+78
+79
+80
+81
+82
+83
+84
+85
+86
+87
+88
def get_model_params(X_train, y_train, 
+                     max_steps=1500, 
+                     lr=1e-2, 
+                     print_params=True, 
+                     lengthscales=1.0, 
+                     variance=1.0, 
+                     noise_variance=0.1,
+                     kernel=None,
+                     **kwargs):
+    """Train a GP on the given training set
+
+    Args:
+        X_train (ndarray): (n, d); Training set inputs
+        y_train (ndarray): (n, 1); Training set labels
+        max_steps (int): Maximum number of optimization steps
+        lr (float): Optimization learning rate
+        print_params (bool): If True, prints the optimized GP parameters
+        lengthscales (float or list): Kernel lengthscale(s), if passed as a list, 
+                                each element corresponds to each data dimension
+        variance (float): Kernel variance
+        noise_variance (float): Data noise variance
+        kernel (gpflow.kernels.Kernel): gpflow kernel function
+
+    Returns:
+        loss (list): Loss values obtained during training
+        variance (float): Optimized data noise variance
+        kernel (gpflow.kernels.Kernel): Optimized gpflow kernel function
+    """
+    if kernel is None:
+        kernel = gpflow.kernels.SquaredExponential(lengthscales=lengthscales, 
+                                                   variance=variance)
+
+    gpr_gt = gpflow.models.GPR(data=(X_train, y_train), 
+                               kernel=kernel,
+                               noise_variance=noise_variance)
+
+    if max_steps > 0:
+        loss = optimize_model(gpr_gt, max_steps=max_steps, lr=lr, **kwargs)
+    else:
+        loss = 0
+
+    if print_params:
+        print_summary(gpr_gt)
+
+    return loss, gpr_gt.likelihood.variance, kernel
+
+
+
+ +
+ +
+ + +

+ optimize_model(model, max_steps=2000, kernel_grad=True, lr=0.01, optimizer='tf', method=None, verbose=False, trace_fn=None, convergence_criterion=True, trainable_variables=None, tol=None) + +

+ + +
+ +

Trains a GP/SGP model

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
model + models + +
+

GPflow GP/SGP model to train

+
+
+ required +
max_steps + int + +
+

Maximum number of training steps

+
+
+ 2000 +
kernel_grad + bool + +
+

If False, the kernel parameters will not be optimized

+
+
+ True +
lr + float + +
+

Optimization learning rate

+
+
+ 0.01 +
optimizer + str + +
+

Optimizer to use for training (scipy or tf)

+
+
+ 'tf' +
method + str + +
+

Optimization method refer to scipy minimize and tf optimizers for full list

+
+
+ None +
verbose + bool + +
+

If true, the training progress will be printed

+
+
+ False +
trace_fn + str + +
+

Function to trace metrics during training. + If None, the loss values are traced; + if traceXu, the inducing point states at each optimization step are traced

+
+
+ None +
convergence_criterion + bool + +
+

If True, enables early stopping when the loss plateaus

+
+
+ True +
trainable_variables + list + +
+

List of model variables to train + (can be used to limit training to a subset of variables)

+
+
+ None +
tol + float + +
+

Convergence tolerance to decide when to stop optimization

+
+
+ None +
+ +
+ Source code in sgptools/utils/gpflow.py +
 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
def optimize_model(model, 
+                   max_steps=2000, 
+                   kernel_grad=True, 
+                   lr=1e-2, 
+                   optimizer='tf', 
+                   method=None,
+                   verbose=False, 
+                   trace_fn=None,
+                   convergence_criterion=True,
+                   trainable_variables=None,
+                   tol=None):
+    """
+    Trains a GP/SGP model
+
+    Args:
+        model (gpflow.models): GPflow GP/SGP model to train
+        max_steps (int): Maximum number of training steps
+        kernel_grad (bool): If False, the kernel parameters will not be optimized
+        lr (float): Optimization learning rate
+        optimizer (str): Optimizer to use for training (`scipy` or `tf`)
+        method (str): Optimization method refer to scipy minimize and tf optimizers for full list
+        verbose (bool): If true, the training progress will be printed
+        trace_fn (str): Function to trace metrics during training. 
+                        If `None`, the loss values are traced;
+                        if `traceXu`, it the inducing points states at each optimization step are traced
+        convergence_criterion (bool): It True, enables early stopping when the loss plateaus
+        trainable_variables (list): List of model variables to train 
+                                    (can be used to limit training to a subset of variables)
+        tol (float): Convergence tolerance to decide when to stop optimization
+    """
+    # Train all variables if trainable_variables are not provided
+    # If kernel_gradient is False, disable the kernel parameter gradient updates
+    if trainable_variables is None and kernel_grad:
+        trainable_variables=model.trainable_variables
+    elif trainable_variables is None and not kernel_grad:
+        trainable_variables=model.trainable_variables[:1]
+
+    if optimizer == 'scipy':
+        if method is None:
+            method = 'L-BFGS-B'
+        opt = gpflow.optimizers.Scipy()
+        losses = opt.minimize(model.training_loss,
+                              trainable_variables,
+                              method=method,
+                              options=dict(disp=verbose, maxiter=max_steps),
+                              tol=tol)
+        losses = losses.fun
+    else:
+        if trace_fn is None:
+            trace_fn = lambda x: x.loss
+        elif trace_fn == 'traceXu':
+            def trace_fn(traceable_quantities):
+                return model.inducing_variable.Z.numpy()
+
+        if method is None:
+            method = 'adam'
+        opt = tf.keras.optimizers.get(method)
+        opt.learning_rate = lr
+        loss_fn = model.training_loss
+        if convergence_criterion:
+            convergence_criterion = tfp.optimizer.convergence_criteria.LossNotDecreasing(
+                                            atol=1e-5, 
+                                            window_size=50,
+                                            min_num_steps=int(max_steps*0.1))
+        else:
+            convergence_criterion = None
+        losses = tfp.math.minimize(loss_fn,
+                                   trainable_variables=trainable_variables,
+                                   num_steps=max_steps,
+                                   optimizer=opt,
+                                   convergence_criterion=convergence_criterion,
+                                   trace_fn=trace_fn)
+        losses = losses.numpy()
+
+    return losses
+
+
+
+ +
+ +
+ + +

+ plot_loss(losses, save_file=None) + +

+ + +
+ +

Helper function to plot the training loss

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
losses + list + +
+

list of loss values

+
+
+ required +
save_file + str + +
+

If passed, the loss plot will be saved to the save_file

+
+
+ None +
+ +
+ Source code in sgptools/utils/gpflow.py +
24
+25
+26
+27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
def plot_loss(losses, save_file=None):
+    """Helper function to plot the training loss
+
+    Args:
+        losses (list): list of loss values
+        save_file (str): If passed, the loss plot will be saved to the `save_file`
+    """
+    plt.plot(losses)
+    plt.title('Log Likelihood')
+    plt.xlabel('Iteration')
+    plt.ylabel('Log Likelihood')
+    ax = plt.gca()
+    ax.ticklabel_format(useOffset=False)
+
+    if save_file is not None:
+        plt.savefig(save_file, bbox_inches='tight')
+        plt.close()
+    else:
+        plt.show()
+
+
+
+ +
+ + + +
+ +
+ +

+ + +
+ + + + +
+ + + +
+ + + + + + + + + +
+ + +

+ get_dataset(dataset_type, dataset_path=None, num_train=1000, num_test=2500, num_candidates=150) + +

+ + +
+ +

Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to +generate train and test sets.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
dataset_type + str + +
+

'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file. + 'synthetic' will use the diamond square algorithm to generate synthetic elevation data.

+
+
+ required +
dataset_path + str + +
+

Path to the dataset file, used only when dataset_type is 'tif'.

+
+
+ None +
num_train + int + +
+

Number of training samples to generate.

+
+
+ 1000 +
num_test + int + +
+

Number of testing samples to generate.

+
+
+ 2500 +
num_candidates + int + +
+

Number of candidate locations to generate.

+
+
+ 150 +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name TypeDescription
X_train + ndarray + +
+

(n, d); Training set inputs

+
+
y_train + ndarray + +
+

(n, 1); Training set labels

+
+
X_test + ndarray + +
+

(n, d); Testing set inputs

+
+
y_test + ndarray + +
+

(n, 1); Testing set labels

+
+
candidates + ndarray + +
+

(n, d); Candidate sensor placement locations

+
+
X + +
+

(n, d); Full dataset inputs

+
+
y + +
+

(n, 1); Full dataset labels

+
+
+ +
+ Source code in sgptools/utils/data.py +
146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
+170
+171
+172
+173
+174
+175
+176
+177
+178
+179
+180
+181
+182
+183
+184
+185
+186
+187
+188
+189
+190
+191
+192
+193
+194
+195
+196
+197
+198
def get_dataset(dataset_type, dataset_path=None,
+                num_train=1000,
+                num_test=2500, 
+                num_candidates=150):
+    """Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to 
+    generate train and test sets.
+
+    Args:
+        dataset_type (str): 'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file. 
+                        'synthetic' will use the diamond square algorithm to generate synthetic elevation data.
+        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.
+        num_train (int): Number of training samples to generate.
+        num_test (int): Number of testing samples to generate.
+        num_candidates (int): Number of candidate locations to generate.
+
+    Returns:
+       X_train (ndarray): (n, d); Training set inputs
+       y_train (ndarray): (n, 1); Training set labels
+       X_test (ndarray): (n, d); Testing set inputs
+       y_test (ndarray): (n, 1); Testing set labels
+       candidates (ndarray): (n, d); Candidate sensor placement locations
+       X: (n, d); Full dataset inputs
+       y: (n, 1); Full dataset labels
+    """
+    # Load the data
+    if dataset_type == 'tif':
+        X, y = prep_tif_dataset(dataset_path=dataset_path)
+    elif dataset_type == 'synthetic':
+        X, y = prep_synthetic_dataset()
+
+    X_train = get_inducing_pts(X, num_train)
+    X_train, y_train = cont2disc(X_train, X, y)
+
+    X_test = get_inducing_pts(X, num_test)
+    X_test, y_test = cont2disc(X_test, X, y)
+
+    candidates = get_inducing_pts(X, num_candidates)
+    candidates = cont2disc(candidates, X)
+
+    # Standardize data
+    X_scaler = StandardScaler()
+    X_scaler.fit(X_train)
+    X_train = X_scaler.transform(X_train)*10.0
+    X_test = X_scaler.transform(X_test)*10.0
+    X = X_scaler.transform(X)*10.0
+
+    y_scaler = StandardScaler()
+    y_scaler.fit(y_train)
+    y_train = y_scaler.transform(y_train)
+    y_test = y_scaler.transform(y_test)
+    y = y_scaler.transform(y)
+
+    return X_train, y_train, X_test, y_test, candidates, X, y
+
+
+
+ +
+ +
+ + +

+ point_pos(point, d, theta) + +

+ + +
+ +

Generate a point at a distance d from a point at angle theta.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
point + ndarray + +
+

(N, 2); array of points

+
+
+ required +
d + float + +
+

distance

+
+
+ required +
theta + float + +
+

angle in radians

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
X + ndarray + +
+

(N,); array of x-coordinate

+
+
Y + ndarray + +
+

(N,); array of y-coordinate

+
+
+ +
+ Source code in sgptools/utils/data.py +
64
+65
+66
+67
+68
+69
+70
+71
+72
+73
+74
+75
+76
+77
def point_pos(point, d, theta):
+    '''
+    Generate a point at a distance d from a point at angle theta.
+
+    Args:
+        point (ndarray): (N, 2); array of points
+        d (float): distance
+        theta (float): angle in radians
+
+    Returns:
+        X  (ndarray): (N,); array of x-coordinate
+        Y  (ndarray): (N,); array of y-coordinate
+    '''
+    return np.c_[point[:, 0] + d*np.cos(theta), point[:, 1] + d*np.sin(theta)]
+
+
+
+ +
+ +
+ + +

+ prep_synthetic_dataset() + +

+ + +
+ +

Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm. +https://github.com/buckinha/DiamondSquare

+

Args:

+

Returns: + X: (n, d); Dataset input features + y: (n, 1); Dataset labels

+ +
+ Source code in sgptools/utils/data.py +
119
+120
+121
+122
+123
+124
+125
+126
+127
+128
+129
+130
+131
+132
+133
+134
+135
+136
+137
+138
+139
+140
+141
+142
def prep_synthetic_dataset():
+    '''Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm.
+    ```https://github.com/buckinha/DiamondSquare```
+
+    Args:
+
+    Returns:
+       X: (n, d); Dataset input features
+       y: (n, 1); Dataset labels
+    '''
+    data = diamond_square(shape=(50,50), 
+                          min_height=0, 
+                          max_height=30, 
+                          roughness=0.5)
+
+    # create x and y coordinates from the extent
+    x_coords = np.arange(0, data.shape[0])/10
+    y_coords = np.arange(0, data.shape[1])/10
+    xx, yy = np.meshgrid(x_coords, y_coords)
+    X = np.c_[xx.ravel(), yy.ravel()]
+    y = data.ravel()
+    y = y.reshape(-1, 1)
+
+    return X.astype(float), y.astype(float)
+
+
+
+ +
+ +
+ + +

+ prep_tif_dataset(dataset_path) + +

+ + +
+ +

Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features +are set to the x and y pixel block coordinates and the labels are read from the file. +The method also removes all invalid points.

+

Large tif files +need to be downsampled using the following command: +gdalwarp -tr 50 50 <input>.tif <output>.tif

+

Args: + dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.

+

Returns: + X: (n, d); Dataset input features + y: (n, 1); Dataset labels

+ +
+ Source code in sgptools/utils/data.py +
 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
def prep_tif_dataset(dataset_path):
+    '''Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features 
+    are set to the x and y pixel block coordinates and the labels are read from the file.
+    The method also removes all invalid points.
+
+    Large tif files 
+    need to be downsampled using the following command: 
+    ```gdalwarp -tr 50 50 <input>.tif <output>.tif```
+
+    Args:
+        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.
+
+    Returns:
+       X: (n, d); Dataset input features
+       y: (n, 1); Dataset labels
+    '''
+    data = PIL.Image.open(dataset_path)
+    data = np.array(data)
+
+    # create x and y coordinates from the extent
+    x_coords = np.arange(0, data.shape[1])/10
+    y_coords = np.arange(data.shape[0], 0, -1)/10
+    xx, yy = np.meshgrid(x_coords, y_coords)
+    X = np.c_[xx.ravel(), yy.ravel()]
+    y = data.ravel()
+
+    # Remove invalid labels
+    y[np.where(y==-999999.0)] = np.nan
+    X = X[~np.isnan(y)]
+    y = y[~np.isnan(y)]
+
+    X = X.reshape(-1, 2)
+    y = y.reshape(-1, 1)
+
+    return X.astype(float), y.astype(float)
+
+
+
+ +
+ +
+ + +

+ remove_circle_patches(X, Y, circle_patches) + +

+ + +
+ +

Remove points inside polycircle patchesgons.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + (ndarray + +
+

(N,); array of x-coordinate

+
+
+ required +
Y + (ndarray + +
+

(N,); array of y-coordinate

+
+
+ required +
polygons + list of matplotlib circle patches + +
+

Circle patches to remove from the X, Y points

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
X + ndarray + +
+

(N,); array of x-coordinate

+
+
Y + ndarray + +
+

(N,); array of y-coordinate

+
+
+ +
+ Source code in sgptools/utils/data.py +
46
+47
+48
+49
+50
+51
+52
+53
+54
+55
+56
+57
+58
+59
+60
+61
+62
def remove_circle_patches(X, Y, circle_patches):
+    '''
+    Remove points inside polycircle patchesgons.
+
+    Args:
+        X  (ndarray): (N,); array of x-coordinate
+        Y  (ndarray): (N,); array of y-coordinate
+        polygons (list of matplotlib circle patches): Circle patches to remove from the X, Y points
+
+    Returns:
+        X  (ndarray): (N,); array of x-coordinate
+        Y  (ndarray): (N,); array of y-coordinate
+    '''
+    points = np.array([X.flatten(), Y.flatten()]).T
+    for circle_patch in circle_patches:
+        points = points[~circle_patch.contains_points(points)]
+    return points[:, 0], points[:, 1]
+
+
+
+ +
+ +
+ + +

+ remove_polygons(X, Y, polygons) + +

+ + +
+ +

Remove points inside polygons.

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + (ndarray + +
+

(N,); array of x-coordinate

+
+
+ required +
Y + (ndarray + +
+

(N,); array of y-coordinate

+
+
+ required +
polygons + list of matplotlib path polygon + +
+

Polygons to remove from the X, Y points

+
+
+ required +
+ + +

Returns:

+ + + + + + + + + + + + + + + + + +
Name TypeDescription
X + ndarray + +
+

(N,); array of x-coordinate

+
+
Y + ndarray + +
+

(N,); array of y-coordinate

+
+
+ +
+ Source code in sgptools/utils/data.py +
27
+28
+29
+30
+31
+32
+33
+34
+35
+36
+37
+38
+39
+40
+41
+42
+43
+44
def remove_polygons(X, Y, polygons):
+    '''
+    Remove points inside polygons.
+
+    Args:
+        X  (ndarray): (N,); array of x-coordinate
+        Y  (ndarray): (N,); array of y-coordinate
+        polygons (list of matplotlib path polygon): Polygons to remove from the X, Y points
+
+    Returns:
+        X  (ndarray): (N,); array of x-coordinate
+        Y  (ndarray): (N,); array of y-coordinate
+    '''
+    points = np.array([X.flatten(), Y.flatten()]).T
+    for polygon in polygons:
+        p = path.Path(polygon)
+        points = points[~p.contains_points(points)]
+    return points[:, 0], points[:, 1]
+
+
+
+ +
+ + + +
+ +
+ +

________________________

+ + +
+ + + + +
+ +

Provides a neural spectral kernel function along with an initialization function

+ + + +
+ + + + + + + + +
+ + + +

+ NeuralSpectralKernel + + +

+ + +
+

+ Bases: Kernel

+ + +

Neural Spectral Kernel function (non-stationary kernel function). +Based on the implementation from the following repo

+ + +
+ Refer to the following papers for more details +
    +
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
input_dim + int + +
+

Number of data dimensions

+
+
+ required +
active_dims + int + +
+

Number of data dimensions that are used for computing the covariances

+
+
+ None +
Q + int + +
+

Number of MLP mixture components used in the kernel function

+
+
+ 1 +
hidden_sizes + list + +
+

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

+
+
+ [32, 32] +
+ +
+ Source code in sgptools/kernels/neural_kernel.py +
 34
+ 35
+ 36
+ 37
+ 38
+ 39
+ 40
+ 41
+ 42
+ 43
+ 44
+ 45
+ 46
+ 47
+ 48
+ 49
+ 50
+ 51
+ 52
+ 53
+ 54
+ 55
+ 56
+ 57
+ 58
+ 59
+ 60
+ 61
+ 62
+ 63
+ 64
+ 65
+ 66
+ 67
+ 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
+119
+120
+121
+122
+123
+124
class NeuralSpectralKernel(gpflow.kernels.Kernel):
+    """Neural Spectral Kernel function (non-stationary kernel function). 
+    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)
+
+    Refer to the following papers for more details:
+        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]
+
+    Args:
+        input_dim (int): Number of data dimensions
+        active_dims (int): Number of data dimensions that are used for computing the covariances
+        Q (int): Number of MLP mixture components used in the kernel function
+        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.
+    """
+    def __init__(self, input_dim, active_dims=None, Q=1, hidden_sizes=[32, 32]):
+        super().__init__(active_dims=active_dims)
+
+        self.input_dim = input_dim
+        self.Q = Q
+        self.num_hidden = len(hidden_sizes)
+
+        self.freq = []
+        self.length = []
+        self.var = []
+        for q in range(self.Q):
+            freq = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] + 
+                                    [layers.Dense(input_dim, activation='softplus')])
+            length = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +
+                                   [layers.Dense(input_dim, activation='softplus')])
+            var = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +
+                                   [layers.Dense(1, activation='softplus')])
+            self.freq.append(freq)
+            self.length.append(length)
+            self.var.append(var)
+
+    def K(self, X, X2=None):
+        """Computes the covariances between/amongst the input variables
+
+        Args:
+            X (ndarray): Variables to compute the covariance matrix
+            X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, 
+                          the covariance between X and X is computed.
+
+        Returns:
+            cov (ndarray): covariance matrix
+        """
+        if X2 is None:
+            X2 = X
+            equal = True
+        else:
+            equal = False
+
+        kern = 0.0
+        for q in range(self.Q):
+            # compute latent function values by the neural network
+            freq, freq2 = self.freq[q](X), self.freq[q](X2)
+            lens, lens2 = self.length[q](X), self.length[q](X2)
+            var, var2 = self.var[q](X), self.var[q](X2)
+
+            # compute length-scale term
+            Xr = tf.expand_dims(X, 1)  # N1 1 D
+            X2r = tf.expand_dims(X2, 0)  # 1 N2 D
+            l1 = tf.expand_dims(lens, 1)  # N1 1 D
+            l2 = tf.expand_dims(lens2, 0)  # 1 N2 D
+            L = tf.square(l1) + tf.square(l2)  # N1 N2 D
+            #D = tf.square((Xr - X2r) / L)  # N1 N2 D
+            D = tf.square(Xr - X2r) / L  # N1 N2 D
+            D = tf.reduce_sum(D, 2)  # N1 N2
+            det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D
+            det = tf.reduce_prod(det, 2)  # N1 N2
+            E = det * tf.exp(-D)  # N1 N2
+
+            # compute cosine term
+            muX = (tf.reduce_sum(freq * X, 1, keepdims=True)
+                   - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))
+            COS = tf.cos(2 * np.pi * muX)
+
+            # compute kernel variance term
+            WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T
+
+            # compute the q'th kernel component
+            kern += WW * E * COS
+        if equal:
+            return robust_kernel(kern, tf.shape(X)[0])
+        else:
+            return kern
+
+    def K_diag(self, X):
+        kd = default_jitter()
+        for q in range(self.Q):
+            kd += tf.square(self.var[q](X))
+        return tf.squeeze(kd)
+
+
+ + + +
+ + + + + + + + + +
+ + +

+ K(X, X2=None) + +

+ + +
+ +

Computes the covariances between/amongst the input variables

+ + +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
X + ndarray + +
+

Variables to compute the covariance matrix

+
+
+ required +
X2 + ndarray + +
+

If passed, the covariance between X and X2 is computed. Otherwise, + the covariance between X and X is computed.

+
+
+ None +
+ + +

Returns:

+ + + + + + + + + + + + + +
Name TypeDescription
cov + ndarray + +
+

covariance matrix

+
+
+ +
+ Source code in sgptools/kernels/neural_kernel.py +
 68
+ 69
+ 70
+ 71
+ 72
+ 73
+ 74
+ 75
+ 76
+ 77
+ 78
+ 79
+ 80
+ 81
+ 82
+ 83
+ 84
+ 85
+ 86
+ 87
+ 88
+ 89
+ 90
+ 91
+ 92
+ 93
+ 94
+ 95
+ 96
+ 97
+ 98
+ 99
+100
+101
+102
+103
+104
+105
+106
+107
+108
+109
+110
+111
+112
+113
+114
+115
+116
+117
+118
def K(self, X, X2=None):
+    """Computes the covariances between/amongst the input variables
+
+    Args:
+        X (ndarray): Variables to compute the covariance matrix
+        X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, 
+                      the covariance between X and X is computed.
+
+    Returns:
+        cov (ndarray): covariance matrix
+    """
+    if X2 is None:
+        X2 = X
+        equal = True
+    else:
+        equal = False
+
+    kern = 0.0
+    for q in range(self.Q):
+        # compute latent function values by the neural network
+        freq, freq2 = self.freq[q](X), self.freq[q](X2)
+        lens, lens2 = self.length[q](X), self.length[q](X2)
+        var, var2 = self.var[q](X), self.var[q](X2)
+
+        # compute length-scale term
+        Xr = tf.expand_dims(X, 1)  # N1 1 D
+        X2r = tf.expand_dims(X2, 0)  # 1 N2 D
+        l1 = tf.expand_dims(lens, 1)  # N1 1 D
+        l2 = tf.expand_dims(lens2, 0)  # 1 N2 D
+        L = tf.square(l1) + tf.square(l2)  # N1 N2 D
+        #D = tf.square((Xr - X2r) / L)  # N1 N2 D
+        D = tf.square(Xr - X2r) / L  # N1 N2 D
+        D = tf.reduce_sum(D, 2)  # N1 N2
+        det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D
+        det = tf.reduce_prod(det, 2)  # N1 N2
+        E = det * tf.exp(-D)  # N1 N2
+
+        # compute cosine term
+        muX = (tf.reduce_sum(freq * X, 1, keepdims=True)
+               - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))
+        COS = tf.cos(2 * np.pi * muX)
+
+        # compute kernel variance term
+        WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T
+
+        # compute the q'th kernel component
+        kern += WW * E * COS
+    if equal:
+        return robust_kernel(kern, tf.shape(X)[0])
+    else:
+        return kern
+
+
+
+ +
+ + + +
+ +
+ +
+ + +
+ + +

+ init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None) + +

+ + +
+ +

Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). +Based on the implementation from the following repo

+ + +
+ Refer to the following papers for more details +
    +
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]
  • +
+
+ +

Parameters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
NameTypeDescriptionDefault
x + ndarray + +
+

(n, d); Input training set points

+
+
+ required +
y + ndarray + +
+

(n, 1); Training set labels

+
+
+ required +
inducing_variable + ndarray + +
+

(m, d); Initial inducing points

+
+
+ required +
Q + int + +
+

Number of MLP mixture components used in the kernel function

+
+
+ required +
n_inits + int + +
+

Number of times to initalize the kernel function (returns the best model)

+
+
+ 1 +
hidden_sizes + list + +
+

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

+
+
+ None +
+ +
+ Source code in sgptools/kernels/neural_kernel.py +
133
+134
+135
+136
+137
+138
+139
+140
+141
+142
+143
+144
+145
+146
+147
+148
+149
+150
+151
+152
+153
+154
+155
+156
+157
+158
+159
+160
+161
+162
+163
+164
+165
+166
+167
+168
+169
def init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None):
+    """Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). 
+    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)
+
+    Refer to the following papers for more details:
+        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]
+
+    Args:
+        x (ndarray): (n, d); Input training set points
+        y (ndarray): (n, 1); Training set labels
+        inducing_variable (ndarray): (m, d); Initial inducing points
+        Q (int): Number of MLP mixture components used in the kernel function
+        n_inits (int): Number of times to initalize the kernel function (returns the best model)
+        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.
+    """
+    x, y = data_input_to_tensor((x, y))
+
+    print('Initializing neural spectral kernel...')
+    best_loglik = -np.inf
+    best_m = None
+    N, input_dim = x.shape
+
+    for k in range(n_inits):
+        # gpflow.reset_default_graph_and_session()
+        k = NeuralSpectralKernel(input_dim=input_dim, Q=Q, 
+                                    hidden_sizes=hidden_sizes)
+        model = SGPR((x, y), inducing_variable=inducing_variable, 
+                        kernel=k)
+        loglik = model.elbo()
+        if loglik > best_loglik:
+            best_loglik = loglik
+            best_m = model
+        del model
+        gc.collect()
+    print('Best init: %f' % best_loglik)
+
+    return best_m
+
+
+
+ +
+ + + +
+ +
+ +

+ + + + + + + + + + + + + + + +
+
+ + + +
+ + + +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/assets/SGP-Tools.png b/assets/SGP-Tools.png new file mode 100644 index 0000000..6ec3506 Binary files /dev/null and b/assets/SGP-Tools.png differ diff --git a/assets/_mkdocstrings.css b/assets/_mkdocstrings.css new file mode 100644 index 0000000..85449ec --- /dev/null +++ b/assets/_mkdocstrings.css @@ -0,0 +1,119 @@ + +/* Avoid breaking parameter names, etc. in table cells. */ +.doc-contents td code { + word-break: normal !important; +} + +/* No line break before first paragraph of descriptions. */ +.doc-md-description, +.doc-md-description>p:first-child { + display: inline; +} + +/* Max width for docstring sections tables. */ +.doc .md-typeset__table, +.doc .md-typeset__table table { + display: table !important; + width: 100%; +} + +.doc .md-typeset__table tr { + display: table-row; +} + +/* Defaults in Spacy table style. */ +.doc-param-default { + float: right; +} + +/* Backward-compatibility: docstring section titles in bold. */ +.doc-section-title { + font-weight: bold; +} + +/* Symbols in Navigation and ToC. 
*/ +:root, +[data-md-color-scheme="default"] { + --doc-symbol-attribute-fg-color: #953800; + --doc-symbol-function-fg-color: #8250df; + --doc-symbol-method-fg-color: #8250df; + --doc-symbol-class-fg-color: #0550ae; + --doc-symbol-module-fg-color: #5cad0f; + + --doc-symbol-attribute-bg-color: #9538001a; + --doc-symbol-function-bg-color: #8250df1a; + --doc-symbol-method-bg-color: #8250df1a; + --doc-symbol-class-bg-color: #0550ae1a; + --doc-symbol-module-bg-color: #5cad0f1a; +} + +[data-md-color-scheme="slate"] { + --doc-symbol-attribute-fg-color: #ffa657; + --doc-symbol-function-fg-color: #d2a8ff; + --doc-symbol-method-fg-color: #d2a8ff; + --doc-symbol-class-fg-color: #79c0ff; + --doc-symbol-module-fg-color: #baff79; + + --doc-symbol-attribute-bg-color: #ffa6571a; + --doc-symbol-function-bg-color: #d2a8ff1a; + --doc-symbol-method-bg-color: #d2a8ff1a; + --doc-symbol-class-bg-color: #79c0ff1a; + --doc-symbol-module-bg-color: #baff791a; +} + +code.doc-symbol { + border-radius: .1rem; + font-size: .85em; + padding: 0 .3em; + font-weight: bold; +} + +code.doc-symbol-attribute { + color: var(--doc-symbol-attribute-fg-color); + background-color: var(--doc-symbol-attribute-bg-color); +} + +code.doc-symbol-attribute::after { + content: "attr"; +} + +code.doc-symbol-function { + color: var(--doc-symbol-function-fg-color); + background-color: var(--doc-symbol-function-bg-color); +} + +code.doc-symbol-function::after { + content: "func"; +} + +code.doc-symbol-method { + color: var(--doc-symbol-method-fg-color); + background-color: var(--doc-symbol-method-bg-color); +} + +code.doc-symbol-method::after { + content: "meth"; +} + +code.doc-symbol-class { + color: var(--doc-symbol-class-fg-color); + background-color: var(--doc-symbol-class-bg-color); +} + +code.doc-symbol-class::after { + content: "class"; +} + +code.doc-symbol-module { + color: var(--doc-symbol-module-fg-color); + background-color: var(--doc-symbol-module-bg-color); +} + +code.doc-symbol-module::after { + content: 
"mod"; +} + +.doc-signature .autorefs { + color: inherit; + border-bottom: 1px dotted currentcolor; +} diff --git a/assets/favicon.png b/assets/favicon.png new file mode 100644 index 0000000..51669cf Binary files /dev/null and b/assets/favicon.png differ diff --git a/assets/images/favicon.png b/assets/images/favicon.png new file mode 100644 index 0000000..1cf13b9 Binary files /dev/null and b/assets/images/favicon.png differ diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js b/assets/javascripts/bundle.fe8b6f2b.min.js new file mode 100644 index 0000000..cf778d4 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js @@ -0,0 +1,29 @@ +"use strict";(()=>{var Fi=Object.create;var gr=Object.defineProperty;var ji=Object.getOwnPropertyDescriptor;var Wi=Object.getOwnPropertyNames,Dt=Object.getOwnPropertySymbols,Ui=Object.getPrototypeOf,xr=Object.prototype.hasOwnProperty,no=Object.prototype.propertyIsEnumerable;var oo=(e,t,r)=>t in e?gr(e,t,{enumerable:!0,configurable:!0,writable:!0,value:r}):e[t]=r,R=(e,t)=>{for(var r in t||(t={}))xr.call(t,r)&&oo(e,r,t[r]);if(Dt)for(var r of Dt(t))no.call(t,r)&&oo(e,r,t[r]);return e};var io=(e,t)=>{var r={};for(var o in e)xr.call(e,o)&&t.indexOf(o)<0&&(r[o]=e[o]);if(e!=null&&Dt)for(var o of Dt(e))t.indexOf(o)<0&&no.call(e,o)&&(r[o]=e[o]);return r};var yr=(e,t)=>()=>(t||e((t={exports:{}}).exports,t),t.exports);var Di=(e,t,r,o)=>{if(t&&typeof t=="object"||typeof t=="function")for(let n of Wi(t))!xr.call(e,n)&&n!==r&&gr(e,n,{get:()=>t[n],enumerable:!(o=ji(t,n))||o.enumerable});return e};var Vt=(e,t,r)=>(r=e!=null?Fi(Ui(e)):{},Di(t||!e||!e.__esModule?gr(r,"default",{value:e,enumerable:!0}):r,e));var ao=(e,t,r)=>new Promise((o,n)=>{var i=p=>{try{s(r.next(p))}catch(c){n(c)}},a=p=>{try{s(r.throw(p))}catch(c){n(c)}},s=p=>p.done?o(p.value):Promise.resolve(p.value).then(i,a);s((r=r.apply(e,t)).next())});var co=yr((Er,so)=>{(function(e,t){typeof Er=="object"&&typeof so!="undefined"?t():typeof 
define=="function"&&define.amd?define(t):t()})(Er,function(){"use strict";function e(r){var o=!0,n=!1,i=null,a={text:!0,search:!0,url:!0,tel:!0,email:!0,password:!0,number:!0,date:!0,month:!0,week:!0,time:!0,datetime:!0,"datetime-local":!0};function s(H){return!!(H&&H!==document&&H.nodeName!=="HTML"&&H.nodeName!=="BODY"&&"classList"in H&&"contains"in H.classList)}function p(H){var mt=H.type,ze=H.tagName;return!!(ze==="INPUT"&&a[mt]&&!H.readOnly||ze==="TEXTAREA"&&!H.readOnly||H.isContentEditable)}function c(H){H.classList.contains("focus-visible")||(H.classList.add("focus-visible"),H.setAttribute("data-focus-visible-added",""))}function l(H){H.hasAttribute("data-focus-visible-added")&&(H.classList.remove("focus-visible"),H.removeAttribute("data-focus-visible-added"))}function f(H){H.metaKey||H.altKey||H.ctrlKey||(s(r.activeElement)&&c(r.activeElement),o=!0)}function u(H){o=!1}function h(H){s(H.target)&&(o||p(H.target))&&c(H.target)}function w(H){s(H.target)&&(H.target.classList.contains("focus-visible")||H.target.hasAttribute("data-focus-visible-added"))&&(n=!0,window.clearTimeout(i),i=window.setTimeout(function(){n=!1},100),l(H.target))}function A(H){document.visibilityState==="hidden"&&(n&&(o=!0),te())}function te(){document.addEventListener("mousemove",J),document.addEventListener("mousedown",J),document.addEventListener("mouseup",J),document.addEventListener("pointermove",J),document.addEventListener("pointerdown",J),document.addEventListener("pointerup",J),document.addEventListener("touchmove",J),document.addEventListener("touchstart",J),document.addEventListener("touchend",J)}function 
ie(){document.removeEventListener("mousemove",J),document.removeEventListener("mousedown",J),document.removeEventListener("mouseup",J),document.removeEventListener("pointermove",J),document.removeEventListener("pointerdown",J),document.removeEventListener("pointerup",J),document.removeEventListener("touchmove",J),document.removeEventListener("touchstart",J),document.removeEventListener("touchend",J)}function J(H){H.target.nodeName&&H.target.nodeName.toLowerCase()==="html"||(o=!1,ie())}document.addEventListener("keydown",f,!0),document.addEventListener("mousedown",u,!0),document.addEventListener("pointerdown",u,!0),document.addEventListener("touchstart",u,!0),document.addEventListener("visibilitychange",A,!0),te(),r.addEventListener("focus",h,!0),r.addEventListener("blur",w,!0),r.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&r.host?r.host.setAttribute("data-js-focus-visible",""):r.nodeType===Node.DOCUMENT_NODE&&(document.documentElement.classList.add("js-focus-visible"),document.documentElement.setAttribute("data-js-focus-visible",""))}if(typeof window!="undefined"&&typeof document!="undefined"){window.applyFocusVisiblePolyfill=e;var t;try{t=new CustomEvent("focus-visible-polyfill-ready")}catch(r){t=document.createEvent("CustomEvent"),t.initCustomEvent("focus-visible-polyfill-ready",!1,!1,{})}window.dispatchEvent(t)}typeof document!="undefined"&&e(document)})});var Yr=yr((Rt,Kr)=>{/*! 
+ * clipboard.js v2.0.11 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */(function(t,r){typeof Rt=="object"&&typeof Kr=="object"?Kr.exports=r():typeof define=="function"&&define.amd?define([],r):typeof Rt=="object"?Rt.ClipboardJS=r():t.ClipboardJS=r()})(Rt,function(){return function(){var e={686:function(o,n,i){"use strict";i.d(n,{default:function(){return Ii}});var a=i(279),s=i.n(a),p=i(370),c=i.n(p),l=i(817),f=i.n(l);function u(V){try{return document.execCommand(V)}catch(_){return!1}}var h=function(_){var M=f()(_);return u("cut"),M},w=h;function A(V){var _=document.documentElement.getAttribute("dir")==="rtl",M=document.createElement("textarea");M.style.fontSize="12pt",M.style.border="0",M.style.padding="0",M.style.margin="0",M.style.position="absolute",M.style[_?"right":"left"]="-9999px";var j=window.pageYOffset||document.documentElement.scrollTop;return M.style.top="".concat(j,"px"),M.setAttribute("readonly",""),M.value=V,M}var te=function(_,M){var j=A(_);M.container.appendChild(j);var D=f()(j);return u("copy"),j.remove(),D},ie=function(_){var M=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body},j="";return typeof _=="string"?j=te(_,M):_ instanceof HTMLInputElement&&!["text","search","url","tel","password"].includes(_==null?void 0:_.type)?j=te(_.value,M):(j=f()(_),u("copy")),j},J=ie;function H(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?H=function(M){return typeof M}:H=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},H(V)}var mt=function(){var _=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{},M=_.action,j=M===void 0?"copy":M,D=_.container,Y=_.target,ke=_.text;if(j!=="copy"&&j!=="cut")throw new Error('Invalid "action" value, use either "copy" or "cut"');if(Y!==void 0)if(Y&&H(Y)==="object"&&Y.nodeType===1){if(j==="copy"&&Y.hasAttribute("disabled"))throw new Error('Invalid "target" attribute. 
Please use "readonly" instead of "disabled" attribute');if(j==="cut"&&(Y.hasAttribute("readonly")||Y.hasAttribute("disabled")))throw new Error(`Invalid "target" attribute. You can't cut text from elements with "readonly" or "disabled" attributes`)}else throw new Error('Invalid "target" value, use a valid Element');if(ke)return J(ke,{container:D});if(Y)return j==="cut"?w(Y):J(Y,{container:D})},ze=mt;function Ie(V){"@babel/helpers - typeof";return typeof Symbol=="function"&&typeof Symbol.iterator=="symbol"?Ie=function(M){return typeof M}:Ie=function(M){return M&&typeof Symbol=="function"&&M.constructor===Symbol&&M!==Symbol.prototype?"symbol":typeof M},Ie(V)}function _i(V,_){if(!(V instanceof _))throw new TypeError("Cannot call a class as a function")}function ro(V,_){for(var M=0;M<_.length;M++){var j=_[M];j.enumerable=j.enumerable||!1,j.configurable=!0,"value"in j&&(j.writable=!0),Object.defineProperty(V,j.key,j)}}function Ai(V,_,M){return _&&ro(V.prototype,_),M&&ro(V,M),V}function Ci(V,_){if(typeof _!="function"&&_!==null)throw new TypeError("Super expression must either be null or a function");V.prototype=Object.create(_&&_.prototype,{constructor:{value:V,writable:!0,configurable:!0}}),_&&br(V,_)}function br(V,_){return br=Object.setPrototypeOf||function(j,D){return j.__proto__=D,j},br(V,_)}function Hi(V){var _=Pi();return function(){var j=Wt(V),D;if(_){var Y=Wt(this).constructor;D=Reflect.construct(j,arguments,Y)}else D=j.apply(this,arguments);return ki(this,D)}}function ki(V,_){return _&&(Ie(_)==="object"||typeof _=="function")?_:$i(V)}function $i(V){if(V===void 0)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return V}function Pi(){if(typeof Reflect=="undefined"||!Reflect.construct||Reflect.construct.sham)return!1;if(typeof Proxy=="function")return!0;try{return Date.prototype.toString.call(Reflect.construct(Date,[],function(){})),!0}catch(V){return!1}}function Wt(V){return 
Wt=Object.setPrototypeOf?Object.getPrototypeOf:function(M){return M.__proto__||Object.getPrototypeOf(M)},Wt(V)}function vr(V,_){var M="data-clipboard-".concat(V);if(_.hasAttribute(M))return _.getAttribute(M)}var Ri=function(V){Ci(M,V);var _=Hi(M);function M(j,D){var Y;return _i(this,M),Y=_.call(this),Y.resolveOptions(D),Y.listenClick(j),Y}return Ai(M,[{key:"resolveOptions",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:{};this.action=typeof D.action=="function"?D.action:this.defaultAction,this.target=typeof D.target=="function"?D.target:this.defaultTarget,this.text=typeof D.text=="function"?D.text:this.defaultText,this.container=Ie(D.container)==="object"?D.container:document.body}},{key:"listenClick",value:function(D){var Y=this;this.listener=c()(D,"click",function(ke){return Y.onClick(ke)})}},{key:"onClick",value:function(D){var Y=D.delegateTarget||D.currentTarget,ke=this.action(Y)||"copy",Ut=ze({action:ke,container:this.container,target:this.target(Y),text:this.text(Y)});this.emit(Ut?"success":"error",{action:ke,text:Ut,trigger:Y,clearSelection:function(){Y&&Y.focus(),window.getSelection().removeAllRanges()}})}},{key:"defaultAction",value:function(D){return vr("action",D)}},{key:"defaultTarget",value:function(D){var Y=vr("target",D);if(Y)return document.querySelector(Y)}},{key:"defaultText",value:function(D){return vr("text",D)}},{key:"destroy",value:function(){this.listener.destroy()}}],[{key:"copy",value:function(D){var Y=arguments.length>1&&arguments[1]!==void 0?arguments[1]:{container:document.body};return J(D,Y)}},{key:"cut",value:function(D){return w(D)}},{key:"isSupported",value:function(){var D=arguments.length>0&&arguments[0]!==void 0?arguments[0]:["copy","cut"],Y=typeof D=="string"?[D]:D,ke=!!document.queryCommandSupported;return Y.forEach(function(Ut){ke=ke&&!!document.queryCommandSupported(Ut)}),ke}}]),M}(s()),Ii=Ri},828:function(o){var n=9;if(typeof Element!="undefined"&&!Element.prototype.matches){var 
i=Element.prototype;i.matches=i.matchesSelector||i.mozMatchesSelector||i.msMatchesSelector||i.oMatchesSelector||i.webkitMatchesSelector}function a(s,p){for(;s&&s.nodeType!==n;){if(typeof s.matches=="function"&&s.matches(p))return s;s=s.parentNode}}o.exports=a},438:function(o,n,i){var a=i(828);function s(l,f,u,h,w){var A=c.apply(this,arguments);return l.addEventListener(u,A,w),{destroy:function(){l.removeEventListener(u,A,w)}}}function p(l,f,u,h,w){return typeof l.addEventListener=="function"?s.apply(null,arguments):typeof u=="function"?s.bind(null,document).apply(null,arguments):(typeof l=="string"&&(l=document.querySelectorAll(l)),Array.prototype.map.call(l,function(A){return s(A,f,u,h,w)}))}function c(l,f,u,h){return function(w){w.delegateTarget=a(w.target,f),w.delegateTarget&&h.call(l,w)}}o.exports=p},879:function(o,n){n.node=function(i){return i!==void 0&&i instanceof HTMLElement&&i.nodeType===1},n.nodeList=function(i){var a=Object.prototype.toString.call(i);return i!==void 0&&(a==="[object NodeList]"||a==="[object HTMLCollection]")&&"length"in i&&(i.length===0||n.node(i[0]))},n.string=function(i){return typeof i=="string"||i instanceof String},n.fn=function(i){var a=Object.prototype.toString.call(i);return a==="[object Function]"}},370:function(o,n,i){var a=i(879),s=i(438);function p(u,h,w){if(!u&&!h&&!w)throw new Error("Missing required arguments");if(!a.string(h))throw new TypeError("Second argument must be a String");if(!a.fn(w))throw new TypeError("Third argument must be a Function");if(a.node(u))return c(u,h,w);if(a.nodeList(u))return l(u,h,w);if(a.string(u))return f(u,h,w);throw new TypeError("First argument must be a String, HTMLElement, HTMLCollection, or NodeList")}function c(u,h,w){return u.addEventListener(h,w),{destroy:function(){u.removeEventListener(h,w)}}}function l(u,h,w){return 
Array.prototype.forEach.call(u,function(A){A.addEventListener(h,w)}),{destroy:function(){Array.prototype.forEach.call(u,function(A){A.removeEventListener(h,w)})}}}function f(u,h,w){return s(document.body,u,h,w)}o.exports=p},817:function(o){function n(i){var a;if(i.nodeName==="SELECT")i.focus(),a=i.value;else if(i.nodeName==="INPUT"||i.nodeName==="TEXTAREA"){var s=i.hasAttribute("readonly");s||i.setAttribute("readonly",""),i.select(),i.setSelectionRange(0,i.value.length),s||i.removeAttribute("readonly"),a=i.value}else{i.hasAttribute("contenteditable")&&i.focus();var p=window.getSelection(),c=document.createRange();c.selectNodeContents(i),p.removeAllRanges(),p.addRange(c),a=p.toString()}return a}o.exports=n},279:function(o){function n(){}n.prototype={on:function(i,a,s){var p=this.e||(this.e={});return(p[i]||(p[i]=[])).push({fn:a,ctx:s}),this},once:function(i,a,s){var p=this;function c(){p.off(i,c),a.apply(s,arguments)}return c._=a,this.on(i,c,s)},emit:function(i){var a=[].slice.call(arguments,1),s=((this.e||(this.e={}))[i]||[]).slice(),p=0,c=s.length;for(p;p{"use strict";/*! 
+ * escape-html + * Copyright(c) 2012-2013 TJ Holowaychuk + * Copyright(c) 2015 Andreas Lubbe + * Copyright(c) 2015 Tiancheng "Timothy" Gu + * MIT Licensed + */var ts=/["'&<>]/;ei.exports=rs;function rs(e){var t=""+e,r=ts.exec(t);if(!r)return t;var o,n="",i=0,a=0;for(i=r.index;i0&&i[i.length-1])&&(c[0]===6||c[0]===2)){r=0;continue}if(c[0]===3&&(!i||c[1]>i[0]&&c[1]=e.length&&(e=void 0),{value:e&&e[o++],done:!e}}};throw new TypeError(t?"Object is not iterable.":"Symbol.iterator is not defined.")}function N(e,t){var r=typeof Symbol=="function"&&e[Symbol.iterator];if(!r)return e;var o=r.call(e),n,i=[],a;try{for(;(t===void 0||t-- >0)&&!(n=o.next()).done;)i.push(n.value)}catch(s){a={error:s}}finally{try{n&&!n.done&&(r=o.return)&&r.call(o)}finally{if(a)throw a.error}}return i}function q(e,t,r){if(r||arguments.length===2)for(var o=0,n=t.length,i;o1||s(u,h)})})}function s(u,h){try{p(o[u](h))}catch(w){f(i[0][3],w)}}function p(u){u.value instanceof nt?Promise.resolve(u.value.v).then(c,l):f(i[0][2],u)}function c(u){s("next",u)}function l(u){s("throw",u)}function f(u,h){u(h),i.shift(),i.length&&s(i[0][0],i[0][1])}}function mo(e){if(!Symbol.asyncIterator)throw new TypeError("Symbol.asyncIterator is not defined.");var t=e[Symbol.asyncIterator],r;return t?t.call(e):(e=typeof de=="function"?de(e):e[Symbol.iterator](),r={},o("next"),o("throw"),o("return"),r[Symbol.asyncIterator]=function(){return this},r);function o(i){r[i]=e[i]&&function(a){return new Promise(function(s,p){a=e[i](a),n(s,p,a.done,a.value)})}}function n(i,a,s,p){Promise.resolve(p).then(function(c){i({value:c,done:s})},a)}}function k(e){return typeof e=="function"}function ft(e){var t=function(o){Error.call(o),o.stack=new Error().stack},r=e(t);return r.prototype=Object.create(Error.prototype),r.prototype.constructor=r,r}var zt=ft(function(e){return function(r){e(this),this.message=r?r.length+` errors occurred during unsubscription: +`+r.map(function(o,n){return n+1+") "+o.toString()}).join(` + 
`):"",this.name="UnsubscriptionError",this.errors=r}});function qe(e,t){if(e){var r=e.indexOf(t);0<=r&&e.splice(r,1)}}var Fe=function(){function e(t){this.initialTeardown=t,this.closed=!1,this._parentage=null,this._finalizers=null}return e.prototype.unsubscribe=function(){var t,r,o,n,i;if(!this.closed){this.closed=!0;var a=this._parentage;if(a)if(this._parentage=null,Array.isArray(a))try{for(var s=de(a),p=s.next();!p.done;p=s.next()){var c=p.value;c.remove(this)}}catch(A){t={error:A}}finally{try{p&&!p.done&&(r=s.return)&&r.call(s)}finally{if(t)throw t.error}}else a.remove(this);var l=this.initialTeardown;if(k(l))try{l()}catch(A){i=A instanceof zt?A.errors:[A]}var f=this._finalizers;if(f){this._finalizers=null;try{for(var u=de(f),h=u.next();!h.done;h=u.next()){var w=h.value;try{fo(w)}catch(A){i=i!=null?i:[],A instanceof zt?i=q(q([],N(i)),N(A.errors)):i.push(A)}}}catch(A){o={error:A}}finally{try{h&&!h.done&&(n=u.return)&&n.call(u)}finally{if(o)throw o.error}}}if(i)throw new zt(i)}},e.prototype.add=function(t){var r;if(t&&t!==this)if(this.closed)fo(t);else{if(t instanceof e){if(t.closed||t._hasParent(this))return;t._addParent(this)}(this._finalizers=(r=this._finalizers)!==null&&r!==void 0?r:[]).push(t)}},e.prototype._hasParent=function(t){var r=this._parentage;return r===t||Array.isArray(r)&&r.includes(t)},e.prototype._addParent=function(t){var r=this._parentage;this._parentage=Array.isArray(r)?(r.push(t),r):r?[r,t]:t},e.prototype._removeParent=function(t){var r=this._parentage;r===t?this._parentage=null:Array.isArray(r)&&qe(r,t)},e.prototype.remove=function(t){var r=this._finalizers;r&&qe(r,t),t instanceof e&&t._removeParent(this)},e.EMPTY=function(){var t=new e;return t.closed=!0,t}(),e}();var Tr=Fe.EMPTY;function qt(e){return e instanceof Fe||e&&"closed"in e&&k(e.remove)&&k(e.add)&&k(e.unsubscribe)}function fo(e){k(e)?e():e.unsubscribe()}var $e={onUnhandledError:null,onStoppedNotification:null,Promise:void 
0,useDeprecatedSynchronousErrorHandling:!1,useDeprecatedNextContext:!1};var ut={setTimeout:function(e,t){for(var r=[],o=2;o0},enumerable:!1,configurable:!0}),t.prototype._trySubscribe=function(r){return this._throwIfClosed(),e.prototype._trySubscribe.call(this,r)},t.prototype._subscribe=function(r){return this._throwIfClosed(),this._checkFinalizedStatuses(r),this._innerSubscribe(r)},t.prototype._innerSubscribe=function(r){var o=this,n=this,i=n.hasError,a=n.isStopped,s=n.observers;return i||a?Tr:(this.currentObservers=null,s.push(r),new Fe(function(){o.currentObservers=null,qe(s,r)}))},t.prototype._checkFinalizedStatuses=function(r){var o=this,n=o.hasError,i=o.thrownError,a=o.isStopped;n?r.error(i):a&&r.complete()},t.prototype.asObservable=function(){var r=new F;return r.source=this,r},t.create=function(r,o){return new Eo(r,o)},t}(F);var Eo=function(e){re(t,e);function t(r,o){var n=e.call(this)||this;return n.destination=r,n.source=o,n}return t.prototype.next=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.next)===null||n===void 0||n.call(o,r)},t.prototype.error=function(r){var o,n;(n=(o=this.destination)===null||o===void 0?void 0:o.error)===null||n===void 0||n.call(o,r)},t.prototype.complete=function(){var r,o;(o=(r=this.destination)===null||r===void 0?void 0:r.complete)===null||o===void 0||o.call(r)},t.prototype._subscribe=function(r){var o,n;return(n=(o=this.source)===null||o===void 0?void 0:o.subscribe(r))!==null&&n!==void 0?n:Tr},t}(g);var _r=function(e){re(t,e);function t(r){var o=e.call(this)||this;return o._value=r,o}return Object.defineProperty(t.prototype,"value",{get:function(){return this.getValue()},enumerable:!1,configurable:!0}),t.prototype._subscribe=function(r){var o=e.prototype._subscribe.call(this,r);return!o.closed&&r.next(this._value),o},t.prototype.getValue=function(){var r=this,o=r.hasError,n=r.thrownError,i=r._value;if(o)throw n;return 
this._throwIfClosed(),i},t.prototype.next=function(r){e.prototype.next.call(this,this._value=r)},t}(g);var Lt={now:function(){return(Lt.delegate||Date).now()},delegate:void 0};var _t=function(e){re(t,e);function t(r,o,n){r===void 0&&(r=1/0),o===void 0&&(o=1/0),n===void 0&&(n=Lt);var i=e.call(this)||this;return i._bufferSize=r,i._windowTime=o,i._timestampProvider=n,i._buffer=[],i._infiniteTimeWindow=!0,i._infiniteTimeWindow=o===1/0,i._bufferSize=Math.max(1,r),i._windowTime=Math.max(1,o),i}return t.prototype.next=function(r){var o=this,n=o.isStopped,i=o._buffer,a=o._infiniteTimeWindow,s=o._timestampProvider,p=o._windowTime;n||(i.push(r),!a&&i.push(s.now()+p)),this._trimBuffer(),e.prototype.next.call(this,r)},t.prototype._subscribe=function(r){this._throwIfClosed(),this._trimBuffer();for(var o=this._innerSubscribe(r),n=this,i=n._infiniteTimeWindow,a=n._buffer,s=a.slice(),p=0;p0?e.prototype.schedule.call(this,r,o):(this.delay=o,this.state=r,this.scheduler.flush(this),this)},t.prototype.execute=function(r,o){return o>0||this.closed?e.prototype.execute.call(this,r,o):this._execute(r,o)},t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!=null&&n>0||n==null&&this.delay>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.flush(this),0)},t}(vt);var So=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t}(gt);var Hr=new So(To);var Oo=function(e){re(t,e);function t(r,o){var n=e.call(this,r,o)||this;return n.scheduler=r,n.work=o,n}return t.prototype.requestAsyncId=function(r,o,n){return n===void 0&&(n=0),n!==null&&n>0?e.prototype.requestAsyncId.call(this,r,o,n):(r.actions.push(this),r._scheduled||(r._scheduled=bt.requestAnimationFrame(function(){return r.flush(void 0)})))},t.prototype.recycleAsyncId=function(r,o,n){var i;if(n===void 0&&(n=0),n!=null?n>0:this.delay>0)return e.prototype.recycleAsyncId.call(this,r,o,n);var a=r.actions;o!=null&&((i=a[a.length-1])===null||i===void 0?void 
0:i.id)!==o&&(bt.cancelAnimationFrame(o),r._scheduled=void 0)},t}(vt);var Mo=function(e){re(t,e);function t(){return e!==null&&e.apply(this,arguments)||this}return t.prototype.flush=function(r){this._active=!0;var o=this._scheduled;this._scheduled=void 0;var n=this.actions,i;r=r||n.shift();do if(i=r.execute(r.state,r.delay))break;while((r=n[0])&&r.id===o&&n.shift());if(this._active=!1,i){for(;(r=n[0])&&r.id===o&&n.shift();)r.unsubscribe();throw i}},t}(gt);var me=new Mo(Oo);var O=new F(function(e){return e.complete()});function Yt(e){return e&&k(e.schedule)}function kr(e){return e[e.length-1]}function Xe(e){return k(kr(e))?e.pop():void 0}function He(e){return Yt(kr(e))?e.pop():void 0}function Bt(e,t){return typeof kr(e)=="number"?e.pop():t}var xt=function(e){return e&&typeof e.length=="number"&&typeof e!="function"};function Gt(e){return k(e==null?void 0:e.then)}function Jt(e){return k(e[ht])}function Xt(e){return Symbol.asyncIterator&&k(e==null?void 0:e[Symbol.asyncIterator])}function Zt(e){return new TypeError("You provided "+(e!==null&&typeof e=="object"?"an invalid object":"'"+e+"'")+" where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.")}function Gi(){return typeof Symbol!="function"||!Symbol.iterator?"@@iterator":Symbol.iterator}var er=Gi();function tr(e){return k(e==null?void 0:e[er])}function rr(e){return lo(this,arguments,function(){var r,o,n,i;return Nt(this,function(a){switch(a.label){case 0:r=e.getReader(),a.label=1;case 1:a.trys.push([1,,9,10]),a.label=2;case 2:return[4,nt(r.read())];case 3:return o=a.sent(),n=o.value,i=o.done,i?[4,nt(void 0)]:[3,5];case 4:return[2,a.sent()];case 5:return[4,nt(n)];case 6:return[4,a.sent()];case 7:return a.sent(),[3,2];case 8:return[3,10];case 9:return r.releaseLock(),[7];case 10:return[2]}})})}function or(e){return k(e==null?void 0:e.getReader)}function W(e){if(e instanceof F)return e;if(e!=null){if(Jt(e))return Ji(e);if(xt(e))return Xi(e);if(Gt(e))return Zi(e);if(Xt(e))return Lo(e);if(tr(e))return ea(e);if(or(e))return ta(e)}throw Zt(e)}function Ji(e){return new F(function(t){var r=e[ht]();if(k(r.subscribe))return r.subscribe(t);throw new TypeError("Provided object does not correctly implement Symbol.observable")})}function Xi(e){return new F(function(t){for(var r=0;r=2;return function(o){return o.pipe(e?b(function(n,i){return e(n,i,o)}):le,Te(1),r?Be(t):zo(function(){return new ir}))}}function Fr(e){return e<=0?function(){return O}:y(function(t,r){var o=[];t.subscribe(T(r,function(n){o.push(n),e=2,!0))}function pe(e){e===void 0&&(e={});var t=e.connector,r=t===void 0?function(){return new g}:t,o=e.resetOnError,n=o===void 0?!0:o,i=e.resetOnComplete,a=i===void 0?!0:i,s=e.resetOnRefCountZero,p=s===void 0?!0:s;return function(c){var l,f,u,h=0,w=!1,A=!1,te=function(){f==null||f.unsubscribe(),f=void 0},ie=function(){te(),l=u=void 0,w=A=!1},J=function(){var H=l;ie(),H==null||H.unsubscribe()};return y(function(H,mt){h++,!A&&!w&&te();var ze=u=u!=null?u:r();mt.add(function(){h--,h===0&&!A&&!w&&(f=Wr(J,p))}),ze.subscribe(mt),!l&&h>0&&(l=new at({next:function(Ie){return 
ze.next(Ie)},error:function(Ie){A=!0,te(),f=Wr(ie,n,Ie),ze.error(Ie)},complete:function(){w=!0,te(),f=Wr(ie,a),ze.complete()}}),W(H).subscribe(l))})(c)}}function Wr(e,t){for(var r=[],o=2;oe.next(document)),e}function $(e,t=document){return Array.from(t.querySelectorAll(e))}function P(e,t=document){let r=fe(e,t);if(typeof r=="undefined")throw new ReferenceError(`Missing element: expected "${e}" to be present`);return r}function fe(e,t=document){return t.querySelector(e)||void 0}function Re(){var e,t,r,o;return(o=(r=(t=(e=document.activeElement)==null?void 0:e.shadowRoot)==null?void 0:t.activeElement)!=null?r:document.activeElement)!=null?o:void 0}var xa=S(d(document.body,"focusin"),d(document.body,"focusout")).pipe(_e(1),Q(void 0),m(()=>Re()||document.body),G(1));function et(e){return xa.pipe(m(t=>e.contains(t)),K())}function kt(e,t){return C(()=>S(d(e,"mouseenter").pipe(m(()=>!0)),d(e,"mouseleave").pipe(m(()=>!1))).pipe(t?Ht(r=>Me(+!r*t)):le,Q(e.matches(":hover"))))}function Bo(e,t){if(typeof t=="string"||typeof t=="number")e.innerHTML+=t.toString();else if(t instanceof Node)e.appendChild(t);else if(Array.isArray(t))for(let r of t)Bo(e,r)}function x(e,t,...r){let o=document.createElement(e);if(t)for(let n of Object.keys(t))typeof t[n]!="undefined"&&(typeof t[n]!="boolean"?o.setAttribute(n,t[n]):o.setAttribute(n,""));for(let n of r)Bo(o,n);return o}function sr(e){if(e>999){let t=+((e-950)%1e3>99);return`${((e+1e-6)/1e3).toFixed(t)}k`}else return e.toString()}function wt(e){let t=x("script",{src:e});return C(()=>(document.head.appendChild(t),S(d(t,"load"),d(t,"error").pipe(v(()=>$r(()=>new ReferenceError(`Invalid script: ${e}`))))).pipe(m(()=>{}),L(()=>document.head.removeChild(t)),Te(1))))}var Go=new g,ya=C(()=>typeof ResizeObserver=="undefined"?wt("https://unpkg.com/resize-observer-polyfill"):I(void 0)).pipe(m(()=>new ResizeObserver(e=>e.forEach(t=>Go.next(t)))),v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function 
ce(e){return{width:e.offsetWidth,height:e.offsetHeight}}function ge(e){let t=e;for(;t.clientWidth===0&&t.parentElement;)t=t.parentElement;return ya.pipe(E(r=>r.observe(t)),v(r=>Go.pipe(b(o=>o.target===t),L(()=>r.unobserve(t)))),m(()=>ce(e)),Q(ce(e)))}function Tt(e){return{width:e.scrollWidth,height:e.scrollHeight}}function cr(e){let t=e.parentElement;for(;t&&(e.scrollWidth<=t.scrollWidth&&e.scrollHeight<=t.scrollHeight);)t=(e=t).parentElement;return t?e:void 0}function Jo(e){let t=[],r=e.parentElement;for(;r;)(e.clientWidth>r.clientWidth||e.clientHeight>r.clientHeight)&&t.push(r),r=(e=r).parentElement;return t.length===0&&t.push(document.documentElement),t}function Ue(e){return{x:e.offsetLeft,y:e.offsetTop}}function Xo(e){let t=e.getBoundingClientRect();return{x:t.x+window.scrollX,y:t.y+window.scrollY}}function Zo(e){return S(d(window,"load"),d(window,"resize")).pipe(Le(0,me),m(()=>Ue(e)),Q(Ue(e)))}function pr(e){return{x:e.scrollLeft,y:e.scrollTop}}function De(e){return S(d(e,"scroll"),d(window,"scroll"),d(window,"resize")).pipe(Le(0,me),m(()=>pr(e)),Q(pr(e)))}var en=new g,Ea=C(()=>I(new IntersectionObserver(e=>{for(let t of e)en.next(t)},{threshold:0}))).pipe(v(e=>S(Ke,I(e)).pipe(L(()=>e.disconnect()))),G(1));function tt(e){return Ea.pipe(E(t=>t.observe(e)),v(t=>en.pipe(b(({target:r})=>r===e),L(()=>t.unobserve(e)),m(({isIntersecting:r})=>r))))}function tn(e,t=16){return De(e).pipe(m(({y:r})=>{let o=ce(e),n=Tt(e);return r>=n.height-o.height-t}),K())}var lr={drawer:P("[data-md-toggle=drawer]"),search:P("[data-md-toggle=search]")};function rn(e){return lr[e].checked}function Je(e,t){lr[e].checked!==t&&lr[e].click()}function Ve(e){let t=lr[e];return d(t,"change").pipe(m(()=>t.checked),Q(t.checked))}function wa(e,t){switch(e.constructor){case HTMLInputElement:return e.type==="radio"?/^Arrow/.test(t):!0;case HTMLSelectElement:case HTMLTextAreaElement:return!0;default:return e.isContentEditable}}function Ta(){return 
S(d(window,"compositionstart").pipe(m(()=>!0)),d(window,"compositionend").pipe(m(()=>!1))).pipe(Q(!1))}function on(){let e=d(window,"keydown").pipe(b(t=>!(t.metaKey||t.ctrlKey)),m(t=>({mode:rn("search")?"search":"global",type:t.key,claim(){t.preventDefault(),t.stopPropagation()}})),b(({mode:t,type:r})=>{if(t==="global"){let o=Re();if(typeof o!="undefined")return!wa(o,r)}return!0}),pe());return Ta().pipe(v(t=>t?O:e))}function xe(){return new URL(location.href)}function pt(e,t=!1){if(B("navigation.instant")&&!t){let r=x("a",{href:e.href});document.body.appendChild(r),r.click(),r.remove()}else location.href=e.href}function nn(){return new g}function an(){return location.hash.slice(1)}function sn(e){let t=x("a",{href:e});t.addEventListener("click",r=>r.stopPropagation()),t.click()}function Sa(e){return S(d(window,"hashchange"),e).pipe(m(an),Q(an()),b(t=>t.length>0),G(1))}function cn(e){return Sa(e).pipe(m(t=>fe(`[id="${t}"]`)),b(t=>typeof t!="undefined"))}function $t(e){let t=matchMedia(e);return ar(r=>t.addListener(()=>r(t.matches))).pipe(Q(t.matches))}function pn(){let e=matchMedia("print");return S(d(window,"beforeprint").pipe(m(()=>!0)),d(window,"afterprint").pipe(m(()=>!1))).pipe(Q(e.matches))}function Nr(e,t){return e.pipe(v(r=>r?t():O))}function zr(e,t){return new F(r=>{let o=new XMLHttpRequest;return o.open("GET",`${e}`),o.responseType="blob",o.addEventListener("load",()=>{o.status>=200&&o.status<300?(r.next(o.response),r.complete()):r.error(new Error(o.statusText))}),o.addEventListener("error",()=>{r.error(new Error("Network error"))}),o.addEventListener("abort",()=>{r.complete()}),typeof(t==null?void 0:t.progress$)!="undefined"&&(o.addEventListener("progress",n=>{var i;if(n.lengthComputable)t.progress$.next(n.loaded/n.total*100);else{let a=(i=o.getResponseHeader("Content-Length"))!=null?i:0;t.progress$.next(n.loaded/+a*100)}}),t.progress$.next(5)),o.send(),()=>o.abort()})}function Ne(e,t){return zr(e,t).pipe(v(r=>r.text()),m(r=>JSON.parse(r)),G(1))}function 
ln(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/html")),G(1))}function mn(e,t){let r=new DOMParser;return zr(e,t).pipe(v(o=>o.text()),m(o=>r.parseFromString(o,"text/xml")),G(1))}function fn(){return{x:Math.max(0,scrollX),y:Math.max(0,scrollY)}}function un(){return S(d(window,"scroll",{passive:!0}),d(window,"resize",{passive:!0})).pipe(m(fn),Q(fn()))}function dn(){return{width:innerWidth,height:innerHeight}}function hn(){return d(window,"resize",{passive:!0}).pipe(m(dn),Q(dn()))}function bn(){return z([un(),hn()]).pipe(m(([e,t])=>({offset:e,size:t})),G(1))}function mr(e,{viewport$:t,header$:r}){let o=t.pipe(Z("size")),n=z([o,r]).pipe(m(()=>Ue(e)));return z([r,t,n]).pipe(m(([{height:i},{offset:a,size:s},{x:p,y:c}])=>({offset:{x:a.x-p,y:a.y-c+i},size:s})))}function Oa(e){return d(e,"message",t=>t.data)}function Ma(e){let t=new g;return t.subscribe(r=>e.postMessage(r)),t}function vn(e,t=new Worker(e)){let r=Oa(t),o=Ma(t),n=new g;n.subscribe(o);let i=o.pipe(X(),ne(!0));return n.pipe(X(),Pe(r.pipe(U(i))),pe())}var La=P("#__config"),St=JSON.parse(La.textContent);St.base=`${new URL(St.base,xe())}`;function ye(){return St}function B(e){return St.features.includes(e)}function Ee(e,t){return typeof t!="undefined"?St.translations[e].replace("#",t.toString()):St.translations[e]}function Se(e,t=document){return P(`[data-md-component=${e}]`,t)}function ae(e,t=document){return $(`[data-md-component=${e}]`,t)}function _a(e){let t=P(".md-typeset > :first-child",e);return d(t,"click",{once:!0}).pipe(m(()=>P(".md-typeset",e)),m(r=>({hash:__md_hash(r.innerHTML)})))}function gn(e){if(!B("announce.dismiss")||!e.childElementCount)return O;if(!e.hidden){let t=P(".md-typeset",e);__md_hash(t.innerHTML)===__md_get("__announce")&&(e.hidden=!0)}return C(()=>{let t=new g;return t.subscribe(({hash:r})=>{e.hidden=!0,__md_set("__announce",r)}),_a(e).pipe(E(r=>t.next(r)),L(()=>t.complete()),m(r=>R({ref:e},r)))})}function Aa(e,{target$:t}){return 
t.pipe(m(r=>({hidden:r!==e})))}function xn(e,t){let r=new g;return r.subscribe(({hidden:o})=>{e.hidden=o}),Aa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))}function Pt(e,t){return t==="inline"?x("div",{class:"md-tooltip md-tooltip--inline",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"})):x("div",{class:"md-tooltip",id:e,role:"tooltip"},x("div",{class:"md-tooltip__inner md-typeset"}))}function yn(...e){return x("div",{class:"md-tooltip2",role:"tooltip"},x("div",{class:"md-tooltip2__inner md-typeset"},e))}function En(e,t){if(t=t?`${t}_annotation_${e}`:void 0,t){let r=t?`#${t}`:void 0;return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("a",{href:r,class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}else return x("aside",{class:"md-annotation",tabIndex:0},Pt(t),x("span",{class:"md-annotation__index",tabIndex:-1},x("span",{"data-md-annotation-id":e})))}function wn(e){return x("button",{class:"md-clipboard md-icon",title:Ee("clipboard.copy"),"data-clipboard-target":`#${e} > code`})}function qr(e,t){let r=t&2,o=t&1,n=Object.keys(e.terms).filter(p=>!e.terms[p]).reduce((p,c)=>[...p,x("del",null,c)," "],[]).slice(0,-1),i=ye(),a=new URL(e.location,i.base);B("search.highlight")&&a.searchParams.set("h",Object.entries(e.terms).filter(([,p])=>p).reduce((p,[c])=>`${p} ${c}`.trim(),""));let{tags:s}=ye();return x("a",{href:`${a}`,class:"md-search-result__link",tabIndex:-1},x("article",{class:"md-search-result__article md-typeset","data-md-score":e.score.toFixed(2)},r>0&&x("div",{class:"md-search-result__icon md-icon"}),r>0&&x("h1",null,e.title),r<=0&&x("h2",null,e.title),o>0&&e.text.length>0&&e.text,e.tags&&e.tags.map(p=>{let c=s?p in s?`md-tag-icon md-tag--${s[p]}`:"md-tag-icon":"";return x("span",{class:`md-tag ${c}`},p)}),o>0&&n.length>0&&x("p",{class:"md-search-result__terms"},Ee("search.result.term.missing"),": ",...n)))}function Tn(e){let t=e[0].score,r=[...e],o=ye(),n=r.findIndex(l=>!`${new 
URL(l.location,o.base)}`.includes("#")),[i]=r.splice(n,1),a=r.findIndex(l=>l.scoreqr(l,1)),...p.length?[x("details",{class:"md-search-result__more"},x("summary",{tabIndex:-1},x("div",null,p.length>0&&p.length===1?Ee("search.result.more.one"):Ee("search.result.more.other",p.length))),...p.map(l=>qr(l,1)))]:[]];return x("li",{class:"md-search-result__item"},c)}function Sn(e){return x("ul",{class:"md-source__facts"},Object.entries(e).map(([t,r])=>x("li",{class:`md-source__fact md-source__fact--${t}`},typeof r=="number"?sr(r):r)))}function Qr(e){let t=`tabbed-control tabbed-control--${e}`;return x("div",{class:t,hidden:!0},x("button",{class:"tabbed-button",tabIndex:-1,"aria-hidden":"true"}))}function On(e){return x("div",{class:"md-typeset__scrollwrap"},x("div",{class:"md-typeset__table"},e))}function Ca(e){var o;let t=ye(),r=new URL(`../${e.version}/`,t.base);return x("li",{class:"md-version__item"},x("a",{href:`${r}`,class:"md-version__link"},e.title,((o=t.version)==null?void 0:o.alias)&&e.aliases.length>0&&x("span",{class:"md-version__alias"},e.aliases[0])))}function Mn(e,t){var o;let r=ye();return e=e.filter(n=>{var i;return!((i=n.properties)!=null&&i.hidden)}),x("div",{class:"md-version"},x("button",{class:"md-version__current","aria-label":Ee("select.version")},t.title,((o=r.version)==null?void 0:o.alias)&&t.aliases.length>0&&x("span",{class:"md-version__alias"},t.aliases[0])),x("ul",{class:"md-version__list"},e.map(Ca)))}var Ha=0;function ka(e){let t=z([et(e),kt(e)]).pipe(m(([o,n])=>o||n),K()),r=C(()=>Jo(e)).pipe(oe(De),ct(1),m(()=>Xo(e)));return t.pipe(Ae(o=>o),v(()=>z([t,r])),m(([o,n])=>({active:o,offset:n})),pe())}function $a(e,t){let{content$:r,viewport$:o}=t,n=`__tooltip2_${Ha++}`;return C(()=>{let i=new g,a=new _r(!1);i.pipe(X(),ne(!1)).subscribe(a);let s=a.pipe(Ht(c=>Me(+!c*250,Hr)),K(),v(c=>c?r:O),E(c=>c.id=n),pe());z([i.pipe(m(({active:c})=>c)),s.pipe(v(c=>kt(c,250)),Q(!1))]).pipe(m(c=>c.some(l=>l))).subscribe(a);let 
p=a.pipe(b(c=>c),ee(s,o),m(([c,l,{size:f}])=>{let u=e.getBoundingClientRect(),h=u.width/2;if(l.role==="tooltip")return{x:h,y:8+u.height};if(u.y>=f.height/2){let{height:w}=ce(l);return{x:h,y:-16-w}}else return{x:h,y:16+u.height}}));return z([s,i,p]).subscribe(([c,{offset:l},f])=>{c.style.setProperty("--md-tooltip-host-x",`${l.x}px`),c.style.setProperty("--md-tooltip-host-y",`${l.y}px`),c.style.setProperty("--md-tooltip-x",`${f.x}px`),c.style.setProperty("--md-tooltip-y",`${f.y}px`),c.classList.toggle("md-tooltip2--top",f.y<0),c.classList.toggle("md-tooltip2--bottom",f.y>=0)}),a.pipe(b(c=>c),ee(s,(c,l)=>l),b(c=>c.role==="tooltip")).subscribe(c=>{let l=ce(P(":scope > *",c));c.style.setProperty("--md-tooltip-width",`${l.width}px`),c.style.setProperty("--md-tooltip-tail","0px")}),a.pipe(K(),be(me),ee(s)).subscribe(([c,l])=>{l.classList.toggle("md-tooltip2--active",c)}),z([a.pipe(b(c=>c)),s]).subscribe(([c,l])=>{l.role==="dialog"?(e.setAttribute("aria-controls",n),e.setAttribute("aria-haspopup","dialog")):e.setAttribute("aria-describedby",n)}),a.pipe(b(c=>!c)).subscribe(()=>{e.removeAttribute("aria-controls"),e.removeAttribute("aria-describedby"),e.removeAttribute("aria-haspopup")}),ka(e).pipe(E(c=>i.next(c)),L(()=>i.complete()),m(c=>R({ref:e},c)))})}function lt(e,{viewport$:t},r=document.body){return $a(e,{content$:new F(o=>{let n=e.title,i=yn(n);return o.next(i),e.removeAttribute("title"),r.append(i),()=>{i.remove(),e.setAttribute("title",n)}}),viewport$:t})}function Pa(e,t){let r=C(()=>z([Zo(e),De(t)])).pipe(m(([{x:o,y:n},i])=>{let{width:a,height:s}=ce(e);return{x:o-i.x+a/2,y:n-i.y+s/2}}));return et(e).pipe(v(o=>r.pipe(m(n=>({active:o,offset:n})),Te(+!o||1/0))))}function Ln(e,t,{target$:r}){let[o,n]=Array.from(e.children);return C(()=>{let i=new g,a=i.pipe(X(),ne(!0));return 
i.subscribe({next({offset:s}){e.style.setProperty("--md-tooltip-x",`${s.x}px`),e.style.setProperty("--md-tooltip-y",`${s.y}px`)},complete(){e.style.removeProperty("--md-tooltip-x"),e.style.removeProperty("--md-tooltip-y")}}),tt(e).pipe(U(a)).subscribe(s=>{e.toggleAttribute("data-md-visible",s)}),S(i.pipe(b(({active:s})=>s)),i.pipe(_e(250),b(({active:s})=>!s))).subscribe({next({active:s}){s?e.prepend(o):o.remove()},complete(){e.prepend(o)}}),i.pipe(Le(16,me)).subscribe(({active:s})=>{o.classList.toggle("md-tooltip--active",s)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:s})=>s)).subscribe({next(s){s?e.style.setProperty("--md-tooltip-0",`${-s}px`):e.style.removeProperty("--md-tooltip-0")},complete(){e.style.removeProperty("--md-tooltip-0")}}),d(n,"click").pipe(U(a),b(s=>!(s.metaKey||s.ctrlKey))).subscribe(s=>{s.stopPropagation(),s.preventDefault()}),d(n,"mousedown").pipe(U(a),ee(i)).subscribe(([s,{active:p}])=>{var c;if(s.button!==0||s.metaKey||s.ctrlKey)s.preventDefault();else if(p){s.preventDefault();let l=e.parentElement.closest(".md-annotation");l instanceof HTMLElement?l.focus():(c=Re())==null||c.blur()}}),r.pipe(U(a),b(s=>s===o),Ge(125)).subscribe(()=>e.focus()),Pa(e,t).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function Ra(e){return e.tagName==="CODE"?$(".c, .c1, .cm",e):[e]}function Ia(e){let t=[];for(let r of Ra(e)){let o=[],n=document.createNodeIterator(r,NodeFilter.SHOW_TEXT);for(let i=n.nextNode();i;i=n.nextNode())o.push(i);for(let i of o){let a;for(;a=/(\(\d+\))(!)?/.exec(i.textContent);){let[,s,p]=a;if(typeof p=="undefined"){let c=i.splitText(a.index);i=c.splitText(s.length),t.push(c)}else{i.textContent=s,t.push(i);break}}}}return t}function _n(e,t){t.append(...Array.from(e.childNodes))}function fr(e,t,{target$:r,print$:o}){let n=t.closest("[id]"),i=n==null?void 0:n.id,a=new Map;for(let s of Ia(t)){let[,p]=s.textContent.match(/\((\d+)\)/);fe(`:scope > 
li:nth-child(${p})`,e)&&(a.set(p,En(p,i)),s.replaceWith(a.get(p)))}return a.size===0?O:C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=[];for(let[l,f]of a)c.push([P(".md-typeset",f),P(`:scope > li:nth-child(${l})`,e)]);return o.pipe(U(p)).subscribe(l=>{e.hidden=!l,e.classList.toggle("md-annotation-list",l);for(let[f,u]of c)l?_n(f,u):_n(u,f)}),S(...[...a].map(([,l])=>Ln(l,t,{target$:r}))).pipe(L(()=>s.complete()),pe())})}function An(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return An(t)}}function Cn(e,t){return C(()=>{let r=An(e);return typeof r!="undefined"?fr(r,e,t):O})}var Hn=Vt(Yr());var Fa=0;function kn(e){if(e.nextElementSibling){let t=e.nextElementSibling;if(t.tagName==="OL")return t;if(t.tagName==="P"&&!t.children.length)return kn(t)}}function ja(e){return ge(e).pipe(m(({width:t})=>({scrollable:Tt(e).width>t})),Z("scrollable"))}function $n(e,t){let{matches:r}=matchMedia("(hover)"),o=C(()=>{let n=new g,i=n.pipe(Fr(1));n.subscribe(({scrollable:c})=>{c&&r?e.setAttribute("tabindex","0"):e.removeAttribute("tabindex")});let a=[];if(Hn.default.isSupported()&&(e.closest(".copy")||B("content.code.copy")&&!e.closest(".no-copy"))){let c=e.closest("pre");c.id=`__code_${Fa++}`;let l=wn(c.id);c.insertBefore(l,e),B("content.tooltips")&&a.push(lt(l,{viewport$}))}let s=e.closest(".highlight");if(s instanceof HTMLElement){let c=kn(s);if(typeof c!="undefined"&&(s.classList.contains("annotate")||B("content.code.annotate"))){let l=fr(c,e,t);a.push(ge(s).pipe(U(i),m(({width:f,height:u})=>f&&u),K(),v(f=>f?l:O)))}}return $(":scope > span[id]",e).length&&e.classList.add("md-code__content"),ja(e).pipe(E(c=>n.next(c)),L(()=>n.complete()),m(c=>R({ref:e},c)),Pe(...a))});return B("content.lazy")?tt(e).pipe(b(n=>n),Te(1),v(()=>o)):o}function Wa(e,{target$:t,print$:r}){let o=!0;return 
S(t.pipe(m(n=>n.closest("details:not([open])")),b(n=>e===n),m(()=>({action:"open",reveal:!0}))),r.pipe(b(n=>n||!o),E(()=>o=e.open),m(n=>({action:n?"open":"close"}))))}function Pn(e,t){return C(()=>{let r=new g;return r.subscribe(({action:o,reveal:n})=>{e.toggleAttribute("open",o==="open"),n&&e.scrollIntoView()}),Wa(e,t).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}var Rn=".node circle,.node ellipse,.node path,.node polygon,.node rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}marker{fill:var(--md-mermaid-edge-color)!important}.edgeLabel .label rect{fill:#0000}.label{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.label foreignObject{line-height:normal;overflow:visible}.label div .edgeLabel{color:var(--md-mermaid-label-fg-color)}.edgeLabel,.edgeLabel rect,.label div .edgeLabel{background-color:var(--md-mermaid-label-bg-color)}.edgeLabel,.edgeLabel rect{fill:var(--md-mermaid-label-bg-color);color:var(--md-mermaid-edge-color)}.edgePath .path,.flowchart-link{stroke:var(--md-mermaid-edge-color);stroke-width:.05rem}.edgePath .arrowheadPath{fill:var(--md-mermaid-edge-color);stroke:none}.cluster rect{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}.cluster span{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}g #flowchart-circleEnd,g #flowchart-circleStart,g #flowchart-crossEnd,g #flowchart-crossStart,g #flowchart-pointEnd,g #flowchart-pointStart{stroke:none}g.classGroup line,g.classGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.classGroup text{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.classLabel .box{fill:var(--md-mermaid-label-bg-color);background-color:var(--md-mermaid-label-bg-color);opacity:1}.classLabel .label{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.node 
.divider{stroke:var(--md-mermaid-node-fg-color)}.relation{stroke:var(--md-mermaid-edge-color)}.cardinality{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.cardinality text{fill:inherit!important}defs #classDiagram-compositionEnd,defs #classDiagram-compositionStart,defs #classDiagram-dependencyEnd,defs #classDiagram-dependencyStart,defs #classDiagram-extensionEnd,defs #classDiagram-extensionStart{fill:var(--md-mermaid-edge-color)!important;stroke:var(--md-mermaid-edge-color)!important}defs #classDiagram-aggregationEnd,defs #classDiagram-aggregationStart{fill:var(--md-mermaid-label-bg-color)!important;stroke:var(--md-mermaid-edge-color)!important}g.stateGroup rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}g.stateGroup .state-title{fill:var(--md-mermaid-label-fg-color)!important;font-family:var(--md-mermaid-font-family)}g.stateGroup .composit{fill:var(--md-mermaid-label-bg-color)}.nodeLabel,.nodeLabel p{color:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}a .nodeLabel{text-decoration:underline}.node circle.state-end,.node circle.state-start,.start-state{fill:var(--md-mermaid-edge-color);stroke:none}.end-state-inner,.end-state-outer{fill:var(--md-mermaid-edge-color)}.end-state-inner,.node circle.state-end{stroke:var(--md-mermaid-label-bg-color)}.transition{stroke:var(--md-mermaid-edge-color)}[id^=state-fork] rect,[id^=state-join] rect{fill:var(--md-mermaid-edge-color)!important;stroke:none!important}.statediagram-cluster.statediagram-cluster .inner{fill:var(--md-default-bg-color)}.statediagram-cluster rect{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.statediagram-state rect.divider{fill:var(--md-default-fg-color--lightest);stroke:var(--md-default-fg-color--lighter)}defs 
#statediagram-barbEnd{stroke:var(--md-mermaid-edge-color)}.attributeBoxEven,.attributeBoxOdd{fill:var(--md-mermaid-node-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityBox{fill:var(--md-mermaid-label-bg-color);stroke:var(--md-mermaid-node-fg-color)}.entityLabel{fill:var(--md-mermaid-label-fg-color);font-family:var(--md-mermaid-font-family)}.relationshipLabelBox{fill:var(--md-mermaid-label-bg-color);fill-opacity:1;background-color:var(--md-mermaid-label-bg-color);opacity:1}.relationshipLabel{fill:var(--md-mermaid-label-fg-color)}.relationshipLine{stroke:var(--md-mermaid-edge-color)}defs #ONE_OR_MORE_END *,defs #ONE_OR_MORE_START *,defs #ONLY_ONE_END *,defs #ONLY_ONE_START *,defs #ZERO_OR_MORE_END *,defs #ZERO_OR_MORE_START *,defs #ZERO_OR_ONE_END *,defs #ZERO_OR_ONE_START *{stroke:var(--md-mermaid-edge-color)!important}defs #ZERO_OR_MORE_END circle,defs #ZERO_OR_MORE_START circle{fill:var(--md-mermaid-label-bg-color)}.actor{fill:var(--md-mermaid-sequence-actor-bg-color);stroke:var(--md-mermaid-sequence-actor-border-color)}text.actor>tspan{fill:var(--md-mermaid-sequence-actor-fg-color);font-family:var(--md-mermaid-font-family)}line{stroke:var(--md-mermaid-sequence-actor-line-color)}.actor-man circle,.actor-man line{fill:var(--md-mermaid-sequence-actorman-bg-color);stroke:var(--md-mermaid-sequence-actorman-line-color)}.messageLine0,.messageLine1{stroke:var(--md-mermaid-sequence-message-line-color)}.note{fill:var(--md-mermaid-sequence-note-bg-color);stroke:var(--md-mermaid-sequence-note-border-color)}.loopText,.loopText>tspan,.messageText,.noteText>tspan{stroke:none;font-family:var(--md-mermaid-font-family)!important}.messageText{fill:var(--md-mermaid-sequence-message-fg-color)}.loopText,.loopText>tspan{fill:var(--md-mermaid-sequence-loop-fg-color)}.noteText>tspan{fill:var(--md-mermaid-sequence-note-fg-color)}#arrowhead 
path{fill:var(--md-mermaid-sequence-message-line-color);stroke:none}.loopLine{fill:var(--md-mermaid-sequence-loop-bg-color);stroke:var(--md-mermaid-sequence-loop-border-color)}.labelBox{fill:var(--md-mermaid-sequence-label-bg-color);stroke:none}.labelText,.labelText>span{fill:var(--md-mermaid-sequence-label-fg-color);font-family:var(--md-mermaid-font-family)}.sequenceNumber{fill:var(--md-mermaid-sequence-number-fg-color)}rect.rect{fill:var(--md-mermaid-sequence-box-bg-color);stroke:none}rect.rect+text.text{fill:var(--md-mermaid-sequence-box-fg-color)}defs #sequencenumber{fill:var(--md-mermaid-sequence-number-bg-color)!important}";var Br,Da=0;function Va(){return typeof mermaid=="undefined"||mermaid instanceof Element?wt("https://unpkg.com/mermaid@10/dist/mermaid.min.js"):I(void 0)}function In(e){return e.classList.remove("mermaid"),Br||(Br=Va().pipe(E(()=>mermaid.initialize({startOnLoad:!1,themeCSS:Rn,sequence:{actorFontSize:"16px",messageFontSize:"16px",noteFontSize:"16px"}})),m(()=>{}),G(1))),Br.subscribe(()=>ao(this,null,function*(){e.classList.add("mermaid");let t=`__mermaid_${Da++}`,r=x("div",{class:"mermaid"}),o=e.textContent,{svg:n,fn:i}=yield mermaid.render(t,o),a=r.attachShadow({mode:"closed"});a.innerHTML=n,e.replaceWith(r),i==null||i(a)})),Br.pipe(m(()=>({ref:e})))}var Fn=x("table");function jn(e){return e.replaceWith(Fn),Fn.replaceWith(On(e)),I({ref:e})}function Na(e){let t=e.find(r=>r.checked)||e[0];return S(...e.map(r=>d(r,"change").pipe(m(()=>P(`label[for="${r.id}"]`))))).pipe(Q(P(`label[for="${t.id}"]`)),m(r=>({active:r})))}function Wn(e,{viewport$:t,target$:r}){let o=P(".tabbed-labels",e),n=$(":scope > input",e),i=Qr("prev");e.append(i);let a=Qr("next");return e.append(a),C(()=>{let s=new g,p=s.pipe(X(),ne(!0));z([s,ge(e),tt(e)]).pipe(U(p),Le(1,me)).subscribe({next([{active:c},l]){let f=Ue(c),{width:u}=ce(c);e.style.setProperty("--md-indicator-x",`${f.x}px`),e.style.setProperty("--md-indicator-width",`${u}px`);let 
h=pr(o);(f.xh.x+l.width)&&o.scrollTo({left:Math.max(0,f.x-16),behavior:"smooth"})},complete(){e.style.removeProperty("--md-indicator-x"),e.style.removeProperty("--md-indicator-width")}}),z([De(o),ge(o)]).pipe(U(p)).subscribe(([c,l])=>{let f=Tt(o);i.hidden=c.x<16,a.hidden=c.x>f.width-l.width-16}),S(d(i,"click").pipe(m(()=>-1)),d(a,"click").pipe(m(()=>1))).pipe(U(p)).subscribe(c=>{let{width:l}=ce(o);o.scrollBy({left:l*c,behavior:"smooth"})}),r.pipe(U(p),b(c=>n.includes(c))).subscribe(c=>c.click()),o.classList.add("tabbed-labels--linked");for(let c of n){let l=P(`label[for="${c.id}"]`);l.replaceChildren(x("a",{href:`#${l.htmlFor}`,tabIndex:-1},...Array.from(l.childNodes))),d(l.firstElementChild,"click").pipe(U(p),b(f=>!(f.metaKey||f.ctrlKey)),E(f=>{f.preventDefault(),f.stopPropagation()})).subscribe(()=>{history.replaceState({},"",`#${l.htmlFor}`),l.click()})}return B("content.tabs.link")&&s.pipe(Ce(1),ee(t)).subscribe(([{active:c},{offset:l}])=>{let f=c.innerText.trim();if(c.hasAttribute("data-md-switching"))c.removeAttribute("data-md-switching");else{let u=e.offsetTop-l.y;for(let w of $("[data-tabs]"))for(let A of $(":scope > input",w)){let te=P(`label[for="${A.id}"]`);if(te!==c&&te.innerText.trim()===f){te.setAttribute("data-md-switching",""),A.click();break}}window.scrollTo({top:e.offsetTop-u});let h=__md_get("__tabs")||[];__md_set("__tabs",[...new Set([f,...h])])}}),s.pipe(U(p)).subscribe(()=>{for(let c of $("audio, video",e))c.pause()}),Na(n).pipe(E(c=>s.next(c)),L(()=>s.complete()),m(c=>R({ref:e},c)))}).pipe(Qe(se))}function Un(e,{viewport$:t,target$:r,print$:o}){return S(...$(".annotate:not(.highlight)",e).map(n=>Cn(n,{target$:r,print$:o})),...$("pre:not(.mermaid) > 
code",e).map(n=>$n(n,{target$:r,print$:o})),...$("pre.mermaid",e).map(n=>In(n)),...$("table:not([class])",e).map(n=>jn(n)),...$("details",e).map(n=>Pn(n,{target$:r,print$:o})),...$("[data-tabs]",e).map(n=>Wn(n,{viewport$:t,target$:r})),...$("[title]",e).filter(()=>B("content.tooltips")).map(n=>lt(n,{viewport$:t})))}function za(e,{alert$:t}){return t.pipe(v(r=>S(I(!0),I(!1).pipe(Ge(2e3))).pipe(m(o=>({message:r,active:o})))))}function Dn(e,t){let r=P(".md-typeset",e);return C(()=>{let o=new g;return o.subscribe(({message:n,active:i})=>{e.classList.toggle("md-dialog--active",i),r.textContent=n}),za(e,t).pipe(E(n=>o.next(n)),L(()=>o.complete()),m(n=>R({ref:e},n)))})}var qa=0;function Qa(e,t){document.body.append(e);let{width:r}=ce(e);e.style.setProperty("--md-tooltip-width",`${r}px`),e.remove();let o=cr(t),n=typeof o!="undefined"?De(o):I({x:0,y:0}),i=S(et(t),kt(t)).pipe(K());return z([i,n]).pipe(m(([a,s])=>{let{x:p,y:c}=Ue(t),l=ce(t),f=t.closest("table");return f&&t.parentElement&&(p+=f.offsetLeft+t.parentElement.offsetLeft,c+=f.offsetTop+t.parentElement.offsetTop),{active:a,offset:{x:p-s.x+l.width/2-r/2,y:c-s.y+l.height+8}}}))}function Vn(e){let t=e.title;if(!t.length)return O;let r=`__tooltip_${qa++}`,o=Pt(r,"inline"),n=P(".md-typeset",o);return n.innerHTML=t,C(()=>{let i=new g;return 
i.subscribe({next({offset:a}){o.style.setProperty("--md-tooltip-x",`${a.x}px`),o.style.setProperty("--md-tooltip-y",`${a.y}px`)},complete(){o.style.removeProperty("--md-tooltip-x"),o.style.removeProperty("--md-tooltip-y")}}),S(i.pipe(b(({active:a})=>a)),i.pipe(_e(250),b(({active:a})=>!a))).subscribe({next({active:a}){a?(e.insertAdjacentElement("afterend",o),e.setAttribute("aria-describedby",r),e.removeAttribute("title")):(o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t))},complete(){o.remove(),e.removeAttribute("aria-describedby"),e.setAttribute("title",t)}}),i.pipe(Le(16,me)).subscribe(({active:a})=>{o.classList.toggle("md-tooltip--active",a)}),i.pipe(ct(125,me),b(()=>!!e.offsetParent),m(()=>e.offsetParent.getBoundingClientRect()),m(({x:a})=>a)).subscribe({next(a){a?o.style.setProperty("--md-tooltip-0",`${-a}px`):o.style.removeProperty("--md-tooltip-0")},complete(){o.style.removeProperty("--md-tooltip-0")}}),Qa(o,e).pipe(E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))}).pipe(Qe(se))}function Ka({viewport$:e}){if(!B("header.autohide"))return I(!1);let t=e.pipe(m(({offset:{y:n}})=>n),Ye(2,1),m(([n,i])=>[nMath.abs(i-n.y)>100),m(([,[n]])=>n),K()),o=Ve("search");return z([e,o]).pipe(m(([{offset:n},i])=>n.y>400&&!i),K(),v(n=>n?r:I(!1)),Q(!1))}function Nn(e,t){return C(()=>z([ge(e),Ka(t)])).pipe(m(([{height:r},o])=>({height:r,hidden:o})),K((r,o)=>r.height===o.height&&r.hidden===o.hidden),G(1))}function zn(e,{header$:t,main$:r}){return C(()=>{let o=new g,n=o.pipe(X(),ne(!0));o.pipe(Z("active"),We(t)).subscribe(([{active:a},{hidden:s}])=>{e.classList.toggle("md-header--shadow",a&&!s),e.hidden=s});let i=ue($("[title]",e)).pipe(b(()=>B("content.tooltips")),oe(a=>Vn(a)));return r.subscribe(o),t.pipe(U(n),m(a=>R({ref:e},a)),Pe(i.pipe(U(n))))})}function Ya(e,{viewport$:t,header$:r}){return mr(e,{viewport$:t,header$:r}).pipe(m(({offset:{y:o}})=>{let{height:n}=ce(e);return{active:o>=n}}),Z("active"))}function qn(e,t){return C(()=>{let r=new 
g;r.subscribe({next({active:n}){e.classList.toggle("md-header__title--active",n)},complete(){e.classList.remove("md-header__title--active")}});let o=fe(".md-content h1");return typeof o=="undefined"?O:Ya(o,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))})}function Qn(e,{viewport$:t,header$:r}){let o=r.pipe(m(({height:i})=>i),K()),n=o.pipe(v(()=>ge(e).pipe(m(({height:i})=>({top:e.offsetTop,bottom:e.offsetTop+i})),Z("bottom"))));return z([o,n,t]).pipe(m(([i,{top:a,bottom:s},{offset:{y:p},size:{height:c}}])=>(c=Math.max(0,c-Math.max(0,a-p,i)-Math.max(0,c+p-s)),{offset:a-i,height:c,active:a-i<=p})),K((i,a)=>i.offset===a.offset&&i.height===a.height&&i.active===a.active))}function Ba(e){let t=__md_get("__palette")||{index:e.findIndex(o=>matchMedia(o.getAttribute("data-md-color-media")).matches)},r=Math.max(0,Math.min(t.index,e.length-1));return I(...e).pipe(oe(o=>d(o,"change").pipe(m(()=>o))),Q(e[r]),m(o=>({index:e.indexOf(o),color:{media:o.getAttribute("data-md-color-media"),scheme:o.getAttribute("data-md-color-scheme"),primary:o.getAttribute("data-md-color-primary"),accent:o.getAttribute("data-md-color-accent")}})),G(1))}function Kn(e){let t=$("input",e),r=x("meta",{name:"theme-color"});document.head.appendChild(r);let o=x("meta",{name:"color-scheme"});document.head.appendChild(o);let n=$t("(prefers-color-scheme: light)");return C(()=>{let i=new g;return i.subscribe(a=>{if(document.body.setAttribute("data-md-color-switching",""),a.color.media==="(prefers-color-scheme)"){let s=matchMedia("(prefers-color-scheme: light)"),p=document.querySelector(s.matches?"[data-md-color-media='(prefers-color-scheme: light)']":"[data-md-color-media='(prefers-color-scheme: dark)']");a.color.scheme=p.getAttribute("data-md-color-scheme"),a.color.primary=p.getAttribute("data-md-color-primary"),a.color.accent=p.getAttribute("data-md-color-accent")}for(let[s,p]of Object.entries(a.color))document.body.setAttribute(`data-md-color-${s}`,p);for(let 
s=0;sa.key==="Enter"),ee(i,(a,s)=>s)).subscribe(({index:a})=>{a=(a+1)%t.length,t[a].click(),t[a].focus()}),i.pipe(m(()=>{let a=Se("header"),s=window.getComputedStyle(a);return o.content=s.colorScheme,s.backgroundColor.match(/\d+/g).map(p=>(+p).toString(16).padStart(2,"0")).join("")})).subscribe(a=>r.content=`#${a}`),i.pipe(be(se)).subscribe(()=>{document.body.removeAttribute("data-md-color-switching")}),Ba(t).pipe(U(n.pipe(Ce(1))),st(),E(a=>i.next(a)),L(()=>i.complete()),m(a=>R({ref:e},a)))})}function Yn(e,{progress$:t}){return C(()=>{let r=new g;return r.subscribe(({value:o})=>{e.style.setProperty("--md-progress-value",`${o}`)}),t.pipe(E(o=>r.next({value:o})),L(()=>r.complete()),m(o=>({ref:e,value:o})))})}var Gr=Vt(Yr());function Ga(e){e.setAttribute("data-md-copying","");let t=e.closest("[data-copy]"),r=t?t.getAttribute("data-copy"):e.innerText;return e.removeAttribute("data-md-copying"),r.trimEnd()}function Bn({alert$:e}){Gr.default.isSupported()&&new F(t=>{new Gr.default("[data-clipboard-target], [data-clipboard-text]",{text:r=>r.getAttribute("data-clipboard-text")||Ga(P(r.getAttribute("data-clipboard-target")))}).on("success",r=>t.next(r))}).pipe(E(t=>{t.trigger.focus()}),m(()=>Ee("clipboard.copied"))).subscribe(e)}function Gn(e,t){return e.protocol=t.protocol,e.hostname=t.hostname,e}function Ja(e,t){let r=new Map;for(let o of $("url",e)){let n=P("loc",o),i=[Gn(new URL(n.textContent),t)];r.set(`${i[0]}`,i);for(let a of $("[rel=alternate]",o)){let s=a.getAttribute("href");s!=null&&i.push(Gn(new URL(s),t))}}return r}function ur(e){return mn(new URL("sitemap.xml",e)).pipe(m(t=>Ja(t,new URL(e))),ve(()=>I(new Map)))}function Xa(e,t){if(!(e.target instanceof Element))return O;let r=e.target.closest("a");if(r===null)return O;if(r.target||e.metaKey||e.ctrlKey)return O;let o=new URL(r.href);return o.search=o.hash="",t.has(`${o}`)?(e.preventDefault(),I(new URL(r.href))):O}function Jn(e){let t=new Map;for(let r of $(":scope > *",e.head))t.set(r.outerHTML,r);return 
t}function Xn(e){for(let t of $("[href], [src]",e))for(let r of["href","src"]){let o=t.getAttribute(r);if(o&&!/^(?:[a-z]+:)?\/\//i.test(o)){t[r]=t[r];break}}return I(e)}function Za(e){for(let o of["[data-md-component=announce]","[data-md-component=container]","[data-md-component=header-topic]","[data-md-component=outdated]","[data-md-component=logo]","[data-md-component=skip]",...B("navigation.tabs.sticky")?["[data-md-component=tabs]"]:[]]){let n=fe(o),i=fe(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=Jn(document);for(let[o,n]of Jn(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Se("container");return je($("script",r)).pipe(v(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),O}),X(),ne(document))}function Zn({location$:e,viewport$:t,progress$:r}){let o=ye();if(location.protocol==="file:")return O;let n=ur(o.base);I(document).subscribe(Xn);let i=d(document.body,"click").pipe(We(n),v(([p,c])=>Xa(p,c)),pe()),a=d(window,"popstate").pipe(m(xe),pe());i.pipe(ee(t)).subscribe(([p,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",p)}),S(i,a).subscribe(e);let s=e.pipe(Z("pathname"),v(p=>ln(p,{progress$:r}).pipe(ve(()=>(pt(p,!0),O)))),v(Xn),v(Za),pe());return S(s.pipe(ee(e,(p,c)=>c)),s.pipe(v(()=>e),Z("pathname"),v(()=>e),Z("hash")),e.pipe(K((p,c)=>p.pathname===c.pathname&&p.hash===c.hash),v(()=>i),E(()=>history.back()))).subscribe(p=>{var c,l;history.state!==null||!p.hash?window.scrollTo(0,(l=(c=history.state)==null?void 
0:c.y)!=null?l:0):(history.scrollRestoration="auto",sn(p.hash),history.scrollRestoration="manual")}),e.subscribe(()=>{history.scrollRestoration="manual"}),d(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),t.pipe(Z("offset"),_e(100)).subscribe(({offset:p})=>{history.replaceState(p,"")}),s}var ri=Vt(ti());function oi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,a)=>`${i}${a}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return a=>(0,ri.default)(a).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function It(e){return e.type===1}function dr(e){return e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p 
of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function 
is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] 
[href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of 
n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else 
p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof 
t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js.map b/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 0000000..8263585 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", 
"node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", 
"node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", 
"node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", 
"node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", 
"src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", 
"src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", 
"src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * 
`focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n 
document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. 
For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. 
So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = 
factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box 
model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. 
https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. 
Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? 
\"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if 
(self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? 
arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} 
useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || 
exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener 
to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName === 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from 
https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) 
{\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = 
index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? html + str.substring(lastIndex, index)\n : html;\n}\n", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n 
mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n 
/* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/*! *****************************************************************************\r\nCopyright (c) Microsoft Corporation.\r\n\r\nPermission to use, copy, modify, and/or distribute this software for any\r\npurpose with or without fee is hereby granted.\r\n\r\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\r\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\r\nAND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\r\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\r\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\r\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\r\nPERFORMANCE OF THIS SOFTWARE.\r\n***************************************************************************** */\r\n/* global Reflect, Promise */\r\n\r\nvar extendStatics = function(d, b) {\r\n extendStatics = Object.setPrototypeOf ||\r\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\r\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\r\n return extendStatics(d, b);\r\n};\r\n\r\nexport function __extends(d, b) {\r\n if (typeof b !== \"function\" && b !== null)\r\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\r\n extendStatics(d, b);\r\n function __() { this.constructor = d; }\r\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\r\n}\r\n\r\nexport var __assign = function() {\r\n __assign = Object.assign || function __assign(t) {\r\n for (var s, i = 1, n = arguments.length; i < n; i++) {\r\n s = arguments[i];\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\r\n }\r\n return t;\r\n }\r\n return __assign.apply(this, arguments);\r\n}\r\n\r\nexport function __rest(s, e) {\r\n var t = {};\r\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\r\n t[p] = s[p];\r\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\r\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\r\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\r\n t[p[i]] = s[p[i]];\r\n }\r\n return t;\r\n}\r\n\r\nexport function __decorate(decorators, target, key, desc) {\r\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\r\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\r\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\r\n return c > 3 && r && Object.defineProperty(target, key, r), r;\r\n}\r\n\r\nexport function __param(paramIndex, decorator) {\r\n return function (target, key) { decorator(target, key, paramIndex); }\r\n}\r\n\r\nexport function __metadata(metadataKey, metadataValue) {\r\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\r\n}\r\n\r\nexport function __awaiter(thisArg, _arguments, P, generator) {\r\n function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); }\r\n return new (P || (P = Promise))(function (resolve, reject) {\r\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\r\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\r\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\r\n step((generator = generator.apply(thisArg, _arguments || [])).next());\r\n });\r\n}\r\n\r\nexport function __generator(thisArg, body) {\r\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g;\r\n return g = { next: verb(0), \"throw\": verb(1), \"return\": verb(2) }, typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\r\n function verb(n) { return function (v) { return step([n, v]); }; }\r\n function step(op) {\r\n if (f) throw new TypeError(\"Generator is already executing.\");\r\n while (_) try {\r\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? 
y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\r\n if (y = 0, t) op = [op[0] & 2, t.value];\r\n switch (op[0]) {\r\n case 0: case 1: t = op; break;\r\n case 4: _.label++; return { value: op[1], done: false };\r\n case 5: _.label++; y = op[1]; op = [0]; continue;\r\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\r\n default:\r\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\r\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\r\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\r\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\r\n if (t[2]) _.ops.pop();\r\n _.trys.pop(); continue;\r\n }\r\n op = body.call(thisArg, _);\r\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\r\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\r\n }\r\n}\r\n\r\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n Object.defineProperty(o, k2, { enumerable: true, get: function() { return m[k]; } });\r\n}) : (function(o, m, k, k2) {\r\n if (k2 === undefined) k2 = k;\r\n o[k2] = m[k];\r\n});\r\n\r\nexport function __exportStar(m, o) {\r\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\r\n}\r\n\r\nexport function __values(o) {\r\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\r\n if (m) return m.call(o);\r\n if (o && typeof o.length === \"number\") return {\r\n next: function () {\r\n if (o && i >= o.length) o = void 0;\r\n return { value: o && o[i++], done: !o };\r\n }\r\n };\r\n throw new TypeError(s ? 
\"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\r\n}\r\n\r\nexport function __read(o, n) {\r\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\r\n if (!m) return o;\r\n var i = m.call(o), r, ar = [], e;\r\n try {\r\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\r\n }\r\n catch (error) { e = { error: error }; }\r\n finally {\r\n try {\r\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\r\n }\r\n finally { if (e) throw e.error; }\r\n }\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spread() {\r\n for (var ar = [], i = 0; i < arguments.length; i++)\r\n ar = ar.concat(__read(arguments[i]));\r\n return ar;\r\n}\r\n\r\n/** @deprecated */\r\nexport function __spreadArrays() {\r\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\r\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\r\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\r\n r[k] = a[j];\r\n return r;\r\n}\r\n\r\nexport function __spreadArray(to, from, pack) {\r\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\r\n if (ar || !(i in from)) {\r\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\r\n ar[i] = from[i];\r\n }\r\n }\r\n return to.concat(ar || Array.prototype.slice.call(from));\r\n}\r\n\r\nexport function __await(v) {\r\n return this instanceof __await ? 
(this.v = v, this) : new __await(v);\r\n}\r\n\r\nexport function __asyncGenerator(thisArg, _arguments, generator) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\r\n return i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i;\r\n function verb(n) { if (g[n]) i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; }\r\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\r\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\r\n function fulfill(value) { resume(\"next\", value); }\r\n function reject(value) { resume(\"throw\", value); }\r\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\r\n}\r\n\r\nexport function __asyncDelegator(o) {\r\n var i, p;\r\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\r\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? { value: __await(o[n](v)), done: n === \"return\" } : f ? f(v) : v; } : f; }\r\n}\r\n\r\nexport function __asyncValues(o) {\r\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\r\n var m = o[Symbol.asyncIterator], i;\r\n return m ? m.call(o) : (o = typeof __values === \"function\" ? 
__values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\r\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\r\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\r\n}\r\n\r\nexport function __makeTemplateObject(cooked, raw) {\r\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\r\n return cooked;\r\n};\r\n\r\nvar __setModuleDefault = Object.create ? (function(o, v) {\r\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\r\n}) : function(o, v) {\r\n o[\"default\"] = v;\r\n};\r\n\r\nexport function __importStar(mod) {\r\n if (mod && mod.__esModule) return mod;\r\n var result = {};\r\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\r\n __setModuleDefault(result, mod);\r\n return result;\r\n}\r\n\r\nexport function __importDefault(mod) {\r\n return (mod && mod.__esModule) ? mod : { default: mod };\r\n}\r\n\r\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\r\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? 
f.value : state.get(receiver);\r\n}\r\n\r\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\r\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\r\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\r\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\r\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\r\n}\r\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. 
Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. 
A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. 
Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? []).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? 
[_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. 
It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. 
This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. 
Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { 
timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. 
Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { 
reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. 
Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | 
null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". 
Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): 
UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. 
This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. 
See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). 
In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * 
}\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. 
To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: 
OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? 
Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. 
Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. 
This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. (DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? 
function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. 
Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected 
_checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. 
When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. `ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. 
This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 
1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. 
May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is 
not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? 
this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... 
*/\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. 
Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the 
Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? 
super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. 
Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. 
cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? 
of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! 
: defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. 
You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an

+ +

About

+

Please consider citing the following papers if you use SGP-Tools in your academic work 😄

+
@misc{JakkalaA23SP,
+AUTHOR={Kalvik Jakkala and Srinivas Akella},
+TITLE={Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces},
+NOTE= {Preprint},
+YEAR={2023},
+URL={https://itskalvik.github.io/publication/sgp-sp},
+}
+
+@inproceedings{JakkalaA24IPP,
+AUTHOR={Kalvik Jakkala and Srinivas Akella},
+TITLE={Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes},
+booktitle={IEEE International Conference on Robotics and Automation, {ICRA}},
+YEAR={2024},
+PUBLISHER = {{IEEE}},
+URL={https://itskalvik.github.io/publication/sgp-ipp}
+}
+
+

Acknowledgements

+

This work was funded in part by the UNC Charlotte Office of Research and Economic Development and by NSF under Award Number IIP-1919233.

+

License

+

The SGP-Tools software suite is licensed under the terms of the Apache License 2.0. +See LICENSE for more information.

+ + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 0000000..76530bc Binary files /dev/null and b/objects.inv differ diff --git a/search/search_index.js b/search/search_index.js new file mode 100644 index 0000000..e55dcf0 --- /dev/null +++ b/search/search_index.js @@ -0,0 +1 @@ +var __index = {"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"index.html","title":"Home","text":"

SGP-Tools is a software suite for Sensor Placement and Informative Path Planning.

The library includes python code for the following:

  • Greedy algorithm-based approaches
  • Bayesian optimization-based approaches
  • Genetic algorithm-based approaches
  • Sparse Gaussian process (SGP)-based approaches

"},{"location":"index.html#installation","title":"Installation","text":"

The library is available as a pip package. To install the package, run the following command:

python3 -m pip install sgptools\n

Installation from source:

git clone https://github.com/itskalvik/sgp-tools.git\ncd sgp-tools/\npython3 -m pip install -r requirements.txt\npython3 -m pip install -e .\n

Note: The requirements.txt file contains packages and their latest versions that were last verified to be working without any issues.

"},{"location":"index.html#quick-start","title":"Quick Start","text":"

Please refer to the examples folder for Jupyter notebooks demonstrating all the methods included in the library \ud83d\ude04

"},{"location":"index.html#method-summary","title":"Method Summary","text":""},{"location":"index.html#about","title":"About","text":"

Please consider citing the following papers if you use SGP-Tools in your academic work \ud83d\ude04

@misc{JakkalaA23SP,\nAUTHOR={Kalvik Jakkala and Srinivas Akella},\nTITLE={Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces},\nNOTE= {Preprint},\nYEAR={2023},\nURL={https://itskalvik.github.io/publication/sgp-sp},\n}\n\n@inproceedings{JakkalaA24IPP,\nAUTHOR={Kalvik Jakkala and Srinivas Akella},\nTITLE={Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes},\nbooktitle={IEEE International Conference on Robotics and Automation, {ICRA}},\nYEAR={2024},\nPUBLISHER = {{IEEE}},\nURL={https://itskalvik.github.io/publication/sgp-ipp}\n}\n
"},{"location":"index.html#acknowledgements","title":"Acknowledgements","text":"

This work was funded in part by the UNC Charlotte Office of Research and Economic Development and by NSF under Award Number IIP-1919233.

"},{"location":"index.html#license","title":"License","text":"

The SGP-Tools software suite is licensed under the terms of the Apache License 2.0. See LICENSE for more information.

"},{"location":"API-reference.html","title":"API reference","text":"

Sensor placement and informative path planning methods in this package:

  • continuous_sgp: Provides an SGP-based sensor placement approach that is optimized using gradient descent
  • greedy_sgp: Provides an SGP-based sensor placement approach that is optimized using a greedy algorithm
  • cma_es: Provides a genetic algorithm (CMA-ES) based approach that maximizes mutual-information to get sensor placements
  • greedy_mi: Provides a greedy algorithm based approach that maximizes mutual-information to get sensor placements
  • bo: Provides a Bayesian optimization based approach that maximizes mutual-information to get sensor placements
"},{"location":"API-reference.html#sgptools.models.continuous_sgp.continuous_sgp","title":"continuous_sgp(num_inducing, X_train, noise_variance, kernel, transform=None, Xu_init=None, Xu_time=None, orientation=False, **kwargs)","text":"

Get sensor placement solutions using the Continuous-SGP method

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default num_inducing int

Number of inducing points

required X_train ndarray

(n, d); Unlabeled random sampled training points

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None Xu_init ndarray

(m, d); Initial inducing points

None Xu_time ndarray

(t, d); Temporal inducing points used in spatio-temporal models

None orientation bool

If True, a additionl dimension is added to the inducing points to represent the FoV orientation

False

Returns:

Name Type Description sgpr AugmentedSGPR

Optimized sparse Gaussian process model

loss ndarray

Loss values computed during training

Source code in sgptools/models/continuous_sgp.py
def continuous_sgp(num_inducing, X_train, noise_variance, kernel, \n                   transform=None,\n                   Xu_init=None, \n                   Xu_time=None, \n                   orientation=False,\n                   **kwargs):\n    \"\"\"Get sensor placement solutions using the Continuous-SGP method\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [[Jakkala and Akella, 2024](https://www.itskalvik.com/publication/sgp-ipp/)]\n\n    Args:\n        num_inducing (int): Number of inducing points\n        X_train (ndarray): (n, d); Unlabeled random sampled training points\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n        Xu_init (ndarray): (m, d); Initial inducing points\n        Xu_time (ndarray): (t, d); Temporal inducing points used in spatio-temporal models\n        orientation (bool): If True, a additionl dimension is added to the \n                            inducing points to represent the FoV orientation\n\n    Returns:\n        sgpr (AugmentedSGPR): Optimized sparse Gaussian process model\n        loss (ndarray): Loss values computed during training\n    \"\"\"\n    # Generate init inducing points\n    if Xu_init is None:\n        Xu_init = get_inducing_pts(X_train, num_inducing, \n                                   orientation=orientation)\n\n    # Fit spare GP\n    sgpr = AugmentedSGPR((X_train, np.zeros((len(X_train), 1)).astype(X_train.dtype)),\n                         noise_variance=noise_variance,\n                         kernel=kernel, \n                         inducing_variable=Xu_init,\n                         
inducing_variable_time=Xu_time,\n                         transform=transform)\n\n    # Train the mode\n    loss = optimize_model(sgpr,\n                          kernel_grad=False, \n                          **kwargs)\n\n    return sgpr, loss\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.GreedySGP","title":"GreedySGP","text":"

Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations. Used by get_greedy_sgp_sol function to compute the solution sensor placements using the Greedy-SGP method.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]

Parameters:

Name Type Description Default num_inducing int

Number of inducing points

required S ndarray

(n, d); Candidate sensor placement locations

required V ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

Data noise variance

required kernel Kernel

gpflow kernel function

required Xu_fixed ndarray

(m, d); Inducing points that are not optimized and are always added to the inducing points set during loss function computation

None transform Transform

Transform object

None Source code in sgptools/models/greedy_sgp.py
class GreedySGP:\n    \"\"\"Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations.\n    Used by `get_greedy_sgp_sol` function to compute the solution sensor placements using the Greedy-SGP method.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n\n    Args:\n        num_inducing (int): Number of inducing points\n        S (ndarray): (n, d); Candidate sensor placement locations\n        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): Data noise variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always \n                                    added to the inducing points set during loss function computation\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, num_inducing, S, V, noise_variance, kernel, \n                 Xu_fixed=None, \n                 transform=None):\n        self.gp = AugmentedSGPR((V, np.zeros((len(V), 1))),\n                                noise_variance=noise_variance,\n                                kernel=kernel, \n                                inducing_variable=S[:num_inducing],\n                                transform=transform)\n        self.locs = S\n        self.Xu_fixed = Xu_fixed\n        self.num_inducing = num_inducing\n        self.inducing_dim = S.shape[1]\n\n    def bound(self, x):\n        \"\"\"Computes the SGP's optimization bound using the inducing points `x` \n\n        Args:\n            x (ndarray): (n, d); Inducing points\n\n        Returns:\n            elbo (float): Evidence lower bound/SGP's optimization bound value\n        \"\"\"\n        x = 
np.array(x).reshape(-1).astype(int)\n        Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)\n        Xu *= self.locs[x][0]\n        Xu[-len(x):] = self.locs[x]\n\n        if self.Xu_fixed is not None:\n            Xu[:len(self.Xu_fixed)] = self.Xu_fixed\n\n        self.gp.inducing_variable.Z.assign(Xu)\n        return self.gp.elbo().numpy()\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.GreedySGP.bound","title":"bound(x)","text":"

Computes the SGP's optimization bound using the inducing points x

Parameters:

Name Type Description Default x ndarray

(n, d); Inducing points

required

Returns:

Name Type Description elbo float

Evidence lower bound/SGP's optimization bound value

Source code in sgptools/models/greedy_sgp.py
def bound(self, x):\n    \"\"\"Computes the SGP's optimization bound using the inducing points `x` \n\n    Args:\n        x (ndarray): (n, d); Inducing points\n\n    Returns:\n        elbo (float): Evidence lower bound/SGP's optimization bound value\n    \"\"\"\n    x = np.array(x).reshape(-1).astype(int)\n    Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)\n    Xu *= self.locs[x][0]\n    Xu[-len(x):] = self.locs[x]\n\n    if self.Xu_fixed is not None:\n        Xu[:len(self.Xu_fixed)] = self.Xu_fixed\n\n    self.gp.inducing_variable.Z.assign(Xu)\n    return self.gp.elbo().numpy()\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.get_greedy_sgp_sol","title":"get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None)","text":"

Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to select sensor placements from a given discrete set of candidates locations.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

required candidates ndarray

(n, d); Candidate sensor placement locations

required X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/greedy_sgp.py
def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, \n                       transform=None):\n    \"\"\"Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to \n    select sensor placements from a given discrete set of candidates locations.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        candidates (ndarray): (n, d); Candidate sensor placement locations\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sgp_model = GreedySGP(num_sensors, candidates, X_train, \n                          noise_variance, kernel, transform=transform)\n    model = CustomSelection(num_sensors,\n                            sgp_model.bound,\n                            optimizer='naive',\n                            verbose=False)\n    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))\n    return candidates[sol.reshape(-1)]\n
"},{"location":"API-reference.html#sgptools.models.greedy_mi.GreedyMI","title":"GreedyMI","text":"

Helper class to compute mutual information using a Gaussian process for a given set of sensor locations. Used by get_greedy_mi_sol function to compute the solution sensor placements using the Greedy-MI method.

Refer to the following papers for more details
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]

Parameters:

Name Type Description Default S ndarray

(n, d); Candidate sensor placement locations

required V ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None Source code in sgptools/models/greedy_mi.py
class GreedyMI:\n    \"\"\"Helper class to compute mutual information using a Gaussian process for a given set of sensor locations.\n    Used by `get_greedy_mi_sol` function to compute the solution sensor placements using the Greedy-MI method.\n\n    Refer to the following papers for more details:\n        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]\n        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]\n\n    Args:\n        S (ndarray): (n, d); Candidate sensor placement locations\n        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, S, V, noise_variance, kernel, transform=None):\n        self.S = S\n        self.V = V\n        self.kernel = kernel\n        self.input_dim = S.shape[1]\n        self.noise_variance = noise_variance\n        self.transform = transform\n\n    def mutual_info(self, x):\n        x = np.array(x).reshape(-1).astype(int)\n        A = self.S[x[:-1]].reshape(-1, self.input_dim)\n        y = self.S[x[-1]].reshape(-1, self.input_dim)\n\n        if len(A) == 0:\n            sigma_a = 1.0\n        else:\n            if self.transform is not None:\n                A = self.transform.expand(A)\n            a_gp = AugmentedGPR(data=(A, np.zeros((len(A), 1))),\n                                kernel=self.kernel,\n                                noise_variance=self.noise_variance,\n                                transform=self.transform)\n            _, sigma_a = a_gp.predict_f(y, aggregate_train=True)\n\n        # Remove locations in A to build A bar\n        V_ = self.V.copy()\n        V_rows = V_.view([('', V_.dtype)] * V_.shape[1])\n        if self.transform is not None:\n            A_ = 
self.transform.expand(self.S[x]).numpy()\n        else:\n            A_ = self.S[x]\n        A_rows = A_.view([('', V_.dtype)] * A_.shape[1])\n        V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])\n\n        self.v_gp = AugmentedGPR(data=(V_, np.zeros((len(V_), 1))), \n                                 kernel=self.kernel,\n                                 noise_variance=self.noise_variance,\n                                 transform=self.transform)\n        _, sigma_v = self.v_gp.predict_f(y)\n\n        return (sigma_a/sigma_v).numpy().squeeze()\n
"},{"location":"API-reference.html#sgptools.models.greedy_mi.get_greedy_mi_sol","title":"get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None, optimizer='naive')","text":"

Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). Uses a greedy algorithm to select sensor placements from a given discrete set of candidates locations.

Refer to the following papers for more details
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

required candidates ndarray

(n, d); Candidate sensor placement locations

required X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None optimizer str

Name of an optimizer available in the apricot library

'naive'

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/greedy_mi.py
def get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, \n                      transform=None, optimizer='naive'):\n    \"\"\"Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). \n    Uses a greedy algorithm to select sensor placements from a given discrete set of candidates locations.\n\n    Refer to the following papers for more details:\n        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]\n        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        candidates (ndarray): (n, d); Candidate sensor placement locations\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n        optimizer (str): Name of an optimizer available in the apricot library\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    mi_model = GreedyMI(candidates, X_train, noise_variance, kernel, transform)\n    model = CustomSelection(num_sensors,\n                            mi_model.mutual_info,\n                            optimizer=optimizer,\n                            verbose=False)\n    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))\n    return candidates[sol.reshape(-1)]\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt","title":"BayesianOpt","text":"

Class for optimizing sensor placements using Bayesian Optimization

Refer to the following papers for more details
  • UAV route planning for active disease classification [Vivaldini et al., 2019]
  • Occupancy map building through Bayesian exploration [Francis et al., 2019]

Parameters:

Name Type Description Default X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/bo.py
class BayesianOpt:\n    \"\"\"Class for optimizing sensor placements using Bayesian Optimization\n\n    Refer to the following papers for more details:\n        - UAV route planning for active disease classification [Vivaldini et al., 2019]\n        - Occupancy map building through Bayesian exploration [Francis et al., 2019]\n\n    Args:\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    def __init__(self, X_train, noise_variance, kernel):\n        self.X_train = X_train\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n        self.num_dim = X_train.shape[-1]\n\n        # use the boundaries of the region as the search space\n        self.pbounds_dim = []\n        for i in range(self.num_dim):\n            self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))\n\n    def objective(self, **kwargs):\n        \"\"\"Computes the objective function (mutual information) for the sensor placement problem\n        \"\"\"\n        X = []\n        for i in range(len(kwargs)):\n            X.append(kwargs['x{}'.format(i)])\n        X = np.array(X).reshape(-1, self.num_dim)\n        return -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n\n    def optimize(self, \n                 num_sensors=10, \n                 max_steps=100,  \n                 X_init=None,\n                 init_points=10):\n        \"\"\"Optimizes the sensor placements using Bayesian Optimization without any constraints\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps \n            X_init (ndarray): (m, d); Initial inducing points\n            init_points (int): How many steps of random exploration you want to perform. 
\n                               Random exploration can help by diversifying the exploration space. \n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        if X_init is None:\n            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n        X_init = X_init.reshape(-1)\n\n        pbounds = {}\n        for i in range(self.num_dim*num_sensors):\n            pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]\n\n        optimizer = BayesianOptimization(\n            f=self.objective,\n            pbounds=pbounds,\n            verbose=0,\n            random_state=1,\n            allow_duplicate_points=True\n        )\n\n        optimizer.maximize(\n            init_points=init_points,\n            n_iter=max_steps,\n        )\n\n        sol = []\n        for i in range(self.num_dim*num_sensors):\n            sol.append(optimizer.max['params']['x{}'.format(i)])\n        return np.array(sol).reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt.objective","title":"objective(**kwargs)","text":"

Computes the objective function (mutual information) for the sensor placement problem

Source code in sgptools/models/bo.py
def objective(self, **kwargs):\n    \"\"\"Computes the objective function (mutual information) for the sensor placement problem\n    \"\"\"\n    X = []\n    for i in range(len(kwargs)):\n        X.append(kwargs['x{}'.format(i)])\n    X = np.array(X).reshape(-1, self.num_dim)\n    return -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt.optimize","title":"optimize(num_sensors=10, max_steps=100, X_init=None, init_points=10)","text":"

Optimizes the sensor placements using Bayesian Optimization without any constraints

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 X_init ndarray

(m, d); Initial inducing points

None init_points int

How many steps of random exploration you want to perform. Random exploration can help by diversifying the exploration space.

10

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/bo.py
def optimize(self, \n             num_sensors=10, \n             max_steps=100,  \n             X_init=None,\n             init_points=10):\n    \"\"\"Optimizes the sensor placements using Bayesian Optimization without any constraints\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps \n        X_init (ndarray): (m, d); Initial inducing points\n        init_points (int): How many steps of random exploration you want to perform. \n                           Random exploration can help by diversifying the exploration space. \n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    if X_init is None:\n        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n    X_init = X_init.reshape(-1)\n\n    pbounds = {}\n    for i in range(self.num_dim*num_sensors):\n        pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]\n\n    optimizer = BayesianOptimization(\n        f=self.objective,\n        pbounds=pbounds,\n        verbose=0,\n        random_state=1,\n        allow_duplicate_points=True\n    )\n\n    optimizer.maximize(\n        init_points=init_points,\n        n_iter=max_steps,\n    )\n\n    sol = []\n    for i in range(self.num_dim*num_sensors):\n        sol.append(optimizer.max['params']['x{}'.format(i)])\n    return np.array(sol).reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES","title":"CMA_ES","text":"

Class for optimizing sensor placements using CMA-ES (a genetic algorithm)

Refer to the following paper for more details
  • Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]

Parameters:

Name Type Description Default X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required distance_budget float

Distance budget for when treating the inducing points as waypoints of a path

None num_robots int

Number of robots, used when modeling multi-robot IPP with a distance budget

1 transform Transform

Transform object

None Source code in sgptools/models/cma_es.py
class CMA_ES:\n    \"\"\"Class for optimizing sensor placements using CMA-ES (a genetic algorithm)\n\n    Refer to the following paper for more details:\n        - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]\n\n    Args:\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        distance_budget (float): Distance budget for when treating the inducing points \n                                 as waypoints of a path\n        num_robots (int): Number of robots, used when modeling \n                          multi-robot IPP with a distance budget\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, X_train, noise_variance, kernel,\n                 distance_budget=None,\n                 num_robots=1,\n                 transform=None):\n        self.boundaries = geometry.MultiPoint([[p[0], p[1]] for p in X_train]).convex_hull\n        self.X_train = X_train\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n        self.num_dim = X_train.shape[-1]\n        self.distance_budget = distance_budget\n        self.num_robots = num_robots\n        self.transform = transform\n\n    def update(self, noise_variance, kernel):\n        \"\"\"Update GP noise variance and kernel function parameters\n\n        Args:\n            noise_variance (float): data variance\n            kernel (gpflow.kernels.Kernel): gpflow kernel function\n        \"\"\"\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n\n    def constraint(self, X):\n        \"\"\"Constraint function for the optimization problem (constraint to limit the boundary of the region)\n        Does not work well with CMA-ES as it is a step function and is not continuous\n\n        Args:\n            X (ndarray): (n, 
d); Current sensor placement locations\n        \"\"\"\n        X = np.array(X).reshape(-1, self.num_dim)\n        lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]\n        lagrangian = np.logical_not(lagrangian).astype(float)\n        return lagrangian\n\n    def distance_constraint(self, X):\n        \"\"\"Constraint function for the optimization problem (constraint to limit the total travel distance)\n        Does not work well with CMA-ES as it is a step function and is not continuous\n\n        Args:\n            X (ndarray): (n, d); Current sensor placement locations\n        \"\"\"\n        X = np.array(X).reshape(self.num_robots, -1, self.num_dim)\n        dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)\n        lagrangian = dists - self.distance_budget\n        lagrangian_mask = np.logical_not(lagrangian <= 0)\n        lagrangian[lagrangian_mask] = 0\n        lagrangian = np.sum(lagrangian)\n        return lagrangian\n\n    def objective(self, X):\n        \"\"\"Objective function (GP-based Mutual Information)\n\n        Args:\n            X (ndarray): (n, d); Initial sensor placement locations\n        \"\"\"\n        # MI does not depend on waypoint order (reshape to -1, num_dim)\n        X = np.array(X).reshape(-1, self.num_dim)\n        if self.transform is not None:\n            X = self.transform.expand(X, \n                                      expand_sensor_model=False).numpy()\n\n        try:\n            mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n        except:\n            mi = 0.0 # if the cholskey decomposition fails\n        return mi\n\n    def optimize(self, \n                 num_sensors=10, \n                 max_steps=5000, \n                 tol=1e-11, \n                 X_init=None):\n        \"\"\"Optimizes the SP objective function using CMA-ES without any constraints\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            
max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n            X_init (ndarray): (m, d); Initial inducing points\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n\n        if X_init is None:\n            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n        X_init = X_init.reshape(-1)\n\n        xopt, _ = cma.fmin2(self.objective, X_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                                     'tolfun': tol,\n                                     'seed': 1234},\n                            restarts=5)\n\n        xopt = np.array(xopt).reshape(-1, self.num_dim)\n        if self.transform is not None:\n            xopt = self.transform.expand(xopt, \n                                         expand_sensor_model=False).numpy()\n\n        return xopt.reshape(-1, self.num_dim)\n\n    def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n        \"\"\"Optimizes the SP objective function using CMA-ES with a distance budget constraint\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n        idx = np.random.randint(len(self.X_train), size=num_sensors)\n        x_init = self.X_train[idx].reshape(-1)\n        cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)\n        xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                 
                    'tolfun': tol,\n                                     'seed': 1234},\n                            callback=cfun.update,\n                            restarts=5)\n        return xopt.reshape(-1, self.num_dim)\n\n    def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n        \"\"\"Optimizes the SP objective function using CMA-ES with the constraints\n        to ensure that the sensors are placed within the boundaries of the region\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n        idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)\n        x_init = self.X_train[idx].reshape(-1)\n        cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)\n        xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                                     'tolfun': tol,\n                                     'seed': 1234},\n                            callback=cfun.update,\n                            restarts=5)\n        return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.constraint","title":"constraint(X)","text":"

Constraint function for the optimization problem (constrains the sensor placements to lie within the boundary of the region). Does not work well with CMA-ES, as it is a step function and is not continuous.

Parameters:

Name Type Description Default X ndarray

(n, d); Current sensor placement locations

required Source code in sgptools/models/cma_es.py
def constraint(self, X):\n    \"\"\"Constraint function for the optimization problem (constraint to limit the boundary of the region)\n    Does not work well with CMA-ES as it is a step function and is not continuous\n\n    Args:\n        X (ndarray): (n, d); Current sensor placement locations\n    \"\"\"\n    X = np.array(X).reshape(-1, self.num_dim)\n    lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]\n    lagrangian = np.logical_not(lagrangian).astype(float)\n    return lagrangian\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.coptimize","title":"coptimize(num_sensors=10, max_steps=100, tol=1e-11)","text":"

Optimizes the SP objective function using CMA-ES with the constraints to ensure that the sensors are placed within the boundaries of the region

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 tol float

Convergence tolerance to decide when to stop optimization

1e-11

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n    \"\"\"Optimizes the SP objective function using CMA-ES with the constraints\n    to ensure that the sensors are placed within the boundaries of the region\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n    idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)\n    x_init = self.X_train[idx].reshape(-1)\n    cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)\n    xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        callback=cfun.update,\n                        restarts=5)\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.distance_constraint","title":"distance_constraint(X)","text":"

Constraint function for the optimization problem (constrains the total travel distance to the given budget). Does not work well with CMA-ES, as it is a step function and is not continuous.

Parameters:

Name Type Description Default X ndarray

(n, d); Current sensor placement locations

required Source code in sgptools/models/cma_es.py
def distance_constraint(self, X):\n    \"\"\"Constraint function for the optimization problem (constraint to limit the total travel distance)\n    Does not work well with CMA-ES as it is a step function and is not continuous\n\n    Args:\n        X (ndarray): (n, d); Current sensor placement locations\n    \"\"\"\n    X = np.array(X).reshape(self.num_robots, -1, self.num_dim)\n    dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)\n    lagrangian = dists - self.distance_budget\n    lagrangian_mask = np.logical_not(lagrangian <= 0)\n    lagrangian[lagrangian_mask] = 0\n    lagrangian = np.sum(lagrangian)\n    return lagrangian\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.doptimize","title":"doptimize(num_sensors=10, max_steps=100, tol=1e-11)","text":"

Optimizes the SP objective function using CMA-ES with a distance budget constraint

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 tol float

Convergence tolerance to decide when to stop optimization

1e-11

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n    \"\"\"Optimizes the SP objective function using CMA-ES with a distance budget constraint\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n    idx = np.random.randint(len(self.X_train), size=num_sensors)\n    x_init = self.X_train[idx].reshape(-1)\n    cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)\n    xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        callback=cfun.update,\n                        restarts=5)\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.objective","title":"objective(X)","text":"

Objective function (GP-based Mutual Information)

Parameters:

Name Type Description Default X ndarray

(n, d); Initial sensor placement locations

required Source code in sgptools/models/cma_es.py
def objective(self, X):\n    \"\"\"Objective function (GP-based Mutual Information)\n\n    Args:\n        X (ndarray): (n, d); Initial sensor placement locations\n    \"\"\"\n    # MI does not depend on waypoint order (reshape to -1, num_dim)\n    X = np.array(X).reshape(-1, self.num_dim)\n    if self.transform is not None:\n        X = self.transform.expand(X, \n                                  expand_sensor_model=False).numpy()\n\n    try:\n        mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n    except:\n        mi = 0.0 # if the cholskey decomposition fails\n    return mi\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.optimize","title":"optimize(num_sensors=10, max_steps=5000, tol=1e-11, X_init=None)","text":"

Optimizes the SP objective function using CMA-ES without any constraints

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

5000 tol float

Convergence tolerance to decide when to stop optimization

1e-11 X_init ndarray

(m, d); Initial inducing points

None

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def optimize(self, \n             num_sensors=10, \n             max_steps=5000, \n             tol=1e-11, \n             X_init=None):\n    \"\"\"Optimizes the SP objective function using CMA-ES without any constraints\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n        X_init (ndarray): (m, d); Initial inducing points\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n\n    if X_init is None:\n        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n    X_init = X_init.reshape(-1)\n\n    xopt, _ = cma.fmin2(self.objective, X_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        restarts=5)\n\n    xopt = np.array(xopt).reshape(-1, self.num_dim)\n    if self.transform is not None:\n        xopt = self.transform.expand(xopt, \n                                     expand_sensor_model=False).numpy()\n\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.update","title":"update(noise_variance, kernel)","text":"

Update GP noise variance and kernel function parameters

Parameters:

Name Type Description Default noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/cma_es.py
def update(self, noise_variance, kernel):\n    \"\"\"Update GP noise variance and kernel function parameters\n\n    Args:\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    self.noise_variance = noise_variance\n    self.kernel = kernel\n
"},{"location":"API-reference.html#________________________","title":"________________________","text":"

Core modules in this package:

  • augmented_gpr: Provides a Gaussian process model with expand and aggregate functions
  • augmented_sgpr: Provides a sparse Gaussian process model with update, expand, and aggregate functions
  • osgpr: Provides a streaming sparse Gaussian process model along with initialization function
  • transformations: Provides transforms to model complex sensor field of views and handle informative path planning

Provides a Gaussian process model with expand and aggregate functions

Provides a sparse Gaussian process model with update, expand, and aggregate functions

Provides a streaming sparse Gaussian process model along with initialization function

Provides transforms to model complex sensor field of views and handle informative path planning

"},{"location":"API-reference.html#sgptools.models.core.augmented_gpr.AugmentedGPR","title":"AugmentedGPR","text":"

Bases: GPR

GPR model from the GPFlow library augmented to use a transform object's expand and aggregate functions on the data points where necessary.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required noise_variance float

data variance

required transform Transform

Transform object

required Source code in sgptools/models/core/augmented_gpr.py
class AugmentedGPR(GPR):\n    \"\"\"GPR model from the GPFlow library augmented to use a transform object's\n    expand and aggregate functions on the data points where necessary.  \n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        noise_variance (float): data variance\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(\n        self,\n        *args,\n        transform,\n        **kwargs\n    ):\n        super().__init__(\n            *args,\n            **kwargs\n        )\n        if transform is None:\n            self.transform = Transform()\n        else:\n            self.transform = transform\n\n    def predict_f(\n        self, Xnew: InputData, \n        full_cov: bool = True, \n        full_output_cov: bool = False,\n        aggregate_train: bool = False,\n    ) -> MeanAndVariance:\n        assert_params_false(self.predict_f, full_output_cov=full_output_cov)\n        if self.transform is not None:\n            Xnew = self.transform.expand(Xnew)\n\n        X, Y = self.data\n        err = Y - self.mean_function(X)\n\n        kmm = self.kernel(X)\n        knn = self.kernel(Xnew, full_cov=full_cov)\n        kmn = self.kernel(X, Xnew)\n        kmm_plus_s = add_likelihood_noise_cov(kmm, self.likelihood, X)\n\n        if self.transform is not None:\n            kmn = self.transform.aggregate(tf.transpose(kmn))\n            kmn = tf.transpose(kmn)\n            knn = self.transform.aggregate(knn)\n\n        if aggregate_train:\n            kmm_plus_s = self.transform.aggregate(kmm_plus_s)\n            err = 
self.transform.aggregate(err)\n            # reduce kmn only if it was not reduced before\n            # which can when train and test data are the same size\n            if kmn.shape[0] != kmn.shape[1]:\n                kmn = self.transform.aggregate(kmn)\n\n        conditional = gpflow.conditionals.base_conditional\n        f_mean_zero, f_var = conditional(\n            kmn, kmm_plus_s, knn, err, full_cov=full_cov, white=False\n        )  # [N, P], [N, P] or [P, N, N]\n        f_mean = f_mean_zero + self.mean_function(Xnew)\n        return f_mean, f_var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR","title":"AugmentedSGPR","text":"

Bases: SGPR

SGPR model from the GPFlow library augmented to use a transform object's expand and aggregate functions on the inducing points where necessary. The object has an additional update function to update the kernel and noise variance parameters (currently, the online updates part works only with RBF kernels).

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required noise_variance float

data variance

required inducing_variable ndarray

(m, d); Initial inducing points

required transform Transform

Transform object

required inducing_variable_time ndarray

(m, d); Temporal dimensions of the inducing points, used when modeling spatio-temporal IPP

None Source code in sgptools/models/core/augmented_sgpr.py
class AugmentedSGPR(SGPR):\n    \"\"\"SGPR model from the GPFlow library augmented to use a transform object's\n    expand and aggregate functions on the inducing points where necessary. The object\n    has an additional update function to update the kernel and noise variance parameters \n    (currently, the online updates part works only with RBF kernels).  \n\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        noise_variance (float): data variance\n        inducing_variable (ndarray): (m, d); Initial inducing points\n        transform (Transform): Transform object\n        inducing_variable_time (ndarray): (m, d); Temporal dimensions of the inducing points, \n                                            used when modeling spatio-temporal IPP\n    \"\"\"\n    def __init__(\n        self,\n        *args,\n        transform,\n        inducing_variable_time=None,\n        **kwargs\n    ):\n        super().__init__(\n            *args,\n            **kwargs\n        )\n        if transform is None:\n            self.transform = Transform()\n        else:\n            self.transform = transform\n\n        if inducing_variable_time is not None:\n            self.inducing_variable_time = inducingpoint_wrapper(inducing_variable_time)\n            self.transform.inducing_variable_time = self.inducing_variable_time\n        else:\n            self.inducing_variable_time = None\n\n    def update(self, noise_variance, kernel):\n        \"\"\"Update SGP noise variance and kernel function parameters\n\n        Args:\n            noise_variance (float): 
data variance\n            kernel (gpflow.kernels.Kernel): gpflow kernel function\n        \"\"\"\n        self.likelihood.variance.assign(noise_variance)\n        self.kernel.lengthscales.assign(kernel.lengthscales)\n        self.kernel.variance.assign(kernel.variance)\n\n    def _common_calculation(self) -> \"SGPR.CommonTensors\":\n        \"\"\"\n        Matrices used in log-det calculation\n        :return: A , B, LB, AAT with :math:`LL\u1d40 = K\u1d64\u1d64 , A = L\u207b\u00b9K_{uf}/\u03c3, AAT = AA\u1d40,\n            B = AAT+I, LBLB\u1d40 = B`\n            A is M x N, B is M x M, LB is M x M, AAT is M x M\n        \"\"\"\n        x, _ = self.data\n\n        iv = self.inducing_variable.Z  # [M]\n        iv = self.transform.expand(iv)\n\n        kuf = self.kernel(iv, x)\n        kuf = self.transform.aggregate(kuf)\n\n        kuu = self.kernel(iv) + 1e-6 * tf.eye(tf.shape(iv)[0], dtype=iv.dtype)\n        kuu = self.transform.aggregate(kuu)\n\n        L = tf.linalg.cholesky(kuu)\n\n        sigma_sq = self.likelihood.variance\n        sigma = tf.sqrt(sigma_sq)\n\n        # Compute intermediate matrices\n        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n        AAT = tf.linalg.matmul(A, A, transpose_b=True)\n        B = add_noise_cov(AAT, tf.cast(1.0, AAT.dtype))\n        LB = tf.linalg.cholesky(B)\n\n        return self.CommonTensors(sigma_sq, sigma, A, B, LB, AAT, L)\n\n    def elbo(self) -> tf.Tensor:\n        \"\"\"\n        Construct a tensorflow function to compute the bound on the marginal\n        likelihood. 
For a derivation of the terms in here, see the associated\n        SGPR notebook.\n        \"\"\"\n        common = self._common_calculation()\n        output_shape = tf.shape(self.data[-1])\n        num_data = to_default_float(output_shape[0])\n        output_dim = to_default_float(output_shape[1])\n        const = -0.5 * num_data * output_dim * np.log(2 * np.pi)\n        logdet = self.logdet_term(common)\n        quad = self.quad_term(common)\n        constraints = self.transform.constraints(self.inducing_variable.Z)\n        return const + logdet + quad + constraints\n\n    def predict_f(\n        self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n    ) -> MeanAndVariance:\n\n        # could copy into posterior into a fused version\n        \"\"\"\n        Compute the mean and variance of the latent function at some new points\n        Xnew. For a derivation of the terms in here, see the associated SGPR\n        notebook.\n        \"\"\"\n        X_data, Y_data = self.data\n\n        iv = self.inducing_variable.Z\n        iv = self.transform.expand(iv)\n\n        num_inducing = tf.shape(iv)[0]\n\n        err = Y_data - self.mean_function(X_data)\n        kuf = self.kernel(iv, X_data)\n        kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)\n        Kus = self.kernel(iv, Xnew)\n        sigma = tf.sqrt(self.likelihood.variance)\n        L = tf.linalg.cholesky(kuu)\n        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n        B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(\n            num_inducing, dtype=default_float()\n        )  # cache qinv\n        LB = tf.linalg.cholesky(B)\n        Aerr = tf.linalg.matmul(A, err)\n        c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma\n        tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)\n        tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)\n        mean = tf.linalg.matmul(tmp2, c, transpose_a=True)\n        if full_cov:\n 
           var = (\n                self.kernel(Xnew)\n                + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)\n                - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)\n            )\n            var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]\n        else:\n            var = (\n                self.kernel(Xnew, full_cov=False)\n                + tf.reduce_sum(tf.square(tmp2), 0)\n                - tf.reduce_sum(tf.square(tmp1), 0)\n            )\n            var = tf.tile(var[:, None], [1, self.num_latent_gps])\n\n        return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.elbo","title":"elbo()","text":"

Construct a tensorflow function to compute the bound on the marginal likelihood. For a derivation of the terms in here, see the associated SGPR notebook.

Source code in sgptools/models/core/augmented_sgpr.py
def elbo(self) -> tf.Tensor:\n    \"\"\"\n    Construct a tensorflow function to compute the bound on the marginal\n    likelihood. For a derivation of the terms in here, see the associated\n    SGPR notebook.\n    \"\"\"\n    common = self._common_calculation()\n    output_shape = tf.shape(self.data[-1])\n    num_data = to_default_float(output_shape[0])\n    output_dim = to_default_float(output_shape[1])\n    const = -0.5 * num_data * output_dim * np.log(2 * np.pi)\n    logdet = self.logdet_term(common)\n    quad = self.quad_term(common)\n    constraints = self.transform.constraints(self.inducing_variable.Z)\n    return const + logdet + quad + constraints\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.predict_f","title":"predict_f(Xnew, full_cov=False, full_output_cov=False)","text":"

Compute the mean and variance of the latent function at some new points Xnew. For a derivation of the terms in here, see the associated SGPR notebook.

Source code in sgptools/models/core/augmented_sgpr.py
def predict_f(\n    self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n) -> MeanAndVariance:\n\n    # could copy into posterior into a fused version\n    \"\"\"\n    Compute the mean and variance of the latent function at some new points\n    Xnew. For a derivation of the terms in here, see the associated SGPR\n    notebook.\n    \"\"\"\n    X_data, Y_data = self.data\n\n    iv = self.inducing_variable.Z\n    iv = self.transform.expand(iv)\n\n    num_inducing = tf.shape(iv)[0]\n\n    err = Y_data - self.mean_function(X_data)\n    kuf = self.kernel(iv, X_data)\n    kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)\n    Kus = self.kernel(iv, Xnew)\n    sigma = tf.sqrt(self.likelihood.variance)\n    L = tf.linalg.cholesky(kuu)\n    A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n    B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(\n        num_inducing, dtype=default_float()\n    )  # cache qinv\n    LB = tf.linalg.cholesky(B)\n    Aerr = tf.linalg.matmul(A, err)\n    c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma\n    tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)\n    tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)\n    mean = tf.linalg.matmul(tmp2, c, transpose_a=True)\n    if full_cov:\n        var = (\n            self.kernel(Xnew)\n            + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)\n            - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)\n        )\n        var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]\n    else:\n        var = (\n            self.kernel(Xnew, full_cov=False)\n            + tf.reduce_sum(tf.square(tmp2), 0)\n            - tf.reduce_sum(tf.square(tmp1), 0)\n        )\n        var = tf.tile(var[:, None], [1, self.num_latent_gps])\n\n    return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.update","title":"update(noise_variance, kernel)","text":"

Update SGP noise variance and kernel function parameters

Parameters:

Name Type Description Default noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/core/augmented_sgpr.py
def update(self, noise_variance, kernel):\n    \"\"\"Update SGP noise variance and kernel function parameters\n\n    Args:\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    self.likelihood.variance.assign(noise_variance)\n    self.kernel.lengthscales.assign(kernel.lengthscales)\n    self.kernel.variance.assign(kernel.variance)\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE","title":"OSGPR_VFE","text":"

Bases: GPModel, InternalDataTrainingLossMixin

Online Sparse Variational GP regression model from streaming_sparse_gp

Refer to the following paper for more details
  • Streaming Gaussian process approximations [Bui et al., 2017]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required mu_old ndarray

mean of old q(u); here u are the latents corresponding to the inducing points Z_old

required Su_old ndarray

posterior covariance of old q(u)

required Kaa_old ndarray

prior covariance of old q(u)

required Z_old ndarray

(m_old, d): Old initial inducing points

required Z ndarray

(m_new, d): New initial inducing points

required mean_function function

GP mean function

None Source code in sgptools/models/core/osgpr.py
class OSGPR_VFE(GPModel, InternalDataTrainingLossMixin):\n    \"\"\"Online Sparse Variational GP regression model from [streaming_sparse_gp](https://github.com/thangbui/streaming_sparse_gp/tree/master)\n\n    Refer to the following paper for more details:\n        - Streaming Gaussian process approximations [Bui et al., 2017]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        mu_old (ndarray): mean of old `q(u)`; here `u` are the latents corresponding to the inducing points `Z_old`\n        Su_old (ndarray): posterior covariance of old `q(u)`\n        Kaa_old (ndarray): prior covariance of old `q(u)`\n        Z_old (ndarray): (m_old, d): Old initial inducing points\n        Z (ndarray): (m_new, d): New initial inducing points\n        mean_function (function): GP mean function\n    \"\"\"\n    def __init__(self, data, kernel, mu_old, Su_old, Kaa_old, Z_old, Z, mean_function=None):\n        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n        likelihood = gpflow.likelihoods.Gaussian()\n        num_latent_gps = GPModel.calc_num_latent_gps_from_data(data, kernel, likelihood)\n        super().__init__(kernel, likelihood, mean_function, num_latent_gps)\n\n        self.inducing_variable = InducingPoints(Z)\n        self.num_data = self.X.shape[0]\n\n        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n        self.M_old = Z_old.shape[0]\n        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n        self.Z_old = tf.Variable(Z_old, shape=tf.TensorShape(None), trainable=False)\n\n    def update(self, data):\n        \"\"\"Configure the OSGPR to adapt to a new batch of data. 
\n        Note: The OSGPR needs to be trained using gradient-based approaches after update.\n\n        Args:\n            data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)\n        \"\"\"\n        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n        self.num_data = self.X.shape[0]\n\n        self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), \n                                 shape=tf.TensorShape(None), \n                                 trainable=False)\n\n        # Get posterior mean and covariance for the old inducing points\n        mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)\n        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n\n        # Get the prior covariance matrix for the old inducing points\n        Kaa_old = self.kernel(self.Z_old)\n        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n\n    def _common_terms(self):\n        Mb = self.inducing_variable.num_inducing\n        Ma = self.M_old\n        # jitter = gpflow.default_jitter()\n        jitter = gpflow.utilities.to_default_float(1e-4)\n        sigma2 = self.likelihood.variance\n        sigma = tf.sqrt(sigma2)\n\n        Saa = self.Su_old\n        ma = self.mu_old\n\n        # a is old inducing points, b is new\n        # f is training points\n        # s is test points\n        Kbf = covariances.Kuf(self.inducing_variable, self.kernel, self.X)\n        Kbb = covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter)\n        Kba = covariances.Kuf(self.inducing_variable, self.kernel, self.Z_old)\n        Kaa_cur = gpflow.utilities.add_noise_cov(self.kernel(self.Z_old), jitter)\n        Kaa = gpflow.utilities.add_noise_cov(self.Kaa_old, jitter)\n\n        err = self.Y - self.mean_function(self.X)\n\n        Sainv_ma = tf.linalg.solve(Saa, ma)\n        
Sinv_y = self.Y / sigma2\n        c1 = tf.matmul(Kbf, Sinv_y)\n        c2 = tf.matmul(Kba, Sainv_ma)\n        c = c1 + c2\n\n        Lb = tf.linalg.cholesky(Kbb)\n        Lbinv_c = tf.linalg.triangular_solve(Lb, c, lower=True)\n        Lbinv_Kba = tf.linalg.triangular_solve(Lb, Kba, lower=True)\n        Lbinv_Kbf = tf.linalg.triangular_solve(Lb, Kbf, lower=True) / sigma\n        d1 = tf.matmul(Lbinv_Kbf, Lbinv_Kbf, transpose_b=True)\n\n        LSa = tf.linalg.cholesky(Saa)\n        Kab_Lbinv = tf.linalg.matrix_transpose(Lbinv_Kba)\n        LSainv_Kab_Lbinv = tf.linalg.triangular_solve(\n            LSa, Kab_Lbinv, lower=True)\n        d2 = tf.matmul(LSainv_Kab_Lbinv, LSainv_Kab_Lbinv, transpose_a=True)\n\n        La = tf.linalg.cholesky(Kaa)\n        Lainv_Kab_Lbinv = tf.linalg.triangular_solve(\n            La, Kab_Lbinv, lower=True)\n        d3 = tf.matmul(Lainv_Kab_Lbinv, Lainv_Kab_Lbinv, transpose_a=True)\n\n        D = tf.eye(Mb, dtype=gpflow.default_float()) + d1 + d2 - d3\n        D = gpflow.utilities.add_noise_cov(D, jitter)\n        LD = tf.linalg.cholesky(D)\n\n        LDinv_Lbinv_c = tf.linalg.triangular_solve(LD, Lbinv_c, lower=True)\n\n        return (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n                Lbinv_Kba, LDinv_Lbinv_c, err, d1)\n\n    def maximum_log_likelihood_objective(self):\n        \"\"\"\n        Construct a tensorflow function to compute the bound on the marginal\n        likelihood. 
\n        \"\"\"\n\n        Mb = self.inducing_variable.num_inducing\n        Ma = self.M_old\n        jitter = gpflow.default_jitter()\n        # jitter = gpflow.utilities.to_default_float(1e-4)\n        sigma2 = self.likelihood.variance\n        sigma = tf.sqrt(sigma2)\n        N = self.num_data\n\n        Saa = self.Su_old\n        ma = self.mu_old\n\n        # a is old inducing points, b is new\n        # f is training points\n        Kfdiag = self.kernel(self.X, full_cov=False)\n        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n        LSa = tf.linalg.cholesky(Saa)\n        Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)\n\n        # constant term\n        bound = -0.5 * N * np.log(2 * np.pi)\n        # quadratic term\n        bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2\n        # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)\n        bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))\n        bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))\n        # log det term\n        bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))\n        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))\n\n        # delta 1: trace term\n        bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2\n        bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))\n\n        # delta 2: a and b difference\n        bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))\n        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))\n\n        Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)\n        Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)\n        Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)\n\n        bound += -0.5 * tf.reduce_sum(\n            tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))\n\n        return bound\n\n    def predict_f(self, Xnew, full_cov=False):\n        \"\"\"\n        Compute the mean 
and variance of the latent function at some new points\n        Xnew. \n        \"\"\"\n\n        # jitter = gpflow.default_jitter()\n        jitter = gpflow.utilities.to_default_float(1e-4)\n\n        # a is old inducing points, b is new\n        # f is training points\n        # s is test points\n        Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)\n        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n        Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)\n        LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)\n        mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)\n\n        if full_cov:\n            Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())\n            var1 = Kss\n            var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)\n            var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)\n            var = var1 + var2 + var3\n        else:\n            var1 = self.kernel(Xnew, full_cov=False)\n            var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)\n            var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)\n            var = var1 + var2 + var3\n\n        return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.maximum_log_likelihood_objective","title":"maximum_log_likelihood_objective()","text":"

Construct a tensorflow function to compute the bound on the marginal likelihood.

Source code in sgptools/models/core/osgpr.py
def maximum_log_likelihood_objective(self):\n    \"\"\"\n    Construct a tensorflow function to compute the bound on the marginal\n    likelihood. \n    \"\"\"\n\n    Mb = self.inducing_variable.num_inducing\n    Ma = self.M_old\n    jitter = gpflow.default_jitter()\n    # jitter = gpflow.utilities.to_default_float(1e-4)\n    sigma2 = self.likelihood.variance\n    sigma = tf.sqrt(sigma2)\n    N = self.num_data\n\n    Saa = self.Su_old\n    ma = self.mu_old\n\n    # a is old inducing points, b is new\n    # f is training points\n    Kfdiag = self.kernel(self.X, full_cov=False)\n    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n    LSa = tf.linalg.cholesky(Saa)\n    Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)\n\n    # constant term\n    bound = -0.5 * N * np.log(2 * np.pi)\n    # quadratic term\n    bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2\n    # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)\n    bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))\n    bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))\n    # log det term\n    bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))\n    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))\n\n    # delta 1: trace term\n    bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2\n    bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))\n\n    # delta 2: a and b difference\n    bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))\n    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))\n\n    Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)\n    Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)\n    Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)\n\n    bound += -0.5 * tf.reduce_sum(\n        tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))\n\n    return bound\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.predict_f","title":"predict_f(Xnew, full_cov=False)","text":"

Compute the mean and variance of the latent function at some new points Xnew.

Source code in sgptools/models/core/osgpr.py
def predict_f(self, Xnew, full_cov=False):\n    \"\"\"\n    Compute the mean and variance of the latent function at some new points\n    Xnew. \n    \"\"\"\n\n    # jitter = gpflow.default_jitter()\n    jitter = gpflow.utilities.to_default_float(1e-4)\n\n    # a is old inducing points, b is new\n    # f is training points\n    # s is test points\n    Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)\n    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n    Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)\n    LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)\n    mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)\n\n    if full_cov:\n        Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())\n        var1 = Kss\n        var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)\n        var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)\n        var = var1 + var2 + var3\n    else:\n        var1 = self.kernel(Xnew, full_cov=False)\n        var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)\n        var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)\n        var = var1 + var2 + var3\n\n    return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.update","title":"update(data)","text":"

Configure the OSGPR to adapt to a new batch of data. Note: The OSGPR needs to be trained using gradient-based approaches after update.

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)

required Source code in sgptools/models/core/osgpr.py
def update(self, data):\n    \"\"\"Configure the OSGPR to adapt to a new batch of data. \n    Note: The OSGPR needs to be trained using gradient-based approaches after update.\n\n    Args:\n        data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)\n    \"\"\"\n    self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n    self.num_data = self.X.shape[0]\n\n    self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), \n                             shape=tf.TensorShape(None), \n                             trainable=False)\n\n    # Get posterior mean and covariance for the old inducing points\n    mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)\n    self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n    self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n\n    # Get the prior covariance matrix for the old inducing points\n    Kaa_old = self.kernel(self.Z_old)\n    self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.init_osgpr","title":"init_osgpr(X_train, num_inducing=10, lengthscales=1.0, variance=1.0, noise_variance=0.001)","text":"

Initialize a VFE OSGPR model with an RBF kernel with unit variance and lengthcales, and 0.001 noise variance. Used in the Online Continuous SGP approach.

Parameters:

Name Type Description Default X_train ndarray

(n, d); Unlabeled random sampled training points. They only effect the initial inducing point locations, i.e., limits them to the bounds of the data

required num_inducing int

Number of inducing points

10 lengthscales ndarray or list

Kernel lengthscale of each dimension of the data

1.0 variance float

Kernel variance

1.0 noise_variance float

Data variance

0.001

Returns:

Name Type Description online_param OSGPR_VFE

Initialized online sparse Gaussian process model

Source code in sgptools/models/core/osgpr.py
def init_osgpr(X_train, \n               num_inducing=10, \n               lengthscales=1.0, \n               variance=1.0,\n               noise_variance=0.001):\n    \"\"\"Initialize a VFE OSGPR model with an RBF kernel with \n    unit variance and lengthcales, and 0.001 noise variance.\n    Used in the Online Continuous SGP approach. \n\n    Args:\n        X_train (ndarray): (n, d); Unlabeled random sampled training points. \n                        They only effect the initial inducing point locations, \n                        i.e., limits them to the bounds of the data\n        num_inducing (int): Number of inducing points\n        lengthscales (ndarray or list): Kernel lengthscale of each dimension of the data\n        variance (float): Kernel variance\n        noise_variance (float): Data variance\n\n    Returns:\n        online_param (OSGPR_VFE): Initialized online sparse Gaussian process model\n    \"\"\"\n\n    y_train = np.zeros((len(X_train), 1), dtype=X_train.dtype)\n    Z_init = get_inducing_pts(X_train, num_inducing)\n    init_param = gpflow.models.SGPR((X_train, y_train),\n                                    gpflow.kernels.RBF(variance=variance, \n                                                       lengthscales=lengthscales), \n                                    inducing_variable=Z_init, \n                                    noise_variance=noise_variance)\n\n    # Initialize the OSGPR model using the parameters from the SGPR model\n    # The X_train and y_train here will be overwritten in the online phase \n    X_train = np.array([[0, 0], [0, 0]])\n    y_train = np.array([0, 0]).reshape(-1, 1)\n    Zopt = init_param.inducing_variable.Z.numpy()\n    mu, Su = init_param.predict_f(Zopt, full_cov=True)\n    Kaa = init_param.kernel(Zopt)\n    online_param = OSGPR_VFE((X_train[:2], y_train[:2]),\n                             init_param.kernel,\n                             mu, Su[0], Kaa,\n                             Zopt, Zopt)\n    
online_param.likelihood.variance.assign(init_param.likelihood.variance)\n\n    return online_param\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform","title":"IPPTransform","text":"

Bases: Transform

Transform to model IPP problems

Usage details
  • For point sensing, set sampling_rate = 2
  • For continuous sensing, set sampling_rate > 2 (approx the data collected along the path)
  • For multi-robot case, set num_robots > 1
  • For onlineIPP use update_fixed to freeze the visited waypoints

Parameters:

Name Type Description Default sampling_rate int

Number of points to sample between each pair of inducing points

2 distance_budget float

Distance budget for the path

None num_robots int

Number of robots

1 Xu_fixed ndarray

(num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized

None num_dim int

Dimension of the data collection environment

2 sensor_model Transform

Transform object to expand each inducing point to p points approximating each sensor's FoV

None Source code in sgptools/models/core/transformations.py
class IPPTransform(Transform):\n    \"\"\"Transform to model IPP problems\n\n    Usage details: \n        * For point sensing, set `sampling_rate = 2`\n        * For continuous sensing, set `sampling_rate > 2` (approx the data collected along the path)\n        * For multi-robot case, set `num_robots > 1`\n        * For onlineIPP use `update_fixed` to freeze the visited waypoints\n\n    Args:\n        sampling_rate (int): Number of points to sample between each pair of inducing points\n        distance_budget (float): Distance budget for the path\n        num_robots (int): Number of robots\n        Xu_fixed (ndarray): (num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized\n        num_dim (int): Dimension of the data collection environment\n        sensor_model (Transform): Transform object to expand each inducing point to `p` points \n                                  approximating each sensor's FoV\n    \"\"\"\n    def __init__(self, \n                 sampling_rate=2, \n                 distance_budget=None, \n                 num_robots=1,\n                 Xu_fixed=None,\n                 num_dim=2,\n                 sensor_model=None,\n                 **kwargs):\n        super().__init__(**kwargs)\n        if sampling_rate < 2:\n            raise ValueError('Sampling rate must be greater than 2.')\n\n        self.sampling_rate = sampling_rate\n        self.distance_budget = distance_budget\n        self.num_robots = num_robots\n        self.num_dim = num_dim\n        self.sensor_model = sensor_model\n\n        # Disable aggregation if aggregation size was explicitly set to 0\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        # Set aggregation size to sampling rate if aggregation size was not set\n        # and sampling rate is enabled (greater than 2)\n        elif self.aggregation_size is None and sampling_rate > 2:\n            self.aggregation_size = sampling_rate\n\n        # 
Initilize variable to store visited waypoints for onlineIPP\n        if Xu_fixed is not None:\n            self.Xu_fixed = tf.Variable(Xu_fixed, \n                                        shape=tf.TensorShape(None), \n                                        trainable=False)\n        else:\n            self.Xu_fixed = None\n\n    def update_Xu_fixed(self, Xu_fixed):\n        \"\"\"Function to update the visited waypoints\n\n        Args:\n            Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)\n        \"\"\"\n        self.num_fixed = Xu_fixed.shape[1]\n        if self.Xu_fixed is not None:\n            self.Xu_fixed.assign(Xu_fixed)\n        else:\n            # ToDo: Use binary mask of fixed size to avoid retracing\n            self.Xu_fixed = tf.Variable(Xu_fixed, \n                                        shape=tf.TensorShape(None), \n                                        trainable=False)\n\n    def expand(self, Xu, expand_sensor_model=True):\n        \"\"\"Sample points between each pair of inducing points to form the path\n\n        Args:\n            Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space\n            expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, \n                                        used for online IPP\n\n        Returns:\n            Xu (ndarray): Expansion transformed inducing points\n        \"\"\"\n        # If using single-robot offline IPP with point sensing, return inducing points as is.\n        if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:\n            return Xu\n\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n        # If using online IPP, add visited waypoints that won't be optimized anymore\n        if self.Xu_fixed is not None:\n            Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)\n\n        if not 
expand_sensor_model:\n            return tf.reshape(Xu, (-1, self.num_dim))\n\n        # Interpolate additional inducing points between waypoints to approximate \n        # the continuous data sensing model\n        if self.sampling_rate > 2:\n            Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)\n            Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])\n            Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n        if self.sensor_model is not None:\n            Xu = self.sensor_model.expand(Xu)\n            return Xu\n\n        Xu = tf.reshape(Xu, (-1, self.num_dim))\n        return Xu\n\n    def aggregate(self, k):\n        \"\"\"Applies the aggregation transform to kernel matrices. Checks `sensor_model` \n           and uses the appropriate aggregation transform. \n\n        Args:\n            k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                        `m` is the number of inducing points,\n                        `p` is the number of points each inducing point is mapped,\n                        `n` is the number of training data points.\n\n        Returns:\n            k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n        \"\"\"\n        if self.sensor_model is not None:\n            return self.sensor_model.aggregate(k)\n        else:\n            return super().aggregate(k)\n\n    def constraints(self, Xu):\n        \"\"\"Computes the distance constraint term that is added to the SGP's optimization function.\n        Each robot can be assigned a different distance budget.\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the distance constraints\n\n        Returns:\n            loss (float): distance constraint term\n        \"\"\"\n        if self.distance_budget is None:\n            return 0.\n        else:\n            Xu = self.expand(Xu, expand_sensor_model=False)\n            dist = self.distance(Xu)-self.distance_budget\n            dist = 
tf.reduce_sum(tf.nn.relu(dist))\n            loss = -dist*self.constraint_weight\n            return loss\n\n    def distance(self, Xu):\n        \"\"\"Computes the distance incured by sequentially visiting the inducing points\n        ToDo: Change distance from 2d to nd. Currently limited to 2d \n            to ensure the rotation angle is not included when using\n            a square FoV sensor.\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the path lengths\n\n        Returns:\n            dist (float): path lengths\n        \"\"\"\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n        dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)\n        dist = tf.reduce_sum(dist, axis=1)\n        return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.aggregate","title":"aggregate(k)","text":"

Applies the aggregation transform to kernel matrices. Checks sensor_model and uses the appropriate aggregation transform.

Parameters:

Name Type Description Default k tensor

(mp, mp)/(mp, n); Kernel matrix. m is the number of inducing points, p is the number of points each inducing point is mapped, n is the number of training data points.

required

Returns:

Name Type Description k tensor

(m, m)/(m, n); Aggregated kernel matrix

Source code in sgptools/models/core/transformations.py
def aggregate(self, k):\n    \"\"\"Applies the aggregation transform to kernel matrices. Checks `sensor_model` \n       and uses the appropriate aggregation transform. \n\n    Args:\n        k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                    `m` is the number of inducing points,\n                    `p` is the number of points each inducing point is mapped,\n                    `n` is the number of training data points.\n\n    Returns:\n        k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n    \"\"\"\n    if self.sensor_model is not None:\n        return self.sensor_model.aggregate(k)\n    else:\n        return super().aggregate(k)\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.constraints","title":"constraints(Xu)","text":"

Computes the distance constraint term that is added to the SGP's optimization function. Each robot can be assigned a different distance budget.

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the distance constraints

required

Returns:

Name Type Description loss float

distance constraint term

Source code in sgptools/models/core/transformations.py
def constraints(self, Xu):\n    \"\"\"Computes the distance constraint term that is added to the SGP's optimization function.\n    Each robot can be assigned a different distance budget.\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the distance constraints\n\n    Returns:\n        loss (float): distance constraint term\n    \"\"\"\n    if self.distance_budget is None:\n        return 0.\n    else:\n        Xu = self.expand(Xu, expand_sensor_model=False)\n        dist = self.distance(Xu)-self.distance_budget\n        dist = tf.reduce_sum(tf.nn.relu(dist))\n        loss = -dist*self.constraint_weight\n        return loss\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.distance","title":"distance(Xu)","text":"

Computes the distance incurred by sequentially visiting the inducing points ToDo: Change distance from 2d to nd. Currently limited to 2d to ensure the rotation angle is not included when using a square FoV sensor.

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the path lengths

required

Returns:

Name Type Description dist float

path lengths

Source code in sgptools/models/core/transformations.py
def distance(self, Xu):\n    \"\"\"Computes the distance incured by sequentially visiting the inducing points\n    ToDo: Change distance from 2d to nd. Currently limited to 2d \n        to ensure the rotation angle is not included when using\n        a square FoV sensor.\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the path lengths\n\n    Returns:\n        dist (float): path lengths\n    \"\"\"\n    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n    dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)\n    dist = tf.reduce_sum(dist, axis=1)\n    return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.expand","title":"expand(Xu, expand_sensor_model=True)","text":"

Sample points between each pair of inducing points to form the path

Parameters:

Name Type Description Default Xu ndarray

(num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space

required expand_sensor_model bool

Only add the fixed inducing points without other sensor/path transforms, used for online IPP

True

Returns:

Name Type Description Xu ndarray

Expansion transformed inducing points

Source code in sgptools/models/core/transformations.py
def expand(self, Xu, expand_sensor_model=True):\n    \"\"\"Sample points between each pair of inducing points to form the path\n\n    Args:\n        Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space\n        expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, \n                                    used for online IPP\n\n    Returns:\n        Xu (ndarray): Expansion transformed inducing points\n    \"\"\"\n    # If using single-robot offline IPP with point sensing, return inducing points as is.\n    if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:\n        return Xu\n\n    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n    # If using online IPP, add visited waypoints that won't be optimized anymore\n    if self.Xu_fixed is not None:\n        Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)\n\n    if not expand_sensor_model:\n        return tf.reshape(Xu, (-1, self.num_dim))\n\n    # Interpolate additional inducing points between waypoints to approximate \n    # the continuous data sensing model\n    if self.sampling_rate > 2:\n        Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)\n        Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n    if self.sensor_model is not None:\n        Xu = self.sensor_model.expand(Xu)\n        return Xu\n\n    Xu = tf.reshape(Xu, (-1, self.num_dim))\n    return Xu\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.update_Xu_fixed","title":"update_Xu_fixed(Xu_fixed)","text":"

Function to update the visited waypoints

Parameters:

Name Type Description Default Xu_fixed ndarray

numpy array (num_robots, num_visited_waypoints, num_dim)

required Source code in sgptools/models/core/transformations.py
def update_Xu_fixed(self, Xu_fixed):\n    \"\"\"Function to update the visited waypoints\n\n    Args:\n        Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)\n    \"\"\"\n    self.num_fixed = Xu_fixed.shape[1]\n    if self.Xu_fixed is not None:\n        self.Xu_fixed.assign(Xu_fixed)\n    else:\n        # ToDo: Use binary mask of fixed size to avoid retracing\n        self.Xu_fixed = tf.Variable(Xu_fixed, \n                                    shape=tf.TensorShape(None), \n                                    trainable=False)\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareHeightTransform","title":"SquareHeightTransform","text":"

Bases: Transform

Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform

Parameters:

Name Type Description Default num_points int

Number of points along each side of the FoV

required distance_budget float

Distance budget for the path

None Source code in sgptools/models/core/transformations.py
class SquareHeightTransform(Transform):\n    \"\"\"Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. \n    ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform\n\n    Args:\n        num_points (int): Number of points along each side of the FoV\n        distance_budget (float): Distance budget for the path\n    \"\"\"\n    def __init__(self, num_points, distance_budget=None, **kwargs):\n        super().__init__(**kwargs)\n        self.num_points = num_points\n        self.distance_budget = distance_budget\n\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        elif self.aggregation_size is None:\n            self.aggregation_size = num_points**2\n\n    def expand(self, Xu):     \n        \"\"\"\n        Applies the expansion transform to the inducing points\n\n        Args:\n            Xu (ndarray): (m, 3); Inducing points in the 3D position space.\n                        `m` is the number of inducing points,\n                        `3` is the dimension of the space (x, y, z)\n\n        Returns:\n            Xu (ndarray): (mp, 2); Inducing points in input space.\n                        `p` is the number of points each inducing point is mapped \n                        to in order to form the FoV.\n        \"\"\"\n        x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)\n        x = tf.squeeze(x)\n        y = tf.squeeze(y)\n        h = tf.squeeze(h)\n\n        delta = h / (self.num_points - 1)\n\n        pts = []\n        for i in range(self.num_points):\n            pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], \n                                   [x + h/2, y - (h/2) + (delta * i)], \n                                   self.num_points, \n                                   axis=1))\n        xy = tf.concat(pts, axis=1)\n        xy = tf.transpose(xy, [2, 1, 0])\n        xy = tf.reshape(xy, [-1, 2])\n        xy = self._reshape(xy, 
tf.shape(Xu)[0])\n        return xy\n\n    def _reshape(self, X, num_inducing):\n        \"\"\"Reorder the inducing points to be in the correct order for aggregation with square height FoV\n\n        Args:\n            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each \n                        inducing point is mapped to in order to form the FoV.\n\n        Returns:\n            Xu (ndarray): (mp, 2); Reorder inducing points\n        \"\"\"\n        X = tf.reshape(X, (num_inducing, -1, self.num_points, self.num_points, 2))\n        X = tf.transpose(X, (0, 2, 1, 3, 4))\n        X = tf.reshape(X, (-1, 2))\n        return X\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareHeightTransform.expand","title":"expand(Xu)","text":"

Applies the expansion transform to the inducing points

Parameters:

Name Type Description Default Xu ndarray

(m, 3); Inducing points in the 3D position space. m is the number of inducing points, 3 is the dimension of the space (x, y, z)

required

Returns:

Name Type Description Xu ndarray

(mp, 2); Inducing points in input space. p is the number of points each inducing point is mapped to in order to form the FoV.

Source code in sgptools/models/core/transformations.py
def expand(self, Xu):     \n    \"\"\"\n    Applies the expansion transform to the inducing points\n\n    Args:\n        Xu (ndarray): (m, 3); Inducing points in the 3D position space.\n                    `m` is the number of inducing points,\n                    `3` is the dimension of the space (x, y, z)\n\n    Returns:\n        Xu (ndarray): (mp, 2); Inducing points in input space.\n                    `p` is the number of points each inducing point is mapped \n                    to in order to form the FoV.\n    \"\"\"\n    x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)\n    x = tf.squeeze(x)\n    y = tf.squeeze(y)\n    h = tf.squeeze(h)\n\n    delta = h / (self.num_points - 1)\n\n    pts = []\n    for i in range(self.num_points):\n        pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], \n                               [x + h/2, y - (h/2) + (delta * i)], \n                               self.num_points, \n                               axis=1))\n    xy = tf.concat(pts, axis=1)\n    xy = tf.transpose(xy, [2, 1, 0])\n    xy = tf.reshape(xy, [-1, 2])\n    xy = self._reshape(xy, tf.shape(Xu)[0])\n    return xy\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareTransform","title":"SquareTransform","text":"

Bases: Transform

Non-point Transform to model a square FoV. Only works for single robot cases. ToDo: update expand function to handle multi-robot case.

Parameters:

Name Type Description Default length float

Length of the square FoV

required num_side int

Number of points along each side of the FoV

required Source code in sgptools/models/core/transformations.py
class SquareTransform(Transform):\n    \"\"\"Non-point Transform to model a square FoV. Only works for single robot cases. \n    ToDo: update expand function to handle multi-robot case.\n\n    Args:\n        length (float): Length of the square FoV\n        num_side (int): Number of points along each side of the FoV\n    \"\"\"\n    def __init__(self, length, num_side, **kwargs):\n        super().__init__(**kwargs)\n        self.length = length\n        self.num_side = num_side\n        self.length_factor=length/(self.num_side)\n        self.num_length = int(length/self.length_factor)\n\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        elif self.aggregation_size is None:\n            self.aggregation_size = num_side**2\n\n    def expand(self, Xu):\n        \"\"\"Applies the expansion transformation to the inducing points\n\n        Args:\n            Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.\n                            `m` is the number of inducing points,\n                            `3` is the dimension of the space (x, y, angle in radians)\n\n        Returns:\n            Xu (ndarray): (mp, 2); Inducing points in input space.\n                        `p` is the number of points each inducing point is mapped \n                         to in order to form the FoV.\n        \"\"\"\n        x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)\n        x = tf.squeeze(x)\n        y = tf.squeeze(y)\n        theta = tf.squeeze(theta)\n\n        points = []\n        for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):\n            points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), \n                                       (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], \n                                      [(x + (i * self.length_factor) * tf.cos(theta)) 
+ self.length/2 * tf.cos(theta+np.pi/2), \n                                       (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], \n                                      self.num_side, axis=1))\n        xy = tf.concat(points, axis=1)\n        xy = tf.transpose(xy, [2, 1, 0])\n        xy = tf.reshape(xy, [-1, 2])\n        xy = self._reshape(xy, tf.shape(Xu)[1])\n        return xy\n\n    def _reshape(self, X, num_inducing):\n        \"\"\"Reorder the inducing points to be in the correct order for aggregation with square FoV.\n\n        Args:\n            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each \n                        inducing point is mapped to in order to form the FoV.\n\n        Returns:\n            Xu (ndarray): (mp, 2); Reorder inducing points\n        \"\"\"\n        X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))\n        X = tf.transpose(X, (0, 2, 1, 3, 4))\n        X = tf.reshape(X, (-1, 2))\n        return X\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareTransform.expand","title":"expand(Xu)","text":"

Applies the expansion transformation to the inducing points

Parameters:

Name Type Description Default Xu ndarray

(1, m, 3); Inducing points in the position and orientation space. m is the number of inducing points, 3 is the dimension of the space (x, y, angle in radians)

required

Returns:

Name Type Description Xu ndarray

(mp, 2); Inducing points in input space. p is the number of points each inducing point is mapped to in order to form the FoV.

Source code in sgptools/models/core/transformations.py
def expand(self, Xu):\n    \"\"\"Applies the expansion transformation to the inducing points\n\n    Args:\n        Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.\n                        `m` is the number of inducing points,\n                        `3` is the dimension of the space (x, y, angle in radians)\n\n    Returns:\n        Xu (ndarray): (mp, 2); Inducing points in input space.\n                    `p` is the number of points each inducing point is mapped \n                     to in order to form the FoV.\n    \"\"\"\n    x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)\n    x = tf.squeeze(x)\n    y = tf.squeeze(y)\n    theta = tf.squeeze(theta)\n\n    points = []\n    for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):\n        points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), \n                                   (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], \n                                  [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2), \n                                   (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], \n                                  self.num_side, axis=1))\n    xy = tf.concat(points, axis=1)\n    xy = tf.transpose(xy, [2, 1, 0])\n    xy = tf.reshape(xy, [-1, 2])\n    xy = self._reshape(xy, tf.shape(Xu)[1])\n    return xy\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform","title":"Transform","text":"

Base class for transformations of the inducing points, including expansion and aggregation transforms.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default aggregation_size int

Number of consecutive inducing points to aggregate

None constraint_weight float

Weight term that controls the importance of the constraint terms in the SGP's optimization objective

1.0 Source code in sgptools/models/core/transformations.py
class Transform:\n    \"\"\"Base class for transformations of the inducing points, including expansion and aggregation transforms.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        aggregation_size (int): Number of consecutive inducing points to aggregate\n        constraint_weight (float): Weight term that controls the importance of the \n                                   constraint terms in the SGP's optimization objective \n    \"\"\"\n    def __init__(self, \n                 aggregation_size=None, \n                 constraint_weight=1.0,\n                 **kwargs):\n        self.aggregation_size = aggregation_size\n        self.constraint_weight = constraint_weight\n\n    def expand(self, Xu):\n        \"\"\"Applies the expansion transform to the inducing points\n\n        Args:\n            Xu (ndarray): Expansion transformed inducing points\n        \"\"\"\n        return Xu\n\n    def aggregate(self, k):\n        \"\"\"Applies the aggregation transform to kernel matrices\n\n        Args:\n            k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
\n                        `m` is the number of inducing points,\n                        `p` is the number of points each inducing point is mapped,\n                        `n` is the number of training data points.\n\n        Returns:\n            k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n        \"\"\"\n        if self.aggregation_size is None:\n            return k\n\n        if k.shape[0] == k.shape[1]:\n            # Handle Kuu which is a square matrix\n            k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)\n            k = tf.nn.avg_pool(k,\n                               ksize=[1, self.aggregation_size, self.aggregation_size, 1],\n                               strides=[1, self.aggregation_size, self.aggregation_size, 1],\n                               padding='VALID')\n            k = tf.squeeze(k, axis=[0, -1])\n        else:\n            # Handle Kuf which is a rectangular matrix\n            k = tf.expand_dims(k, axis=0)\n            k = tf.nn.avg_pool(k,\n                               ksize=[1, self.aggregation_size, 1],\n                               strides=[1, self.aggregation_size, 1],\n                               padding='VALID')\n            k = tf.squeeze(k, axis=[0])\n        return k\n\n    def constraints(self, Xu):\n        \"\"\"Computes the constraint terms that are added to the SGP's optimization function\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the constraints\n\n        Returns:\n            c (float): constraint terms (eg., distance constraint)\n        \"\"\"\n        return 0.\n\n    def distance(self, Xu):\n        \"\"\"Computes the distance incured by sequentially visiting the inducing points\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the path length\n\n        Returns:\n            dist (float): path length\n        \"\"\"\n        dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))\n        return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.aggregate","title":"aggregate(k)","text":"

Applies the aggregation transform to kernel matrices

Parameters:

Name Type Description Default k tensor

(mp, mp)/(mp, n); Kernel matrix. m is the number of inducing points, p is the number of points each inducing point is mapped, n is the number of training data points.

required

Returns:

Name Type Description k tensor

(m, m)/(m, n); Aggregated kernel matrix

Source code in sgptools/models/core/transformations.py
def aggregate(self, k):\n    \"\"\"Applies the aggregation transform to kernel matrices\n\n    Args:\n        k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                    `m` is the number of inducing points,\n                    `p` is the number of points each inducing point is mapped,\n                    `n` is the number of training data points.\n\n    Returns:\n        k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n    \"\"\"\n    if self.aggregation_size is None:\n        return k\n\n    if k.shape[0] == k.shape[1]:\n        # Handle Kuu which is a square matrix\n        k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)\n        k = tf.nn.avg_pool(k,\n                           ksize=[1, self.aggregation_size, self.aggregation_size, 1],\n                           strides=[1, self.aggregation_size, self.aggregation_size, 1],\n                           padding='VALID')\n        k = tf.squeeze(k, axis=[0, -1])\n    else:\n        # Handle Kuf which is a rectangular matrix\n        k = tf.expand_dims(k, axis=0)\n        k = tf.nn.avg_pool(k,\n                           ksize=[1, self.aggregation_size, 1],\n                           strides=[1, self.aggregation_size, 1],\n                           padding='VALID')\n        k = tf.squeeze(k, axis=[0])\n    return k\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.constraints","title":"constraints(Xu)","text":"

Computes the constraint terms that are added to the SGP's optimization function

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the constraints

required

Returns:

Name Type Description c float

constraint terms (eg., distance constraint)

Source code in sgptools/models/core/transformations.py
def constraints(self, Xu):\n    \"\"\"Computes the constraint terms that are added to the SGP's optimization function\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the constraints\n\n    Returns:\n        c (float): constraint terms (eg., distance constraint)\n    \"\"\"\n    return 0.\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.distance","title":"distance(Xu)","text":"

Computes the distance incurred by sequentially visiting the inducing points

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the path length

required

Returns:

Name Type Description dist float

path length

Source code in sgptools/models/core/transformations.py
def distance(self, Xu):\n    \"\"\"Computes the distance incured by sequentially visiting the inducing points\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the path length\n\n    Returns:\n        dist (float): path length\n    \"\"\"\n    dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))\n    return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.expand","title":"expand(Xu)","text":"

Applies the expansion transform to the inducing points

Parameters:

Name Type Description Default Xu ndarray

Inducing points to which the expansion transform is applied

required Source code in sgptools/models/core/transformations.py
def expand(self, Xu):\n    \"\"\"Applies the expansion transform to the inducing points\n\n    Args:\n        Xu (ndarray): Expansion transformed inducing points\n    \"\"\"\n    return Xu\n
"},{"location":"API-reference.html#_________________________1","title":"________________________","text":"

General utilities to support the functionalities of this package:

  • data: Provides utilities to preprocess datasets
  • gpflow: Provides utilities to interface with GPflow
  • metrics: Provides utilities to quantify the solution quality
  • misc: Provides miscellaneous helper functions
  • tsp: Provides utilities to run TSP/VRP solver
"},{"location":"API-reference.html#sgptools.utils.tsp.resample_path","title":"resample_path(waypoints, num_inducing=10)","text":"

Function to map path with arbitrary number of waypoints to inducing points path with fixed number of waypoints

Parameters:

Name Type Description Default waypoints ndarray

(num_waypoints, n_dim); waypoints of path from vrp solver

required num_inducing int

Number of inducing points (waypoints) in the returned path

10

Returns:

Name Type Description points ndarray

(num_inducing, n_dim); Resampled path

Source code in sgptools/utils/tsp.py
def resample_path(waypoints, num_inducing=10):\n    \"\"\"Function to map path with arbitrary number of waypoints to \n    inducing points path with fixed number of waypoints\n\n    Args:\n        waypoints (ndarray): (num_waypoints, n_dim); waypoints of path from vrp solver\n        num_inducing (int): Number of inducing points (waypoints) in the returned path\n\n    Returns:\n        points (ndarray): (num_inducing, n_dim); Resampled path\n    \"\"\"\n    line = LineString(waypoints)\n    distances = np.linspace(0, line.length, num_inducing)\n    points = [line.interpolate(distance) for distance in distances]\n    points = np.array([[p.x, p.y] for p in points])\n    return points\n
"},{"location":"API-reference.html#sgptools.utils.tsp.run_tsp","title":"run_tsp(nodes, num_vehicles=1, max_dist=25, depth=1, resample=None, start_idx=None, end_idx=None)","text":"

Method to run TSP/VRP with arbitrary start and end nodes, and without any distance constraint

Parameters:

Name Type Description Default nodes ndarray

(# nodes, n_dim); Nodes to visit

required num_vehicles int

Number of robots/vehicles

1 max_dist float

Maximum distance allowed for each path when handling the multi-robot case

25 depth int

Internal parameter used to track re-try recursion depth

1 resample int

Each solution path will be resampled to have resample number of points

None start_idx list

Optional list of start node indices from which to start the solution path

None end_idx list

Optional list of end node indices at which to end the solution path

None

Returns:

Name Type Description paths ndarray

Solution paths

distances list

List of path lengths

Source code in sgptools/utils/tsp.py
def run_tsp(nodes, \n            num_vehicles=1, \n            max_dist=25, \n            depth=1, \n            resample=None, \n            start_idx=None,\n            end_idx=None):\n    \"\"\"Method to run TSP/VRP with arbitrary start and end nodes, \n    and without any distance constraint\n\n    Args:\n        nodes (ndarray): (# nodes, n_dim); Nodes to visit \n        num_vehicles (int): Number of robots/vehicles\n        max_dist (float): Maximum distance allowed for each path when handling mutli-robot case\n        depth (int): Internal parameter used to track re-try recursion depth\n        resample (int): Each solution path will be resampled to have\n                        `resample` number of points\n        start_idx (list): Optionl list of start node indices from which to start the solution path \n        end_idx (list): Optionl list of end node indices from which to start the solution path \n\n    Returns:\n        paths (ndarray): Solution paths\n        distances (list): List of path lengths\n    \"\"\"\n    if depth > 5:\n        print('Warning: Max depth reached')\n        return None, None\n\n    # Add dummy 0 location to get arbitrary start and end node sols\n    if start_idx is None or end_idx is None:\n        distance_mat = np.zeros((len(nodes)+1, len(nodes)+1))\n        distance_mat[1:, 1:] = pairwise_distances(nodes, nodes)*1e4\n        trim_paths = True\n    else:\n        distance_mat = pairwise_distances(nodes, nodes)*1e4\n        trim_paths = False\n    distance_mat = distance_mat.astype(int)\n    max_dist = int(max_dist*1e4)\n\n    if start_idx is None:\n        start_idx = [0]*num_vehicles\n    elif trim_paths:\n        start_idx = [i+1 for i in start_idx]\n\n    if end_idx is None:\n        end_idx = [0]*num_vehicles\n    elif trim_paths:\n        end_idx = [i+1 for i in end_idx]\n\n    def distance_callback(from_index, to_index):\n        from_node = manager.IndexToNode(from_index)\n        to_node = 
manager.IndexToNode(to_index)\n        return distance_mat[from_node][to_node]\n\n    # num_locations, num vehicles, start, end\n    manager = pywrapcp.RoutingIndexManager(len(distance_mat), \n                                           num_vehicles, \n                                           start_idx,\n                                           end_idx)\n    routing = pywrapcp.RoutingModel(manager)\n    transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n    if num_vehicles > 1:\n        # Dummy distaance constraint to ensure all paths have similar length\n        dimension_name = \"Distance\"\n        routing.AddDimension(\n            transit_callback_index,\n            0,  # no slack\n            max_dist,  # vehicle maximum travel distance\n            True,  # start cumul to zero\n            dimension_name,\n        )\n        distance_dimension = routing.GetDimensionOrDie(dimension_name)\n        distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n    search_parameters.first_solution_strategy = (\n        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n    )\n    search_parameters.local_search_metaheuristic = (\n        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n    )\n    search_parameters.time_limit.seconds = 10\n    solution = routing.SolveWithParameters(search_parameters)\n\n    paths = None\n    if solution is not None:\n        paths, distances = get_routes(manager, routing, \n                                      solution, num_vehicles, \n                                      start_idx, end_idx, trim_paths)\n        for path in paths:\n            if len(path) < 2:\n                print('TSP Warning: Empty path detected')\n                return run_tsp(nodes, num_vehicles, int(np.mean(distances)*(1.5/depth)), depth+1)\n    else:\n        print('TSP 
Warning: No solution found')\n        return run_tsp(nodes, num_vehicles, int(max_dist*1.5), depth+1)\n\n    # Map paths from node indices to node locations\n    paths = [nodes[path] for path in paths]\n\n    # Resample each solution path to have resample number of points\n    if resample is not None:\n        paths = np.array([resample_path(path, resample) for path in paths])\n\n    # Convert distances back to floats in the original scale of the nodes\n    distances = np.array(distances)/1e4\n    return paths, distances\n
"},{"location":"API-reference.html#sgptools.utils.misc.cont2disc","title":"cont2disc(Xu, candidates, candidate_labels=None)","text":"

Map continuous space locations to a discrete set of candidate location

Parameters:

Name Type Description Default Xu ndarray

(m, 2); Continuous space points

required candidates ndarray

(n, 2); Discrete set of candidate locations

required candidate_labels ndarray

(n, 1); Labels corresponding to the discrete set of candidate locations

None

Returns:

Name Type Description Xu_x ndarray

Discrete space points' locations

Xu_y ndarray

Labels of the discrete space points. Returned only if candidate_labels was passed to the function

Source code in sgptools/utils/misc.py
def cont2disc(Xu, candidates, candidate_labels=None):\n    \"\"\"Map continuous space locations to a discrete set of candidate location\n\n    Args:\n        Xu (ndarray): (m, 2); Continuous space points\n        candidates (ndarray): (n, 2); Discrete set of candidate locations\n        candidate_labels (ndarray): (n, 1); Labels corresponding to the discrete set of candidate locations\n\n    Returns:\n        Xu_x (ndarray): Discrete space points' locations \n        Xu_y (ndarray): Labels of the discrete space points. Returned only if `candidate_labels`\n                        was passed to the function\n\n    \"\"\"\n    # Sanity check to ensure that there are candidates to match\n    if len(candidates)==0:\n        return []\n    dists = pairwise_distances(candidates, Y=Xu, metric='euclidean')\n    row_ind, _ = linear_sum_assignment(dists)\n    Xu_X = candidates[row_ind].copy()\n    if candidate_labels is not None:\n        Xu_y = candidate_labels[row_ind].copy()\n        return Xu_X, Xu_y\n    else:\n        return Xu_X\n
"},{"location":"API-reference.html#sgptools.utils.misc.get_inducing_pts","title":"get_inducing_pts(data, num_inducing, orientation=False, random=False)","text":"

Selects a subset of the data points to be used as inducing points. The default approach uses kmeans to select the subset.

Parameters:

Name Type Description Default data ndarray

(n, 2); Data points to select the inducing points from

required num_inducing int

Number of inducing points

required orientation bool

If True, add an additional dimension to model the sensor FoV rotation angle

False random bool

If True, the subset of inducing points are selected randomly instead of using kmeans

False

Returns:

Name Type Description Xu ndarray

(m, d); Inducing points in the position and orientation space. m is the number of inducing points, d is the dimension of the space (x, y, optional - angle in radians)

Source code in sgptools/utils/misc.py
def get_inducing_pts(data, num_inducing, orientation=False, random=False):\n    \"\"\"Selects a subset of the data points to be used as inducing points. \n    The default approach uses kmeans to select the subset. \n\n    Args:\n        data (ndarray): (n, 2); Data points to select the inducing points from \n        num_inducing (int): Number of inducing points\n        orientation (bool): If True, add an additional dimension to model the sensor \n                            FoV rotation angle\n        random (bool): If True, the subset of inducing points are selected randomly \n                       instead of using kmeans\n\n    Returns:\n        Xu (ndarray): (m, d); Inducing points in the position and orientation space.\n                        `m` is the number of inducing points, \n                        `d` is the dimension of the space (x, y, optional - angle in radians)\n    \"\"\"\n    if random:\n        idx = np.random.randint(len(data), size=num_inducing)\n        Xu = data[idx]\n    else:\n        Xu = kmeans2(data, num_inducing, minit=\"points\")[0]\n    if orientation:\n        thetas = np.random.uniform(0, 2 * np.pi, size=(Xu.shape[0], 1))\n        Xu = np.concatenate([Xu, thetas], axis=1)\n    return Xu\n
"},{"location":"API-reference.html#sgptools.utils.misc.interpolate_path","title":"interpolate_path(waypoints, sampling_rate=0.05)","text":"

Interpolate additional points between the given waypoints to simulate continuous sensing robots

Parameters:

Name Type Description Default waypoints (n, d)

Waypoints of the robot's path

required sampling_rate float

Distance between each pair of interpolated points

0.05

Returns:

Name Type Description path ndarray

(p, d) Interpolated path, p depends on the sampling_rate

Source code in sgptools/utils/misc.py
def interpolate_path(waypoints, sampling_rate=0.05):\n    \"\"\"Interpolate additional points between the given waypoints to simulate continuous sensing robots\n\n    Args:\n        waypoints (n, d): Waypoints of the robot's path\n        sampling_rate (float): Distance between each pair of interpolated points\n\n    Returns:\n        path (ndarray): (p, d) Interpolated path, `p` depends on the sampling_rate rate\n    \"\"\"\n    interpolated_path = []\n    for i in range(2, len(waypoints)+1):\n        dist = get_distance(waypoints[i-2:i])\n        num_samples = int(dist / sampling_rate)\n        points = np.linspace(waypoints[i-1], waypoints[i-2], num_samples)\n        interpolated_path.extend(points)\n    return np.array(interpolated_path)\n
"},{"location":"API-reference.html#sgptools.utils.misc.plot_paths","title":"plot_paths(paths, candidates=None, title=None)","text":"

Function to plot the IPP solution paths

Parameters:

Name Type Description Default paths ndarray

(r, m, 2); r paths with m waypoints each

required candidates ndarray

(n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach

None title str

Title of the plot

None Source code in sgptools/utils/misc.py
def plot_paths(paths, candidates=None, title=None):\n    \"\"\"Function to plot the IPP solution paths\n\n    Args:\n        paths (ndarray): (r, m, 2); `r` paths with `m` waypoints each\n        candidates (ndarray): (n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach\n        title (str): Title of the plot\n    \"\"\"\n    plt.figure()\n    for i, path in enumerate(paths):\n        plt.plot(path[:, 0], path[:, 1], \n                    c='r', label='Path', zorder=1, marker='o')\n        plt.scatter(path[0, 0], path[0, 1], \n                    c='g', label='Start', zorder=2, marker='o')\n        if candidates is not None:\n            plt.scatter(candidates[:, 0], candidates[:, 1], \n                        c='k', s=1, label='Unlabeled Train-Set Points', zorder=0)\n        if i==0:\n            plt.legend(bbox_to_anchor=(1.0, 1.02))\n    if title is not None:\n        plt.title(title)\n    plt.xlabel('X')\n    plt.ylabel('Y')\n
"},{"location":"API-reference.html#sgptools.utils.misc.project_waypoints","title":"project_waypoints(waypoints, candidates)","text":"

Project the waypoints back to the candidate set while retaining the waypoint visitation order.

Parameters:

Name Type Description Default waypoints (n, d)

Waypoints of the robot's path

required candidates ndarray

(n, 2); Discrete set of candidate locations

required

Returns:

Name Type Description waypoints (n, d)

Projected waypoints of the robot's path

Source code in sgptools/utils/misc.py
def project_waypoints(waypoints, candidates):\n    \"\"\"Project the waypoints back to the candidate set while retaining the \n    waypoint visitation order.\n\n    Args:\n        waypoints (n, d): Waypoints of the robot's path\n        candidates (ndarray): (n, 2); Discrete set of candidate locations\n\n    Returns:\n        waypoints (n, d): Projected waypoints of the robot's path\n    \"\"\"\n    waypoints_disc = cont2disc(waypoints, candidates)\n    waypoints_valid = _reoder_path(waypoints, waypoints_disc)\n    return waypoints_valid\n
"},{"location":"API-reference.html#sgptools.utils.metrics.gaussian_entropy","title":"gaussian_entropy(K)","text":"

Computes GP-based entropy from a kernel matrix

Parameters:

Name Type Description Default K ndarray

(n, n); kernel matrix

required

Returns:

Name Type Description entropy float

Entropy computed from the kernel matrix

Source code in sgptools/utils/metrics.py
def gaussian_entropy(K):\n    \"\"\"Computes GP-based entropy from a kernel matrix\n\n    Args:\n        K (ndarray): (n, n); kernel matrix\n\n    Returns:\n        entropy (float): Entropy computed from the kernel matrix\n    \"\"\"\n    return multivariate_normal(mean=None, cov=K, allow_singular=True).entropy()\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_distance","title":"get_distance(X)","text":"

Compute the length of a path (L2-norm)

Parameters:

Name Type Description Default X ndarray

(m, d); Waypoints of a path

required

Returns:

Name Type Description dist float

Total path length

Source code in sgptools/utils/metrics.py
def get_distance(X):\n    \"\"\"Compute the length of a path (L2-norm)\n\n    Args:\n        X (ndarray): (m, d); Waypoints of a path\n\n    Returns:\n        dist (float): Total path length\n    \"\"\"\n    dist = np.linalg.norm(X[1:] - X[:-1], axis=-1)\n    dist = np.sum(dist)\n    return dist\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_elbo","title":"get_elbo(Xu, X_env, noise_variance, kernel, baseline=False)","text":"

Computes the ELBO of the SGP, corrected to be positive

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required X_env ndarray

(n, d); Data points used to approximate the bounds of the environment

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required baseline bool

If True, the ELBO is adjusted to be positive

False

Returns:

Name Type Description elbo float

ELBO of the SGP

Source code in sgptools/utils/metrics.py
def get_elbo(Xu, X_env, noise_variance, kernel, baseline=False):\n    \"\"\"Computes the ELBO of the SGP, corrected to be positive\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        baseline (bool): If True, the ELBO is adjusted to be positive\n\n    Returns:\n        elbo (float): ELBO of the SGP\n    \"\"\"\n    if baseline:\n        sgpr = gpflow.models.SGPR(X_env,\n                                  noise_variance=noise_variance,\n                                  kernel=kernel,\n                                  inducing_variable=[[0, 0]])\n        baseline = sgpr.elbo().numpy()\n    else:\n        baseline = 0.0\n\n    sgpr = gpflow.models.SGPR(X_env,\n                              noise_variance=noise_variance,\n                              kernel=kernel, \n                              inducing_variable=Xu)\n    return (sgpr.elbo() - baseline).numpy()\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_kl","title":"get_kl(Xu, X_env, noise_variance, kernel)","text":"

Computes the KL divergence between the SGP and the GP

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required X_env ndarray

(n, d); Data points used to approximate the bounds of the environment

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description kl float

KL divergence between the SGP and the GP

Source code in sgptools/utils/metrics.py
def get_kl(Xu, X_env, noise_variance, kernel):\n    \"\"\"Computes the KL divergence between the SGP and the GP\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        kl (float): KL divergence between the SGP and the GP\n    \"\"\"\n    sgpr = gpflow.models.SGPR(X_env,\n                              noise_variance=noise_variance,\n                              kernel=kernel,\n                              inducing_variable=Xu)\n\n    common = sgpr._common_calculation()\n    sigma_sq = common.sigma_sq\n    AAT = common.AAT\n\n    x, _ = sgpr.data\n    kdiag = sgpr.kernel(x, full_cov=False)\n\n    # tr(K) / \u03c3\u00b2\n    trace_k = tf.reduce_sum(kdiag / sigma_sq)\n    # tr(Q) / \u03c3\u00b2\n    trace_q = tf.reduce_sum(tf.linalg.diag_part(AAT))\n    # tr(K - Q) / \u03c3\u00b2\n    trace = trace_k - trace_q\n    trace = 0.5 * trace\n\n    return float(trace.numpy())\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_mi","title":"get_mi(Xu, candidate_locs, noise_variance, kernel)","text":"

Computes mutual information between the sensing locations and the candidate locations

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required candidate_locs ndarray

(n, d); Candidate sensing locations

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description mi float

Mutual information computed using a GP

Source code in sgptools/utils/metrics.py
def get_mi(Xu, candidate_locs, noise_variance, kernel):\n    \"\"\"Computes mutual information between the sensing locations and the candidate locations\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        candidate_locs (ndarray): (n, d); Candidate sensing locations \n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        mi (float): Mutual information computed using a GP\n    \"\"\"\n    Xu = np.array(Xu)\n    candidate_locs = np.array(candidate_locs)\n\n    gp = gpflow.models.GPR(data=(Xu, np.zeros((len(Xu), 1))),\n                           kernel=kernel,\n                           noise_variance=noise_variance)\n    _, sigma_a = gp.predict_f(candidate_locs, full_cov=True)\n    sigma_a = sigma_a.numpy()[0]\n    cond_entropy = gaussian_entropy(sigma_a)\n\n    K = kernel(candidate_locs, full_cov=True).numpy()\n    K += noise_variance * np.eye(len(candidate_locs))\n    entropy = gaussian_entropy(K)\n\n    return float(entropy - cond_entropy)\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_reconstruction","title":"get_reconstruction(Xu, X_test, noise_variance, kernel)","text":"

Computes the GP-based data field estimates with the solution placements as the training set

Parameters:

Name Type Description Default Xu tuple

(ndarray (m, d); ndarray (m, 1)); Sensing locations' input and corresponding ground truth labels

required X_test ndarray

(n, d); Testing data input locations

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description y_pred ndarray

(n, 1); Predicted data field estimates

y_var ndarray

(n, 1); Prediction variance at each location in the data field

Source code in sgptools/utils/metrics.py
def get_reconstruction(Xu, X_test, noise_variance, kernel):\n    \"\"\"Computes the GP-based data field estimates with the solution placements as the training set\n\n    Args:\n        Xu (tuple): (ndarray (m, d); ndarray (m, 1)); Sensing locations' input \n                    and corresponding ground truth labels\n        X_test (ndarray): (n, d); Testing data input locations\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        y_pred (ndarray): (n, 1); Predicted data field estimates\n        y_var (ndarray): (n, 1); Prediction variance at each location in the data field\n    \"\"\"\n    Xu_X, Xu_y = Xu\n\n    # Get the GP predictions\n    gpr = gpflow.models.GPR((Xu_X, Xu_y),\n                            noise_variance=noise_variance,\n                            kernel=kernel)\n    y_pred, y_var = gpr.predict_f(X_test)\n    y_pred = y_pred.numpy().reshape(-1, 1)\n\n    return y_pred, y_var\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_rmse","title":"get_rmse(y_pred, y_test)","text":"

Computes the root-mean-square error between y_pred and y_test

Parameters:

Name Type Description Default y_pred ndarray

(n, 1); Predicted data field estimate

required y_test ndarray

(n, 1); Ground truth data field

required

Returns:

Name Type Description rmse float

Computed RMSE

Source code in sgptools/utils/metrics.py
def get_rmse(y_pred, y_test):\n    \"\"\"Computes the root-mean-square error between `y_pred` and `y_test`\n\n    Args:\n        y_pred (ndarray): (n, 1); Predicted data field estimate\n        y_test (ndarray): (n, 1); Ground truth data field \n\n    Returns:\n        rmse (float): Computed RMSE\n    \"\"\"\n    return np.sqrt(np.mean(np.square(y_pred - y_test)))\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.get_model_params","title":"get_model_params(X_train, y_train, max_steps=1500, lr=0.01, print_params=True, lengthscales=1.0, variance=1.0, noise_variance=0.1, kernel=None, **kwargs)","text":"

Train a GP on the given training set

Parameters:

Name Type Description Default X_train ndarray

(n, d); Training set inputs

required y_train ndarray

(n, 1); Training set labels

required max_steps int

Maximum number of optimization steps

1500 lr float

Optimization learning rate

0.01 print_params bool

If True, prints the optimized GP parameters

True lengthscales float or list

Kernel lengthscale(s), if passed as a list, each element corresponds to each data dimension

1.0 variance float

Kernel variance

1.0 noise_variance float

Data noise variance

0.1 kernel Kernel

gpflow kernel function

None

Returns:

Name Type Description loss list

Loss values obtained during training

variance float

Optimized data noise variance

kernel Kernel

Optimized gpflow kernel function

Source code in sgptools/utils/gpflow.py
def get_model_params(X_train, y_train, \n                     max_steps=1500, \n                     lr=1e-2, \n                     print_params=True, \n                     lengthscales=1.0, \n                     variance=1.0, \n                     noise_variance=0.1,\n                     kernel=None,\n                     **kwargs):\n    \"\"\"Train a GP on the given training set\n\n    Args:\n        X_train (ndarray): (n, d); Training set inputs\n        y_train (ndarray): (n, 1); Training set labels\n        max_steps (int): Maximum number of optimization steps\n        lr (float): Optimization learning rate\n        print_params (bool): If True, prints the optimized GP parameters\n        lengthscales (float or list): Kernel lengthscale(s), if passed as a list, \n                                each element corresponds to each data dimension\n        variance (float): Kernel variance\n        noise_variance (float): Data noise variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        loss (list): Loss values obtained during training\n        variance (float): Optimized data noise variance\n        kernel (gpflow.kernels.Kernel): Optimized gpflow kernel function\n    \"\"\"\n    if kernel is None:\n        kernel = gpflow.kernels.SquaredExponential(lengthscales=lengthscales, \n                                                   variance=variance)\n\n    gpr_gt = gpflow.models.GPR(data=(X_train, y_train), \n                               kernel=kernel,\n                               noise_variance=noise_variance)\n\n    if max_steps > 0:\n        loss = optimize_model(gpr_gt, max_steps=max_steps, lr=lr, **kwargs)\n    else:\n        loss = 0\n\n    if print_params:\n        print_summary(gpr_gt)\n\n    return loss, gpr_gt.likelihood.variance, kernel\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.optimize_model","title":"optimize_model(model, max_steps=2000, kernel_grad=True, lr=0.01, optimizer='tf', method=None, verbose=False, trace_fn=None, convergence_criterion=True, trainable_variables=None, tol=None)","text":"

Trains a GP/SGP model

Parameters:

Name Type Description Default model models

GPflow GP/SGP model to train

required max_steps int

Maximum number of training steps

2000 kernel_grad bool

If False, the kernel parameters will not be optimized

True lr float

Optimization learning rate

0.01 optimizer str

Optimizer to use for training (scipy or tf)

'tf' method str

Optimization method refer to scipy minimize and tf optimizers for full list

None verbose bool

If true, the training progress will be printed

False trace_fn str

Function to trace metrics during training. If None, the loss values are traced; if traceXu, the inducing point states at each optimization step are traced

None convergence_criterion bool

If True, enables early stopping when the loss plateaus

True trainable_variables list

List of model variables to train (can be used to limit training to a subset of variables)

None tol float

Convergence tolerance to decide when to stop optimization

None Source code in sgptools/utils/gpflow.py
def optimize_model(model, \n                   max_steps=2000, \n                   kernel_grad=True, \n                   lr=1e-2, \n                   optimizer='tf', \n                   method=None,\n                   verbose=False, \n                   trace_fn=None,\n                   convergence_criterion=True,\n                   trainable_variables=None,\n                   tol=None):\n    \"\"\"\n    Trains a GP/SGP model\n\n    Args:\n        model (gpflow.models): GPflow GP/SGP model to train\n        max_steps (int): Maximum number of training steps\n        kernel_grad (bool): If False, the kernel parameters will not be optimized\n        lr (float): Optimization learning rate\n        optimizer (str): Optimizer to use for training (`scipy` or `tf`)\n        method (str): Optimization method refer to scipy minimize and tf optimizers for full list\n        verbose (bool): If true, the training progress will be printed\n        trace_fn (str): Function to trace metrics during training. 
\n                        If `None`, the loss values are traced;\n                        if `traceXu`, it the inducing points states at each optimization step are traced\n        convergence_criterion (bool): It True, enables early stopping when the loss plateaus\n        trainable_variables (list): List of model variables to train \n                                    (can be used to limit training to a subset of variables)\n        tol (float): Convergence tolerance to decide when to stop optimization\n    \"\"\"\n    # Train all variables if trainable_variables are not provided\n    # If kernel_gradient is False, disable the kernel parameter gradient updates\n    if trainable_variables is None and kernel_grad:\n        trainable_variables=model.trainable_variables\n    elif trainable_variables is None and not kernel_grad:\n        trainable_variables=model.trainable_variables[:1]\n\n    if optimizer == 'scipy':\n        if method is None:\n            method = 'L-BFGS-B'\n        opt = gpflow.optimizers.Scipy()\n        losses = opt.minimize(model.training_loss,\n                              trainable_variables,\n                              method=method,\n                              options=dict(disp=verbose, maxiter=max_steps),\n                              tol=tol)\n        losses = losses.fun\n    else:\n        if trace_fn is None:\n            trace_fn = lambda x: x.loss\n        elif trace_fn == 'traceXu':\n            def trace_fn(traceable_quantities):\n                return model.inducing_variable.Z.numpy()\n\n        if method is None:\n            method = 'adam'\n        opt = tf.keras.optimizers.get(method)\n        opt.learning_rate = lr\n        loss_fn = model.training_loss\n        if convergence_criterion:\n            convergence_criterion = tfp.optimizer.convergence_criteria.LossNotDecreasing(\n                                            atol=1e-5, \n                                            window_size=50,\n                        
                    min_num_steps=int(max_steps*0.1))\n        else:\n            convergence_criterion = None\n        losses = tfp.math.minimize(loss_fn,\n                                   trainable_variables=trainable_variables,\n                                   num_steps=max_steps,\n                                   optimizer=opt,\n                                   convergence_criterion=convergence_criterion,\n                                   trace_fn=trace_fn)\n        losses = losses.numpy()\n\n    return losses\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.plot_loss","title":"plot_loss(losses, save_file=None)","text":"

Helper function to plot the training loss

Parameters:

Name Type Description Default losses list

list of loss values

required save_file str

If passed, the loss plot will be saved to the save_file

None Source code in sgptools/utils/gpflow.py
def plot_loss(losses, save_file=None):\n    \"\"\"Helper function to plot the training loss\n\n    Args:\n        losses (list): list of loss values\n        save_file (str): If passed, the loss plot will be saved to the `save_file`\n    \"\"\"\n    plt.plot(losses)\n    plt.title('Log Likelihood')\n    plt.xlabel('Iteration')\n    plt.ylabel('Log Likelihood')\n    ax = plt.gca()\n    ax.ticklabel_format(useOffset=False)\n\n    if save_file is not None:\n        plt.savefig(save_file, bbox_inches='tight')\n        plt.close()\n    else:\n        plt.show()\n
"},{"location":"API-reference.html#sgptools.utils.data.get_dataset","title":"get_dataset(dataset_type, dataset_path=None, num_train=1000, num_test=2500, num_candidates=150)","text":"

Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to generate train and test sets.

Parameters:

Name Type Description Default dataset_type str

'tif' or 'synthetic'. 'tif' will load and preprocess data from a GeoTIFF file. 'synthetic' will use the diamond square algorithm to generate synthetic elevation data.

required dataset_path str

Path to the dataset file, used only when dataset_type is 'tif'.

None num_train int

Number of training samples to generate.

1000 num_test int

Number of testing samples to generate.

2500 num_candidates int

Number of candidate locations to generate.

150

Returns:

Name Type Description X_train ndarray

(n, d); Training set inputs

y_train ndarray

(n, 1); Training set labels

X_test ndarray

(n, d); Testing set inputs

y_test ndarray

(n, 1); Testing set labels

candidates ndarray

(n, d); Candidate sensor placement locations

X

(n, d); Full dataset inputs

y

(n, 1); Full dataset labels

Source code in sgptools/utils/data.py
def get_dataset(dataset_type, dataset_path=None,\n                num_train=1000,\n                num_test=2500, \n                num_candidates=150):\n    \"\"\"Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to \n    generate train and test sets.\n\n    Args:\n        dataset_type (str): 'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file. \n                        'synthetic' will use the diamond square algorithm to generate synthetic elevation data.\n        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.\n        num_train (int): Number of training samples to generate.\n        num_test (int): Number of testing samples to generate.\n        num_candidates (int): Number of candidate locations to generate.\n\n    Returns:\n       X_train (ndarray): (n, d); Training set inputs\n       y_train (ndarray): (n, 1); Training set labels\n       X_test (ndarray): (n, d); Testing set inputs\n       y_test (ndarray): (n, 1); Testing set labels\n       candidates (ndarray): (n, d); Candidate sensor placement locations\n       X: (n, d); Full dataset inputs\n       y: (n, 1); Full dataset labels\n    \"\"\"\n    # Load the data\n    if dataset_type == 'tif':\n        X, y = prep_tif_dataset(dataset_path=dataset_path)\n    elif dataset_type == 'synthetic':\n        X, y = prep_synthetic_dataset()\n\n    X_train = get_inducing_pts(X, num_train)\n    X_train, y_train = cont2disc(X_train, X, y)\n\n    X_test = get_inducing_pts(X, num_test)\n    X_test, y_test = cont2disc(X_test, X, y)\n\n    candidates = get_inducing_pts(X, num_candidates)\n    candidates = cont2disc(candidates, X)\n\n    # Standardize data\n    X_scaler = StandardScaler()\n    X_scaler.fit(X_train)\n    X_train = X_scaler.transform(X_train)*10.0\n    X_test = X_scaler.transform(X_test)*10.0\n    X = X_scaler.transform(X)*10.0\n\n    y_scaler = StandardScaler()\n    y_scaler.fit(y_train)\n    y_train = 
y_scaler.transform(y_train)\n    y_test = y_scaler.transform(y_test)\n    y = y_scaler.transform(y)\n\n    return X_train, y_train, X_test, y_test, candidates, X, y\n
"},{"location":"API-reference.html#sgptools.utils.data.point_pos","title":"point_pos(point, d, theta)","text":"

Generate a point at a distance d from a point at angle theta.

Parameters:

Name Type Description Default point ndarray

(N, 2); array of points

required d float

distance

required theta float

angle in radians

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def point_pos(point, d, theta):\n    '''\n    Generate a point at a distance d from a point at angle theta.\n\n    Args:\n        point (ndarray): (N, 2); array of points\n        d (float): distance\n        theta (float): angle in radians\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    return np.c_[point[:, 0] + d*np.cos(theta), point[:, 1] + d*np.sin(theta)]\n
"},{"location":"API-reference.html#sgptools.utils.data.prep_synthetic_dataset","title":"prep_synthetic_dataset()","text":"

Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm. https://github.com/buckinha/DiamondSquare

Args:

Returns: X: (n, d); Dataset input features y: (n, 1); Dataset labels

Source code in sgptools/utils/data.py
def prep_synthetic_dataset():\n    '''Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm.\n    ```https://github.com/buckinha/DiamondSquare```\n\n    Args:\n\n    Returns:\n       X: (n, d); Dataset input features\n       y: (n, 1); Dataset labels\n    '''\n    data = diamond_square(shape=(50,50), \n                          min_height=0, \n                          max_height=30, \n                          roughness=0.5)\n\n    # create x and y coordinates from the extent\n    x_coords = np.arange(0, data.shape[0])/10\n    y_coords = np.arange(0, data.shape[1])/10\n    xx, yy = np.meshgrid(x_coords, y_coords)\n    X = np.c_[xx.ravel(), yy.ravel()]\n    y = data.ravel()\n    y = y.reshape(-1, 1)\n\n    return X.astype(float), y.astype(float)\n
"},{"location":"API-reference.html#sgptools.utils.data.prep_tif_dataset","title":"prep_tif_dataset(dataset_path)","text":"

Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features are set to the x and y pixel block coordinates and the labels are read from the file. The method also removes all invalid points.

Large tif files need to be downsampled using the following command: gdalwarp -tr 50 50 <input>.tif <output>.tif

Args: dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.

Returns: X: (n, d); Dataset input features y: (n, 1); Dataset labels

Source code in sgptools/utils/data.py
def prep_tif_dataset(dataset_path):\n    '''Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features \n    are set to the x and y pixel block coordinates and the labels are read from the file.\n    The method also removes all invalid points.\n\n    Large tif files \n    need to be downsampled using the following command: \n    ```gdalwarp -tr 50 50 <input>.tif <output>.tif```\n\n    Args:\n        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.\n\n    Returns:\n       X: (n, d); Dataset input features\n       y: (n, 1); Dataset labels\n    '''\n    data = PIL.Image.open(dataset_path)\n    data = np.array(data)\n\n    # create x and y coordinates from the extent\n    x_coords = np.arange(0, data.shape[1])/10\n    y_coords = np.arange(data.shape[0], 0, -1)/10\n    xx, yy = np.meshgrid(x_coords, y_coords)\n    X = np.c_[xx.ravel(), yy.ravel()]\n    y = data.ravel()\n\n    # Remove invalid labels\n    y[np.where(y==-999999.0)] = np.nan\n    X = X[~np.isnan(y)]\n    y = y[~np.isnan(y)]\n\n    X = X.reshape(-1, 2)\n    y = y.reshape(-1, 1)\n\n    return X.astype(float), y.astype(float)\n
"},{"location":"API-reference.html#sgptools.utils.data.remove_circle_patches","title":"remove_circle_patches(X, Y, circle_patches)","text":"

Remove points inside circle patches.

Parameters:

Name Type Description Default X (ndarray

(N,); array of x-coordinate

required Y (ndarray

(N,); array of y-coordinate

required polygons list of matplotlib circle patches

Circle patches to remove from the X, Y points

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def remove_circle_patches(X, Y, circle_patches):\n    '''\n    Remove points inside polycircle patchesgons.\n\n    Args:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n        polygons (list of matplotlib circle patches): Circle patches to remove from the X, Y points\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    points = np.array([X.flatten(), Y.flatten()]).T\n    for circle_patch in circle_patches:\n        points = points[~circle_patch.contains_points(points)]\n    return points[:, 0], points[:, 1]\n
"},{"location":"API-reference.html#sgptools.utils.data.remove_polygons","title":"remove_polygons(X, Y, polygons)","text":"

Remove points inside polygons.

Parameters:

Name Type Description Default X (ndarray

(N,); array of x-coordinate

required Y (ndarray

(N,); array of y-coordinate

required polygons list of matplotlib path polygon

Polygons to remove from the X, Y points

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def remove_polygons(X, Y, polygons):\n    '''\n    Remove points inside polygons.\n\n    Args:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n        polygons (list of matplotlib path polygon): Polygons to remove from the X, Y points\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    points = np.array([X.flatten(), Y.flatten()]).T\n    for polygon in polygons:\n        p = path.Path(polygon)\n        points = points[~p.contains_points(points)]\n    return points[:, 0], points[:, 1]\n
"},{"location":"API-reference.html#_________________________2","title":"________________________","text":"

Provides a neural spectral kernel function along with an initialization function

"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.NeuralSpectralKernel","title":"NeuralSpectralKernel","text":"

Bases: Kernel

Neural Spectral Kernel function (non-stationary kernel function). Based on the implementation from the following repo

Refer to the following papers for more details
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]

Parameters:

Name Type Description Default input_dim int

Number of data dimensions

required active_dims int

Number of data dimensions that are used for computing the covariances

None Q int

Number of MLP mixture components used in the kernel function

1 hidden_sizes list

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

[32, 32] Source code in sgptools/kernels/neural_kernel.py
class NeuralSpectralKernel(gpflow.kernels.Kernel):\n    \"\"\"Neural Spectral Kernel function (non-stationary kernel function). \n    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)\n\n    Refer to the following papers for more details:\n        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]\n\n    Args:\n        input_dim (int): Number of data dimensions\n        active_dims (int): Number of data dimensions that are used for computing the covariances\n        Q (int): Number of MLP mixture components used in the kernel function\n        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.\n    \"\"\"\n    def __init__(self, input_dim, active_dims=None, Q=1, hidden_sizes=[32, 32]):\n        super().__init__(active_dims=active_dims)\n\n        self.input_dim = input_dim\n        self.Q = Q\n        self.num_hidden = len(hidden_sizes)\n\n        self.freq = []\n        self.length = []\n        self.var = []\n        for q in range(self.Q):\n            freq = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] + \n                                    [layers.Dense(input_dim, activation='softplus')])\n            length = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +\n                                   [layers.Dense(input_dim, activation='softplus')])\n            var = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +\n                                   [layers.Dense(1, activation='softplus')])\n            self.freq.append(freq)\n            self.length.append(length)\n            self.var.append(var)\n\n    def K(self, X, X2=None):\n        \"\"\"Computes the covariances between/amongst the input variables\n\n        Args:\n            X (ndarray): Variables to 
compute the covariance matrix\n            X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, \n                          the covariance between X and X is computed.\n\n        Returns:\n            cov (ndarray): covariance matrix\n        \"\"\"\n        if X2 is None:\n            X2 = X\n            equal = True\n        else:\n            equal = False\n\n        kern = 0.0\n        for q in range(self.Q):\n            # compute latent function values by the neural network\n            freq, freq2 = self.freq[q](X), self.freq[q](X2)\n            lens, lens2 = self.length[q](X), self.length[q](X2)\n            var, var2 = self.var[q](X), self.var[q](X2)\n\n            # compute length-scale term\n            Xr = tf.expand_dims(X, 1)  # N1 1 D\n            X2r = tf.expand_dims(X2, 0)  # 1 N2 D\n            l1 = tf.expand_dims(lens, 1)  # N1 1 D\n            l2 = tf.expand_dims(lens2, 0)  # 1 N2 D\n            L = tf.square(l1) + tf.square(l2)  # N1 N2 D\n            #D = tf.square((Xr - X2r) / L)  # N1 N2 D\n            D = tf.square(Xr - X2r) / L  # N1 N2 D\n            D = tf.reduce_sum(D, 2)  # N1 N2\n            det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D\n            det = tf.reduce_prod(det, 2)  # N1 N2\n            E = det * tf.exp(-D)  # N1 N2\n\n            # compute cosine term\n            muX = (tf.reduce_sum(freq * X, 1, keepdims=True)\n                   - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))\n            COS = tf.cos(2 * np.pi * muX)\n\n            # compute kernel variance term\n            WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T\n\n            # compute the q'th kernel component\n            kern += WW * E * COS\n        if equal:\n            return robust_kernel(kern, tf.shape(X)[0])\n        else:\n            return kern\n\n    def K_diag(self, X):\n        kd = default_jitter()\n        for q in range(self.Q):\n            kd += tf.square(self.var[q](X))\n        return 
tf.squeeze(kd)\n
"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.NeuralSpectralKernel.K","title":"K(X, X2=None)","text":"

Computes the covariances between/amongst the input variables

Parameters:

Name Type Description Default X ndarray

Variables to compute the covariance matrix

required X2 ndarray

If passed, the covariance between X and X2 is computed. Otherwise, the covariance between X and X is computed.

None

Returns:

Name Type Description cov ndarray

covariance matrix

Source code in sgptools/kernels/neural_kernel.py
def K(self, X, X2=None):\n    \"\"\"Computes the covariances between/amongst the input variables\n\n    Args:\n        X (ndarray): Variables to compute the covariance matrix\n        X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, \n                      the covariance between X and X is computed.\n\n    Returns:\n        cov (ndarray): covariance matrix\n    \"\"\"\n    if X2 is None:\n        X2 = X\n        equal = True\n    else:\n        equal = False\n\n    kern = 0.0\n    for q in range(self.Q):\n        # compute latent function values by the neural network\n        freq, freq2 = self.freq[q](X), self.freq[q](X2)\n        lens, lens2 = self.length[q](X), self.length[q](X2)\n        var, var2 = self.var[q](X), self.var[q](X2)\n\n        # compute length-scale term\n        Xr = tf.expand_dims(X, 1)  # N1 1 D\n        X2r = tf.expand_dims(X2, 0)  # 1 N2 D\n        l1 = tf.expand_dims(lens, 1)  # N1 1 D\n        l2 = tf.expand_dims(lens2, 0)  # 1 N2 D\n        L = tf.square(l1) + tf.square(l2)  # N1 N2 D\n        #D = tf.square((Xr - X2r) / L)  # N1 N2 D\n        D = tf.square(Xr - X2r) / L  # N1 N2 D\n        D = tf.reduce_sum(D, 2)  # N1 N2\n        det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D\n        det = tf.reduce_prod(det, 2)  # N1 N2\n        E = det * tf.exp(-D)  # N1 N2\n\n        # compute cosine term\n        muX = (tf.reduce_sum(freq * X, 1, keepdims=True)\n               - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))\n        COS = tf.cos(2 * np.pi * muX)\n\n        # compute kernel variance term\n        WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T\n\n        # compute the q'th kernel component\n        kern += WW * E * COS\n    if equal:\n        return robust_kernel(kern, tf.shape(X)[0])\n    else:\n        return kern\n
"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.init_neural_kernel","title":"init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None)","text":"

Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). Based on the implementation from the following repo

Refer to the following papers for more details
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]

Parameters:

Name Type Description Default x ndarray

(n, d); Input training set points

required y ndarray

(n, 1); Training set labels

required inducing_variable ndarray

(m, d); Initial inducing points

required Q int

Number of MLP mixture components used in the kernel function

required n_inits int

Number of times to initialize the kernel function (returns the best model)

1 hidden_sizes list

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

None Source code in sgptools/kernels/neural_kernel.py
def init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None):\n    \"\"\"Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). \n    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)\n\n    Refer to the following papers for more details:\n        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]\n\n    Args:\n        x (ndarray): (n, d); Input training set points\n        y (ndarray): (n, 1); Training set labels\n        inducing_variable (ndarray): (m, d); Initial inducing points\n        Q (int): Number of MLP mixture components used in the kernel function\n        n_inits (int): Number of times to initalize the kernel function (returns the best model)\n        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.\n    \"\"\"\n    x, y = data_input_to_tensor((x, y))\n\n    print('Initializing neural spectral kernel...')\n    best_loglik = -np.inf\n    best_m = None\n    N, input_dim = x.shape\n\n    for k in range(n_inits):\n        # gpflow.reset_default_graph_and_session()\n        k = NeuralSpectralKernel(input_dim=input_dim, Q=Q, \n                                    hidden_sizes=hidden_sizes)\n        model = SGPR((x, y), inducing_variable=inducing_variable, \n                        kernel=k)\n        loglik = model.elbo()\n        if loglik > best_loglik:\n            best_loglik = loglik\n            best_m = model\n        del model\n        gc.collect()\n    print('Best init: %f' % best_loglik)\n\n    return best_m\n
"}]} \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 0000000..634e4aa --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"index.html","title":"Home","text":"

SGP-Tools is a software suite for Sensor Placement and Informative Path Planning.

The library includes python code for the following:

  • Greedy algorithm-based approaches
  • Bayesian optimization-based approaches
  • Genetic algorithm-based approaches
  • Sparse Gaussian process (SGP)-based approaches

"},{"location":"index.html#installation","title":"Installation","text":"

The library is available as a pip package. To install the package, run the following command:

python3 -m pip install sgptools\n

Installation from source:

git clone https://github.com/itskalvik/sgp-tools.git\ncd sgp-tools/\npython3 -m pip install -r requirements.txt\npython3 -m pip install -e .\n

Note: The requirements.txt file contains packages and their latest versions that were last verified to be working without any issues.

"},{"location":"index.html#quick-start","title":"Quick Start","text":"

Please refer to the examples folder for Jupyter notebooks demonstrating all the methods included in the library \ud83d\ude04

"},{"location":"index.html#method-summary","title":"Method Summary","text":""},{"location":"index.html#about","title":"About","text":"

Please consider citing the following papers if you use SGP-Tools in your academic work \ud83d\ude04

@misc{JakkalaA23SP,\nAUTHOR={Kalvik Jakkala and Srinivas Akella},\nTITLE={Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces},\nNOTE= {Preprint},\nYEAR={2023},\nURL={https://itskalvik.github.io/publication/sgp-sp},\n}\n\n@inproceedings{JakkalaA24IPP,\nAUTHOR={Kalvik Jakkala and Srinivas Akella},\nTITLE={Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes},\nbooktitle={IEEE International Conference on Robotics and Automation, {ICRA}},\nYEAR={2024},\nPUBLISHER = {{IEEE}},\nURL={https://itskalvik.github.io/publication/sgp-ipp}\n}\n
"},{"location":"index.html#acknowledgements","title":"Acknowledgements","text":"

This work was funded in part by the UNC Charlotte Office of Research and Economic Development and by NSF under Award Number IIP-1919233.

"},{"location":"index.html#license","title":"License","text":"

The SGP-Tools software suite is licensed under the terms of the Apache License 2.0. See LICENSE for more information.

"},{"location":"API-reference.html","title":"API reference","text":"

Sensor placement and informative path planning methods in this package:

  • continuous_sgp: Provides an SGP-based sensor placement approach that is optimized using gradient descent
  • greedy_sgp: Provides an SGP-based sensor placement approach that is optimized using a greedy algorithm
  • cma_es: Provides a genetic algorithm (CMA-ES) based approach that maximizes mutual-information to get sensor placements
  • greedy_mi: Provides a greedy algorithm based approach that maximizes mutual-information to get sensor placements
  • bo: Provides a Bayesian optimization based approach that maximizes mutual-information to get sensor placements
"},{"location":"API-reference.html#sgptools.models.continuous_sgp.continuous_sgp","title":"continuous_sgp(num_inducing, X_train, noise_variance, kernel, transform=None, Xu_init=None, Xu_time=None, orientation=False, **kwargs)","text":"

Get sensor placement solutions using the Continuous-SGP method

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default num_inducing int

Number of inducing points

required X_train ndarray

(n, d); Unlabeled random sampled training points

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None Xu_init ndarray

(m, d); Initial inducing points

None Xu_time ndarray

(t, d); Temporal inducing points used in spatio-temporal models

None orientation bool

If True, an additional dimension is added to the inducing points to represent the FoV orientation

False

Returns:

Name Type Description sgpr AugmentedSGPR

Optimized sparse Gaussian process model

loss ndarray

Loss values computed during training

Source code in sgptools/models/continuous_sgp.py
def continuous_sgp(num_inducing, X_train, noise_variance, kernel, \n                   transform=None,\n                   Xu_init=None, \n                   Xu_time=None, \n                   orientation=False,\n                   **kwargs):\n    \"\"\"Get sensor placement solutions using the Continuous-SGP method\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [[Jakkala and Akella, 2024](https://www.itskalvik.com/publication/sgp-ipp/)]\n\n    Args:\n        num_inducing (int): Number of inducing points\n        X_train (ndarray): (n, d); Unlabeled random sampled training points\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n        Xu_init (ndarray): (m, d); Initial inducing points\n        Xu_time (ndarray): (t, d); Temporal inducing points used in spatio-temporal models\n        orientation (bool): If True, a additionl dimension is added to the \n                            inducing points to represent the FoV orientation\n\n    Returns:\n        sgpr (AugmentedSGPR): Optimized sparse Gaussian process model\n        loss (ndarray): Loss values computed during training\n    \"\"\"\n    # Generate init inducing points\n    if Xu_init is None:\n        Xu_init = get_inducing_pts(X_train, num_inducing, \n                                   orientation=orientation)\n\n    # Fit spare GP\n    sgpr = AugmentedSGPR((X_train, np.zeros((len(X_train), 1)).astype(X_train.dtype)),\n                         noise_variance=noise_variance,\n                         kernel=kernel, \n                         inducing_variable=Xu_init,\n                         
inducing_variable_time=Xu_time,\n                         transform=transform)\n\n    # Train the mode\n    loss = optimize_model(sgpr,\n                          kernel_grad=False, \n                          **kwargs)\n\n    return sgpr, loss\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.GreedySGP","title":"GreedySGP","text":"

Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations. Used by get_greedy_sgp_sol function to compute the solution sensor placements using the Greedy-SGP method.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]

Parameters:

Name Type Description Default num_inducing int

Number of inducing points

required S ndarray

(n, d); Candidate sensor placement locations

required V ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

Data noise variance

required kernel Kernel

gpflow kernel function

required Xu_fixed ndarray

(m, d); Inducing points that are not optimized and are always added to the inducing points set during loss function computation

None transform Transform

Transform object

None Source code in sgptools/models/greedy_sgp.py
class GreedySGP:\n    \"\"\"Helper class to compute SGP's ELBO/optimization bound for a given set of sensor locations.\n    Used by `get_greedy_sgp_sol` function to compute the solution sensor placements using the Greedy-SGP method.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n\n    Args:\n        num_inducing (int): Number of inducing points\n        S (ndarray): (n, d); Candidate sensor placement locations\n        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): Data noise variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        Xu_fixed (ndarray): (m, d); Inducing points that are not optimized and are always \n                                    added to the inducing points set during loss function computation\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, num_inducing, S, V, noise_variance, kernel, \n                 Xu_fixed=None, \n                 transform=None):\n        self.gp = AugmentedSGPR((V, np.zeros((len(V), 1))),\n                                noise_variance=noise_variance,\n                                kernel=kernel, \n                                inducing_variable=S[:num_inducing],\n                                transform=transform)\n        self.locs = S\n        self.Xu_fixed = Xu_fixed\n        self.num_inducing = num_inducing\n        self.inducing_dim = S.shape[1]\n\n    def bound(self, x):\n        \"\"\"Computes the SGP's optimization bound using the inducing points `x` \n\n        Args:\n            x (ndarray): (n, d); Inducing points\n\n        Returns:\n            elbo (float): Evidence lower bound/SGP's optimization bound value\n        \"\"\"\n        x = 
np.array(x).reshape(-1).astype(int)\n        Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)\n        Xu *= self.locs[x][0]\n        Xu[-len(x):] = self.locs[x]\n\n        if self.Xu_fixed is not None:\n            Xu[:len(self.Xu_fixed)] = self.Xu_fixed\n\n        self.gp.inducing_variable.Z.assign(Xu)\n        return self.gp.elbo().numpy()\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.GreedySGP.bound","title":"bound(x)","text":"

Computes the SGP's optimization bound using the inducing points x

Parameters:

Name Type Description Default x ndarray

(n, d); Inducing points

required

Returns:

Name Type Description elbo float

Evidence lower bound/SGP's optimization bound value

Source code in sgptools/models/greedy_sgp.py
def bound(self, x):\n    \"\"\"Computes the SGP's optimization bound using the inducing points `x` \n\n    Args:\n        x (ndarray): (n, d); Inducing points\n\n    Returns:\n        elbo (float): Evidence lower bound/SGP's optimization bound value\n    \"\"\"\n    x = np.array(x).reshape(-1).astype(int)\n    Xu = np.ones((self.num_inducing, self.inducing_dim), dtype=np.float32)\n    Xu *= self.locs[x][0]\n    Xu[-len(x):] = self.locs[x]\n\n    if self.Xu_fixed is not None:\n        Xu[:len(self.Xu_fixed)] = self.Xu_fixed\n\n    self.gp.inducing_variable.Z.assign(Xu)\n    return self.gp.elbo().numpy()\n
"},{"location":"API-reference.html#sgptools.models.greedy_sgp.get_greedy_sgp_sol","title":"get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None)","text":"

Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to select sensor placements from a given discrete set of candidate locations.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

required candidates ndarray

(n, d); Candidate sensor placement locations

required X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/greedy_sgp.py
def get_greedy_sgp_sol(num_sensors, candidates, X_train, noise_variance, kernel, \n                       transform=None):\n    \"\"\"Get sensor placement solutions using the Greedy-SGP method. Uses a greedy algorithm to \n    select sensor placements from a given discrete set of candidates locations.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [[Jakkala and Akella, 2023](https://www.itskalvik.com/publication/sgp-sp/)]\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        candidates (ndarray): (n, d); Candidate sensor placement locations\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sgp_model = GreedySGP(num_sensors, candidates, X_train, \n                          noise_variance, kernel, transform=transform)\n    model = CustomSelection(num_sensors,\n                            sgp_model.bound,\n                            optimizer='naive',\n                            verbose=False)\n    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))\n    return candidates[sol.reshape(-1)]\n
"},{"location":"API-reference.html#sgptools.models.greedy_mi.GreedyMI","title":"GreedyMI","text":"

Helper class to compute mutual information using a Gaussian process for a given set of sensor locations. Used by get_greedy_mi_sol function to compute the solution sensor placements using the Greedy-MI method.

Refer to the following papers for more details
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]

Parameters:

Name Type Description Default S ndarray

(n, d); Candidate sensor placement locations

required V ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None Source code in sgptools/models/greedy_mi.py
class GreedyMI:\n    \"\"\"Helper class to compute mutual information using a Gaussian process for a given set of sensor locations.\n    Used by `get_greedy_mi_sol` function to compute the solution sensor placements using the Greedy-MI method.\n\n    Refer to the following papers for more details:\n        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]\n        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]\n\n    Args:\n        S (ndarray): (n, d); Candidate sensor placement locations\n        V (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, S, V, noise_variance, kernel, transform=None):\n        self.S = S\n        self.V = V\n        self.kernel = kernel\n        self.input_dim = S.shape[1]\n        self.noise_variance = noise_variance\n        self.transform = transform\n\n    def mutual_info(self, x):\n        x = np.array(x).reshape(-1).astype(int)\n        A = self.S[x[:-1]].reshape(-1, self.input_dim)\n        y = self.S[x[-1]].reshape(-1, self.input_dim)\n\n        if len(A) == 0:\n            sigma_a = 1.0\n        else:\n            if self.transform is not None:\n                A = self.transform.expand(A)\n            a_gp = AugmentedGPR(data=(A, np.zeros((len(A), 1))),\n                                kernel=self.kernel,\n                                noise_variance=self.noise_variance,\n                                transform=self.transform)\n            _, sigma_a = a_gp.predict_f(y, aggregate_train=True)\n\n        # Remove locations in A to build A bar\n        V_ = self.V.copy()\n        V_rows = V_.view([('', V_.dtype)] * V_.shape[1])\n        if self.transform is not None:\n            A_ = 
self.transform.expand(self.S[x]).numpy()\n        else:\n            A_ = self.S[x]\n        A_rows = A_.view([('', V_.dtype)] * A_.shape[1])\n        V_ = np.setdiff1d(V_rows, A_rows).view(V_.dtype).reshape(-1, V_.shape[1])\n\n        self.v_gp = AugmentedGPR(data=(V_, np.zeros((len(V_), 1))), \n                                 kernel=self.kernel,\n                                 noise_variance=self.noise_variance,\n                                 transform=self.transform)\n        _, sigma_v = self.v_gp.predict_f(y)\n\n        return (sigma_a/sigma_v).numpy().squeeze()\n
"},{"location":"API-reference.html#sgptools.models.greedy_mi.get_greedy_mi_sol","title":"get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, transform=None, optimizer='naive')","text":"

Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). Uses a greedy algorithm to select sensor placements from a given discrete set of candidate locations.

Refer to the following papers for more details
  • Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]
  • Data-driven learning and planning for environmental sampling [Ma et al., 2018]

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

required candidates ndarray

(n, d); Candidate sensor placement locations

required X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required transform Transform

Transform object

None optimizer str

Name of an optimizer available in the apricot library

'naive'

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/greedy_mi.py
def get_greedy_mi_sol(num_sensors, candidates, X_train, noise_variance, kernel, \n                      transform=None, optimizer='naive'):\n    \"\"\"Get sensor placement solutions using the GP-based mutual information approach (submodular objective function). \n    Uses a greedy algorithm to select sensor placements from a given discrete set of candidates locations.\n\n    Refer to the following papers for more details:\n        - Near-Optimal Sensor Placements in Gaussian Processes: Theory, Efficient Algorithms and Empirical Studies [Krause et al., 2008]\n        - Data-driven learning and planning for environmental sampling [Ma et al., 2018]\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        candidates (ndarray): (n, d); Candidate sensor placement locations\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        transform (Transform): Transform object\n        optimizer (str): Name of an optimizer available in the apricot library\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    mi_model = GreedyMI(candidates, X_train, noise_variance, kernel, transform)\n    model = CustomSelection(num_sensors,\n                            mi_model.mutual_info,\n                            optimizer=optimizer,\n                            verbose=False)\n    sol = model.fit_transform(np.arange(len(candidates)).reshape(-1, 1))\n    return candidates[sol.reshape(-1)]\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt","title":"BayesianOpt","text":"

Class for optimizing sensor placements using Bayesian Optimization

Refer to the following papers for more details
  • UAV route planning for active disease classification [Vivaldini et al., 2019]
  • Occupancy map building through Bayesian exploration [Francis et al., 2019]

Parameters:

Name Type Description Default X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/bo.py
class BayesianOpt:\n    \"\"\"Class for optimizing sensor placements using Bayesian Optimization\n\n    Refer to the following papers for more details:\n        - UAV route planning for active disease classification [Vivaldini et al., 2019]\n        - Occupancy map building through Bayesian exploration [Francis et al., 2019]\n\n    Args:\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    def __init__(self, X_train, noise_variance, kernel):\n        self.X_train = X_train\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n        self.num_dim = X_train.shape[-1]\n\n        # use the boundaries of the region as the search space\n        self.pbounds_dim = []\n        for i in range(self.num_dim):\n            self.pbounds_dim.append((np.min(X_train[:, i]), np.max(X_train[:, i])))\n\n    def objective(self, **kwargs):\n        \"\"\"Computes the objective function (mutual information) for the sensor placement problem\n        \"\"\"\n        X = []\n        for i in range(len(kwargs)):\n            X.append(kwargs['x{}'.format(i)])\n        X = np.array(X).reshape(-1, self.num_dim)\n        return -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n\n    def optimize(self, \n                 num_sensors=10, \n                 max_steps=100,  \n                 X_init=None,\n                 init_points=10):\n        \"\"\"Optimizes the sensor placements using Bayesian Optimization without any constraints\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps \n            X_init (ndarray): (m, d); Initial inducing points\n            init_points (int): How many steps of random exploration you want to perform. 
\n                               Random exploration can help by diversifying the exploration space. \n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        if X_init is None:\n            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n        X_init = X_init.reshape(-1)\n\n        pbounds = {}\n        for i in range(self.num_dim*num_sensors):\n            pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]\n\n        optimizer = BayesianOptimization(\n            f=self.objective,\n            pbounds=pbounds,\n            verbose=0,\n            random_state=1,\n            allow_duplicate_points=True\n        )\n\n        optimizer.maximize(\n            init_points=init_points,\n            n_iter=max_steps,\n        )\n\n        sol = []\n        for i in range(self.num_dim*num_sensors):\n            sol.append(optimizer.max['params']['x{}'.format(i)])\n        return np.array(sol).reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt.objective","title":"objective(**kwargs)","text":"

Computes the objective function (mutual information) for the sensor placement problem

Source code in sgptools/models/bo.py
def objective(self, **kwargs):\n    \"\"\"Computes the objective function (mutual information) for the sensor placement problem\n    \"\"\"\n    X = []\n    for i in range(len(kwargs)):\n        X.append(kwargs['x{}'.format(i)])\n    X = np.array(X).reshape(-1, self.num_dim)\n    return -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n
"},{"location":"API-reference.html#sgptools.models.bo.BayesianOpt.optimize","title":"optimize(num_sensors=10, max_steps=100, X_init=None, init_points=10)","text":"

Optimizes the sensor placements using Bayesian Optimization without any constraints

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 X_init ndarray

(m, d); Initial inducing points

None init_points int

How many steps of random exploration you want to perform. Random exploration can help by diversifying the exploration space.

10

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/bo.py
def optimize(self, \n             num_sensors=10, \n             max_steps=100,  \n             X_init=None,\n             init_points=10):\n    \"\"\"Optimizes the sensor placements using Bayesian Optimization without any constraints\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps \n        X_init (ndarray): (m, d); Initial inducing points\n        init_points (int): How many steps of random exploration you want to perform. \n                           Random exploration can help by diversifying the exploration space. \n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    if X_init is None:\n        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n    X_init = X_init.reshape(-1)\n\n    pbounds = {}\n    for i in range(self.num_dim*num_sensors):\n        pbounds['x{}'.format(i)] = self.pbounds_dim[i%self.num_dim]\n\n    optimizer = BayesianOptimization(\n        f=self.objective,\n        pbounds=pbounds,\n        verbose=0,\n        random_state=1,\n        allow_duplicate_points=True\n    )\n\n    optimizer.maximize(\n        init_points=init_points,\n        n_iter=max_steps,\n    )\n\n    sol = []\n    for i in range(self.num_dim*num_sensors):\n        sol.append(optimizer.max['params']['x{}'.format(i)])\n    return np.array(sol).reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES","title":"CMA_ES","text":"

Class for optimizing sensor placements using CMA-ES (a genetic algorithm)

Refer to the following paper for more details
  • Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]

Parameters:

Name Type Description Default X_train ndarray

(n, d); Locations in the environment used to approximate the monitoring regions

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required distance_budget float

Distance budget for when treating the inducing points as waypoints of a path

None num_robots int

Number of robots, used when modeling multi-robot IPP with a distance budget

1 transform Transform

Transform object

None Source code in sgptools/models/cma_es.py
class CMA_ES:\n    \"\"\"Class for optimizing sensor placements using CMA-ES (a genetic algorithm)\n\n    Refer to the following paper for more details:\n        - Adaptive Continuous-Space Informative Path Planning for Online Environmental Monitoring [Hitz et al., 2017]\n\n    Args:\n        X_train (ndarray): (n, d); Locations in the environment used to approximate the monitoring regions\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        distance_budget (float): Distance budget for when treating the inducing points \n                                 as waypoints of a path\n        num_robots (int): Number of robots, used when modeling \n                          multi-robot IPP with a distance budget\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(self, X_train, noise_variance, kernel,\n                 distance_budget=None,\n                 num_robots=1,\n                 transform=None):\n        self.boundaries = geometry.MultiPoint([[p[0], p[1]] for p in X_train]).convex_hull\n        self.X_train = X_train\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n        self.num_dim = X_train.shape[-1]\n        self.distance_budget = distance_budget\n        self.num_robots = num_robots\n        self.transform = transform\n\n    def update(self, noise_variance, kernel):\n        \"\"\"Update GP noise variance and kernel function parameters\n\n        Args:\n            noise_variance (float): data variance\n            kernel (gpflow.kernels.Kernel): gpflow kernel function\n        \"\"\"\n        self.noise_variance = noise_variance\n        self.kernel = kernel\n\n    def constraint(self, X):\n        \"\"\"Constraint function for the optimization problem (constraint to limit the boundary of the region)\n        Does not work well with CMA-ES as it is a step function and is not continuous\n\n        Args:\n            X (ndarray): (n, 
d); Current sensor placement locations\n        \"\"\"\n        X = np.array(X).reshape(-1, self.num_dim)\n        lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]\n        lagrangian = np.logical_not(lagrangian).astype(float)\n        return lagrangian\n\n    def distance_constraint(self, X):\n        \"\"\"Constraint function for the optimization problem (constraint to limit the total travel distance)\n        Does not work well with CMA-ES as it is a step function and is not continuous\n\n        Args:\n            X (ndarray): (n, d); Current sensor placement locations\n        \"\"\"\n        X = np.array(X).reshape(self.num_robots, -1, self.num_dim)\n        dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)\n        lagrangian = dists - self.distance_budget\n        lagrangian_mask = np.logical_not(lagrangian <= 0)\n        lagrangian[lagrangian_mask] = 0\n        lagrangian = np.sum(lagrangian)\n        return lagrangian\n\n    def objective(self, X):\n        \"\"\"Objective function (GP-based Mutual Information)\n\n        Args:\n            X (ndarray): (n, d); Initial sensor placement locations\n        \"\"\"\n        # MI does not depend on waypoint order (reshape to -1, num_dim)\n        X = np.array(X).reshape(-1, self.num_dim)\n        if self.transform is not None:\n            X = self.transform.expand(X, \n                                      expand_sensor_model=False).numpy()\n\n        try:\n            mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n        except:\n            mi = 0.0 # if the cholskey decomposition fails\n        return mi\n\n    def optimize(self, \n                 num_sensors=10, \n                 max_steps=5000, \n                 tol=1e-11, \n                 X_init=None):\n        \"\"\"Optimizes the SP objective function using CMA-ES without any constraints\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            
max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n            X_init (ndarray): (m, d); Initial inducing points\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n\n        if X_init is None:\n            X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n        X_init = X_init.reshape(-1)\n\n        xopt, _ = cma.fmin2(self.objective, X_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                                     'tolfun': tol,\n                                     'seed': 1234},\n                            restarts=5)\n\n        xopt = np.array(xopt).reshape(-1, self.num_dim)\n        if self.transform is not None:\n            xopt = self.transform.expand(xopt, \n                                         expand_sensor_model=False).numpy()\n\n        return xopt.reshape(-1, self.num_dim)\n\n    def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n        \"\"\"Optimizes the SP objective function using CMA-ES with a distance budget constraint\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n        idx = np.random.randint(len(self.X_train), size=num_sensors)\n        x_init = self.X_train[idx].reshape(-1)\n        cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)\n        xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                 
                    'tolfun': tol,\n                                     'seed': 1234},\n                            callback=cfun.update,\n                            restarts=5)\n        return xopt.reshape(-1, self.num_dim)\n\n    def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n        \"\"\"Optimizes the SP objective function using CMA-ES with the constraints\n        to ensure that the sensors are placed within the boundaries of the region\n\n        Args:\n            num_sensors (int): Number of sensor locations to optimize\n            max_steps (int): Maximum number of optimization steps\n            tol (float): Convergence tolerance to decide when to stop optimization\n\n        Returns:\n            Xu (ndarray): (m, d); Solution sensor placement locations\n        \"\"\"\n        sigma0 = 1.0\n        idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)\n        x_init = self.X_train[idx].reshape(-1)\n        cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)\n        xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                            options={'maxfevals': max_steps,\n                                     'verb_disp': 0,\n                                     'tolfun': tol,\n                                     'seed': 1234},\n                            callback=cfun.update,\n                            restarts=5)\n        return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.constraint","title":"constraint(X)","text":"

Constraint function for the optimization problem (constraint to limit the boundary of the region) Does not work well with CMA-ES as it is a step function and is not continuous

Parameters:

Name Type Description Default X ndarray

(n, d); Current sensor placement locations

required Source code in sgptools/models/cma_es.py
def constraint(self, X):\n    \"\"\"Constraint function for the optimization problem (constraint to limit the boundary of the region)\n    Does not work well with CMA-ES as it is a step function and is not continuous\n\n    Args:\n        X (ndarray): (n, d); Current sensor placement locations\n    \"\"\"\n    X = np.array(X).reshape(-1, self.num_dim)\n    lagrangian = [self.boundaries.contains(geometry.Point(x[0], x[1])) for x in X]\n    lagrangian = np.logical_not(lagrangian).astype(float)\n    return lagrangian\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.coptimize","title":"coptimize(num_sensors=10, max_steps=100, tol=1e-11)","text":"

Optimizes the SP objective function using CMA-ES with the constraints to ensure that the sensors are placed within the boundaries of the region

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 tol float

Convergence tolerance to decide when to stop optimization

1e-11

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def coptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n    \"\"\"Optimizes the SP objective function using CMA-ES with the constraints\n    to ensure that the sensors are placed within the boundaries of the region\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n    idx = np.random.randint(len(self.X_train), size=num_sensors*self.num_robots)\n    x_init = self.X_train[idx].reshape(-1)\n    cfun = cma.ConstrainedFitnessAL(self.objective, self.constraint)\n    xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        callback=cfun.update,\n                        restarts=5)\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.distance_constraint","title":"distance_constraint(X)","text":"

Constraint function for the optimization problem (constraint to limit the total travel distance) Does not work well with CMA-ES as it is a step function and is not continuous

Parameters:

Name Type Description Default X ndarray

(n, d); Current sensor placement locations

required Source code in sgptools/models/cma_es.py
def distance_constraint(self, X):\n    \"\"\"Constraint function for the optimization problem (constraint to limit the total travel distance)\n    Does not work well with CMA-ES as it is a step function and is not continuous\n\n    Args:\n        X (ndarray): (n, d); Current sensor placement locations\n    \"\"\"\n    X = np.array(X).reshape(self.num_robots, -1, self.num_dim)\n    dists = np.linalg.norm(X[:, 1:] - X[:, :-1], axis=-1)\n    lagrangian = dists - self.distance_budget\n    lagrangian_mask = np.logical_not(lagrangian <= 0)\n    lagrangian[lagrangian_mask] = 0\n    lagrangian = np.sum(lagrangian)\n    return lagrangian\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.doptimize","title":"doptimize(num_sensors=10, max_steps=100, tol=1e-11)","text":"

Optimizes the SP objective function using CMA-ES with a distance budget constraint

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

100 tol float

Convergence tolerance to decide when to stop optimization

1e-11

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def doptimize(self, num_sensors=10, max_steps=100, tol=1e-11):\n    \"\"\"Optimizes the SP objective function using CMA-ES with a distance budget constraint\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n    idx = np.random.randint(len(self.X_train), size=num_sensors)\n    x_init = self.X_train[idx].reshape(-1)\n    cfun = cma.ConstrainedFitnessAL(self.objective, self.distance_constraint)\n    xopt, _ = cma.fmin2(cfun, x_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        callback=cfun.update,\n                        restarts=5)\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.objective","title":"objective(X)","text":"

Objective function (GP-based Mutual Information)

Parameters:

Name Type Description Default X ndarray

(n, d); Initial sensor placement locations

required Source code in sgptools/models/cma_es.py
def objective(self, X):\n    \"\"\"Objective function (GP-based Mutual Information)\n\n    Args:\n        X (ndarray): (n, d); Initial sensor placement locations\n    \"\"\"\n    # MI does not depend on waypoint order (reshape to -1, num_dim)\n    X = np.array(X).reshape(-1, self.num_dim)\n    if self.transform is not None:\n        X = self.transform.expand(X, \n                                  expand_sensor_model=False).numpy()\n\n    try:\n        mi = -get_mi(X, self.noise_variance, self.kernel, self.X_train)\n    except:\n        mi = 0.0 # if the cholskey decomposition fails\n    return mi\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.optimize","title":"optimize(num_sensors=10, max_steps=5000, tol=1e-11, X_init=None)","text":"

Optimizes the SP objective function using CMA-ES without any constraints

Parameters:

Name Type Description Default num_sensors int

Number of sensor locations to optimize

10 max_steps int

Maximum number of optimization steps

5000 tol float

Convergence tolerance to decide when to stop optimization

1e-11 X_init ndarray

(m, d); Initial inducing points

None

Returns:

Name Type Description Xu ndarray

(m, d); Solution sensor placement locations

Source code in sgptools/models/cma_es.py
def optimize(self, \n             num_sensors=10, \n             max_steps=5000, \n             tol=1e-11, \n             X_init=None):\n    \"\"\"Optimizes the SP objective function using CMA-ES without any constraints\n\n    Args:\n        num_sensors (int): Number of sensor locations to optimize\n        max_steps (int): Maximum number of optimization steps\n        tol (float): Convergence tolerance to decide when to stop optimization\n        X_init (ndarray): (m, d); Initial inducing points\n\n    Returns:\n        Xu (ndarray): (m, d); Solution sensor placement locations\n    \"\"\"\n    sigma0 = 1.0\n\n    if X_init is None:\n        X_init = get_inducing_pts(self.X_train, num_sensors, random=True)\n    X_init = X_init.reshape(-1)\n\n    xopt, _ = cma.fmin2(self.objective, X_init, sigma0, \n                        options={'maxfevals': max_steps,\n                                 'verb_disp': 0,\n                                 'tolfun': tol,\n                                 'seed': 1234},\n                        restarts=5)\n\n    xopt = np.array(xopt).reshape(-1, self.num_dim)\n    if self.transform is not None:\n        xopt = self.transform.expand(xopt, \n                                     expand_sensor_model=False).numpy()\n\n    return xopt.reshape(-1, self.num_dim)\n
"},{"location":"API-reference.html#sgptools.models.cma_es.CMA_ES.update","title":"update(noise_variance, kernel)","text":"

Update GP noise variance and kernel function parameters

Parameters:

Name Type Description Default noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/cma_es.py
def update(self, noise_variance, kernel):\n    \"\"\"Update GP noise variance and kernel function parameters\n\n    Args:\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    self.noise_variance = noise_variance\n    self.kernel = kernel\n
"},{"location":"API-reference.html#________________________","title":"________________________","text":"

Core modules in this package:

  • augmented_gpr: Provides a Gaussian process model with expand and aggregate functions
  • augmented_sgpr: Provides a sparse Gaussian process model with update, expand, and aggregate functions
  • osgpr: Provides a streaming sparse Gaussian process model along with initialization function
  • transformations: Provides transforms to model complex sensor field of views and handle informative path planning

Provides a Gaussian process model with expand and aggregate functions

Provides a sparse Gaussian process model with update, expand, and aggregate functions

Provides a streaming sparse Gaussian process model along with initialization function

Provides transforms to model complex sensor field of views and handle informative path planning

"},{"location":"API-reference.html#sgptools.models.core.augmented_gpr.AugmentedGPR","title":"AugmentedGPR","text":"

Bases: GPR

GPR model from the GPFlow library augmented to use a transform object's expand and aggregate functions on the data points where necessary.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required noise_variance float

data variance

required transform Transform

Transform object

required Source code in sgptools/models/core/augmented_gpr.py
class AugmentedGPR(GPR):\n    \"\"\"GPR model from the GPFlow library augmented to use a transform object's\n    expand and aggregate functions on the data points where necessary.  \n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        noise_variance (float): data variance\n        transform (Transform): Transform object\n    \"\"\"\n    def __init__(\n        self,\n        *args,\n        transform,\n        **kwargs\n    ):\n        super().__init__(\n            *args,\n            **kwargs\n        )\n        if transform is None:\n            self.transform = Transform()\n        else:\n            self.transform = transform\n\n    def predict_f(\n        self, Xnew: InputData, \n        full_cov: bool = True, \n        full_output_cov: bool = False,\n        aggregate_train: bool = False,\n    ) -> MeanAndVariance:\n        assert_params_false(self.predict_f, full_output_cov=full_output_cov)\n        if self.transform is not None:\n            Xnew = self.transform.expand(Xnew)\n\n        X, Y = self.data\n        err = Y - self.mean_function(X)\n\n        kmm = self.kernel(X)\n        knn = self.kernel(Xnew, full_cov=full_cov)\n        kmn = self.kernel(X, Xnew)\n        kmm_plus_s = add_likelihood_noise_cov(kmm, self.likelihood, X)\n\n        if self.transform is not None:\n            kmn = self.transform.aggregate(tf.transpose(kmn))\n            kmn = tf.transpose(kmn)\n            knn = self.transform.aggregate(knn)\n\n        if aggregate_train:\n            kmm_plus_s = self.transform.aggregate(kmm_plus_s)\n            err = 
self.transform.aggregate(err)\n            # reduce kmn only if it was not reduced before\n            # which can when train and test data are the same size\n            if kmn.shape[0] != kmn.shape[1]:\n                kmn = self.transform.aggregate(kmn)\n\n        conditional = gpflow.conditionals.base_conditional\n        f_mean_zero, f_var = conditional(\n            kmn, kmm_plus_s, knn, err, full_cov=full_cov, white=False\n        )  # [N, P], [N, P] or [P, N, N]\n        f_mean = f_mean_zero + self.mean_function(Xnew)\n        return f_mean, f_var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR","title":"AugmentedSGPR","text":"

Bases: SGPR

SGPR model from the GPFlow library augmented to use a transform object's expand and aggregate functions on the inducing points where necessary. The object has an additional update function to update the kernel and noise variance parameters (currently, the online updates part works only with RBF kernels).

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required noise_variance float

data variance

required inducing_variable ndarray

(m, d); Initial inducing points

required transform Transform

Transform object

required inducing_variable_time ndarray

(m, d); Temporal dimensions of the inducing points, used when modeling spatio-temporal IPP

None Source code in sgptools/models/core/augmented_sgpr.py
class AugmentedSGPR(SGPR):\n    \"\"\"SGPR model from the GPFlow library augmented to use a transform object's\n    expand and aggregate functions on the inducing points where necessary. The object\n    has an additional update function to update the kernel and noise variance parameters \n    (currently, the online updates part works only with RBF kernels).  \n\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        noise_variance (float): data variance\n        inducing_variable (ndarray): (m, d); Initial inducing points\n        transform (Transform): Transform object\n        inducing_variable_time (ndarray): (m, d); Temporal dimensions of the inducing points, \n                                            used when modeling spatio-temporal IPP\n    \"\"\"\n    def __init__(\n        self,\n        *args,\n        transform,\n        inducing_variable_time=None,\n        **kwargs\n    ):\n        super().__init__(\n            *args,\n            **kwargs\n        )\n        if transform is None:\n            self.transform = Transform()\n        else:\n            self.transform = transform\n\n        if inducing_variable_time is not None:\n            self.inducing_variable_time = inducingpoint_wrapper(inducing_variable_time)\n            self.transform.inducing_variable_time = self.inducing_variable_time\n        else:\n            self.inducing_variable_time = None\n\n    def update(self, noise_variance, kernel):\n        \"\"\"Update SGP noise variance and kernel function parameters\n\n        Args:\n            noise_variance (float): 
data variance\n            kernel (gpflow.kernels.Kernel): gpflow kernel function\n        \"\"\"\n        self.likelihood.variance.assign(noise_variance)\n        self.kernel.lengthscales.assign(kernel.lengthscales)\n        self.kernel.variance.assign(kernel.variance)\n\n    def _common_calculation(self) -> \"SGPR.CommonTensors\":\n        \"\"\"\n        Matrices used in log-det calculation\n        :return: A , B, LB, AAT with :math:`LL\u1d40 = K\u1d64\u1d64 , A = L\u207b\u00b9K_{uf}/\u03c3, AAT = AA\u1d40,\n            B = AAT+I, LBLB\u1d40 = B`\n            A is M x N, B is M x M, LB is M x M, AAT is M x M\n        \"\"\"\n        x, _ = self.data\n\n        iv = self.inducing_variable.Z  # [M]\n        iv = self.transform.expand(iv)\n\n        kuf = self.kernel(iv, x)\n        kuf = self.transform.aggregate(kuf)\n\n        kuu = self.kernel(iv) + 1e-6 * tf.eye(tf.shape(iv)[0], dtype=iv.dtype)\n        kuu = self.transform.aggregate(kuu)\n\n        L = tf.linalg.cholesky(kuu)\n\n        sigma_sq = self.likelihood.variance\n        sigma = tf.sqrt(sigma_sq)\n\n        # Compute intermediate matrices\n        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n        AAT = tf.linalg.matmul(A, A, transpose_b=True)\n        B = add_noise_cov(AAT, tf.cast(1.0, AAT.dtype))\n        LB = tf.linalg.cholesky(B)\n\n        return self.CommonTensors(sigma_sq, sigma, A, B, LB, AAT, L)\n\n    def elbo(self) -> tf.Tensor:\n        \"\"\"\n        Construct a tensorflow function to compute the bound on the marginal\n        likelihood. 
For a derivation of the terms in here, see the associated\n        SGPR notebook.\n        \"\"\"\n        common = self._common_calculation()\n        output_shape = tf.shape(self.data[-1])\n        num_data = to_default_float(output_shape[0])\n        output_dim = to_default_float(output_shape[1])\n        const = -0.5 * num_data * output_dim * np.log(2 * np.pi)\n        logdet = self.logdet_term(common)\n        quad = self.quad_term(common)\n        constraints = self.transform.constraints(self.inducing_variable.Z)\n        return const + logdet + quad + constraints\n\n    def predict_f(\n        self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n    ) -> MeanAndVariance:\n\n        # could copy into posterior into a fused version\n        \"\"\"\n        Compute the mean and variance of the latent function at some new points\n        Xnew. For a derivation of the terms in here, see the associated SGPR\n        notebook.\n        \"\"\"\n        X_data, Y_data = self.data\n\n        iv = self.inducing_variable.Z\n        iv = self.transform.expand(iv)\n\n        num_inducing = tf.shape(iv)[0]\n\n        err = Y_data - self.mean_function(X_data)\n        kuf = self.kernel(iv, X_data)\n        kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)\n        Kus = self.kernel(iv, Xnew)\n        sigma = tf.sqrt(self.likelihood.variance)\n        L = tf.linalg.cholesky(kuu)\n        A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n        B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(\n            num_inducing, dtype=default_float()\n        )  # cache qinv\n        LB = tf.linalg.cholesky(B)\n        Aerr = tf.linalg.matmul(A, err)\n        c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma\n        tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)\n        tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)\n        mean = tf.linalg.matmul(tmp2, c, transpose_a=True)\n        if full_cov:\n 
           var = (\n                self.kernel(Xnew)\n                + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)\n                - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)\n            )\n            var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]\n        else:\n            var = (\n                self.kernel(Xnew, full_cov=False)\n                + tf.reduce_sum(tf.square(tmp2), 0)\n                - tf.reduce_sum(tf.square(tmp1), 0)\n            )\n            var = tf.tile(var[:, None], [1, self.num_latent_gps])\n\n        return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.elbo","title":"elbo()","text":"

Construct a tensorflow function to compute the bound on the marginal likelihood. For a derivation of the terms in here, see the associated SGPR notebook.

Source code in sgptools/models/core/augmented_sgpr.py
def elbo(self) -> tf.Tensor:\n    \"\"\"\n    Construct a tensorflow function to compute the bound on the marginal\n    likelihood. For a derivation of the terms in here, see the associated\n    SGPR notebook.\n    \"\"\"\n    common = self._common_calculation()\n    output_shape = tf.shape(self.data[-1])\n    num_data = to_default_float(output_shape[0])\n    output_dim = to_default_float(output_shape[1])\n    const = -0.5 * num_data * output_dim * np.log(2 * np.pi)\n    logdet = self.logdet_term(common)\n    quad = self.quad_term(common)\n    constraints = self.transform.constraints(self.inducing_variable.Z)\n    return const + logdet + quad + constraints\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.predict_f","title":"predict_f(Xnew, full_cov=False, full_output_cov=False)","text":"

Compute the mean and variance of the latent function at some new points Xnew. For a derivation of the terms in here, see the associated SGPR notebook.

Source code in sgptools/models/core/augmented_sgpr.py
def predict_f(\n    self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False\n) -> MeanAndVariance:\n\n    # could copy into posterior into a fused version\n    \"\"\"\n    Compute the mean and variance of the latent function at some new points\n    Xnew. For a derivation of the terms in here, see the associated SGPR\n    notebook.\n    \"\"\"\n    X_data, Y_data = self.data\n\n    iv = self.inducing_variable.Z\n    iv = self.transform.expand(iv)\n\n    num_inducing = tf.shape(iv)[0]\n\n    err = Y_data - self.mean_function(X_data)\n    kuf = self.kernel(iv, X_data)\n    kuu = self.kernel(iv) + 1e-6 * tf.eye(num_inducing, dtype=iv.dtype)\n    Kus = self.kernel(iv, Xnew)\n    sigma = tf.sqrt(self.likelihood.variance)\n    L = tf.linalg.cholesky(kuu)\n    A = tf.linalg.triangular_solve(L, kuf, lower=True) / sigma\n    B = tf.linalg.matmul(A, A, transpose_b=True) + tf.eye(\n        num_inducing, dtype=default_float()\n    )  # cache qinv\n    LB = tf.linalg.cholesky(B)\n    Aerr = tf.linalg.matmul(A, err)\n    c = tf.linalg.triangular_solve(LB, Aerr, lower=True) / sigma\n    tmp1 = tf.linalg.triangular_solve(L, Kus, lower=True)\n    tmp2 = tf.linalg.triangular_solve(LB, tmp1, lower=True)\n    mean = tf.linalg.matmul(tmp2, c, transpose_a=True)\n    if full_cov:\n        var = (\n            self.kernel(Xnew)\n            + tf.linalg.matmul(tmp2, tmp2, transpose_a=True)\n            - tf.linalg.matmul(tmp1, tmp1, transpose_a=True)\n        )\n        var = tf.tile(var[None, ...], [self.num_latent_gps, 1, 1])  # [P, N, N]\n    else:\n        var = (\n            self.kernel(Xnew, full_cov=False)\n            + tf.reduce_sum(tf.square(tmp2), 0)\n            - tf.reduce_sum(tf.square(tmp1), 0)\n        )\n        var = tf.tile(var[:, None], [1, self.num_latent_gps])\n\n    return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.augmented_sgpr.AugmentedSGPR.update","title":"update(noise_variance, kernel)","text":"

Update SGP noise variance and kernel function parameters

Parameters:

Name Type Description Default noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required Source code in sgptools/models/core/augmented_sgpr.py
def update(self, noise_variance, kernel):\n    \"\"\"Update SGP noise variance and kernel function parameters\n\n    Args:\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n    \"\"\"\n    self.likelihood.variance.assign(noise_variance)\n    self.kernel.lengthscales.assign(kernel.lengthscales)\n    self.kernel.variance.assign(kernel.variance)\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE","title":"OSGPR_VFE","text":"

Bases: GPModel, InternalDataTrainingLossMixin

Online Sparse Variational GP regression model from streaming_sparse_gp

Refer to the following paper for more details
  • Streaming Gaussian process approximations [Bui et al., 2017]

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with inputs (n, d) and labels (n, 1)

required kernel Kernel

gpflow kernel function

required mu_old ndarray

mean of old q(u); here u are the latents corresponding to the inducing points Z_old

required Su_old ndarray

posterior covariance of old q(u)

required Kaa_old ndarray

prior covariance of old q(u)

required Z_old ndarray

(m_old, d): Old initial inducing points

required Z ndarray

(m_new, d): New initial inducing points

required mean_function function

GP mean function

None Source code in sgptools/models/core/osgpr.py
class OSGPR_VFE(GPModel, InternalDataTrainingLossMixin):\n    \"\"\"Online Sparse Variational GP regression model from [streaming_sparse_gp](https://github.com/thangbui/streaming_sparse_gp/tree/master)\n\n    Refer to the following paper for more details:\n        - Streaming Gaussian process approximations [Bui et al., 2017]\n\n    Args:\n        data (tuple): (X, y) ndarrays with inputs (n, d) and labels (n, 1)\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        mu_old (ndarray): mean of old `q(u)`; here `u` are the latents corresponding to the inducing points `Z_old`\n        Su_old (ndarray): posterior covariance of old `q(u)`\n        Kaa_old (ndarray): prior covariance of old `q(u)`\n        Z_old (ndarray): (m_old, d): Old initial inducing points\n        Z (ndarray): (m_new, d): New initial inducing points\n        mean_function (function): GP mean function\n    \"\"\"\n    def __init__(self, data, kernel, mu_old, Su_old, Kaa_old, Z_old, Z, mean_function=None):\n        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n        likelihood = gpflow.likelihoods.Gaussian()\n        num_latent_gps = GPModel.calc_num_latent_gps_from_data(data, kernel, likelihood)\n        super().__init__(kernel, likelihood, mean_function, num_latent_gps)\n\n        self.inducing_variable = InducingPoints(Z)\n        self.num_data = self.X.shape[0]\n\n        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n        self.M_old = Z_old.shape[0]\n        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n        self.Z_old = tf.Variable(Z_old, shape=tf.TensorShape(None), trainable=False)\n\n    def update(self, data):\n        \"\"\"Configure the OSGPR to adapt to a new batch of data. 
\n        Note: The OSGPR needs to be trained using gradient-based approaches after update.\n\n        Args:\n            data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)\n        \"\"\"\n        self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n        self.num_data = self.X.shape[0]\n\n        self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), \n                                 shape=tf.TensorShape(None), \n                                 trainable=False)\n\n        # Get posterior mean and covariance for the old inducing points\n        mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)\n        self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n        self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n\n        # Get the prior covariance matrix for the old inducing points\n        Kaa_old = self.kernel(self.Z_old)\n        self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n\n    def _common_terms(self):\n        Mb = self.inducing_variable.num_inducing\n        Ma = self.M_old\n        # jitter = gpflow.default_jitter()\n        jitter = gpflow.utilities.to_default_float(1e-4)\n        sigma2 = self.likelihood.variance\n        sigma = tf.sqrt(sigma2)\n\n        Saa = self.Su_old\n        ma = self.mu_old\n\n        # a is old inducing points, b is new\n        # f is training points\n        # s is test points\n        Kbf = covariances.Kuf(self.inducing_variable, self.kernel, self.X)\n        Kbb = covariances.Kuu(self.inducing_variable, self.kernel, jitter=jitter)\n        Kba = covariances.Kuf(self.inducing_variable, self.kernel, self.Z_old)\n        Kaa_cur = gpflow.utilities.add_noise_cov(self.kernel(self.Z_old), jitter)\n        Kaa = gpflow.utilities.add_noise_cov(self.Kaa_old, jitter)\n\n        err = self.Y - self.mean_function(self.X)\n\n        Sainv_ma = tf.linalg.solve(Saa, ma)\n        
Sinv_y = self.Y / sigma2\n        c1 = tf.matmul(Kbf, Sinv_y)\n        c2 = tf.matmul(Kba, Sainv_ma)\n        c = c1 + c2\n\n        Lb = tf.linalg.cholesky(Kbb)\n        Lbinv_c = tf.linalg.triangular_solve(Lb, c, lower=True)\n        Lbinv_Kba = tf.linalg.triangular_solve(Lb, Kba, lower=True)\n        Lbinv_Kbf = tf.linalg.triangular_solve(Lb, Kbf, lower=True) / sigma\n        d1 = tf.matmul(Lbinv_Kbf, Lbinv_Kbf, transpose_b=True)\n\n        LSa = tf.linalg.cholesky(Saa)\n        Kab_Lbinv = tf.linalg.matrix_transpose(Lbinv_Kba)\n        LSainv_Kab_Lbinv = tf.linalg.triangular_solve(\n            LSa, Kab_Lbinv, lower=True)\n        d2 = tf.matmul(LSainv_Kab_Lbinv, LSainv_Kab_Lbinv, transpose_a=True)\n\n        La = tf.linalg.cholesky(Kaa)\n        Lainv_Kab_Lbinv = tf.linalg.triangular_solve(\n            La, Kab_Lbinv, lower=True)\n        d3 = tf.matmul(Lainv_Kab_Lbinv, Lainv_Kab_Lbinv, transpose_a=True)\n\n        D = tf.eye(Mb, dtype=gpflow.default_float()) + d1 + d2 - d3\n        D = gpflow.utilities.add_noise_cov(D, jitter)\n        LD = tf.linalg.cholesky(D)\n\n        LDinv_Lbinv_c = tf.linalg.triangular_solve(LD, Lbinv_c, lower=True)\n\n        return (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n                Lbinv_Kba, LDinv_Lbinv_c, err, d1)\n\n    def maximum_log_likelihood_objective(self):\n        \"\"\"\n        Construct a tensorflow function to compute the bound on the marginal\n        likelihood. 
\n        \"\"\"\n\n        Mb = self.inducing_variable.num_inducing\n        Ma = self.M_old\n        jitter = gpflow.default_jitter()\n        # jitter = gpflow.utilities.to_default_float(1e-4)\n        sigma2 = self.likelihood.variance\n        sigma = tf.sqrt(sigma2)\n        N = self.num_data\n\n        Saa = self.Su_old\n        ma = self.mu_old\n\n        # a is old inducing points, b is new\n        # f is training points\n        Kfdiag = self.kernel(self.X, full_cov=False)\n        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n        LSa = tf.linalg.cholesky(Saa)\n        Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)\n\n        # constant term\n        bound = -0.5 * N * np.log(2 * np.pi)\n        # quadratic term\n        bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2\n        # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)\n        bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))\n        bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))\n        # log det term\n        bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))\n        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))\n\n        # delta 1: trace term\n        bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2\n        bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))\n\n        # delta 2: a and b difference\n        bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))\n        bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))\n\n        Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)\n        Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)\n        Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)\n\n        bound += -0.5 * tf.reduce_sum(\n            tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))\n\n        return bound\n\n    def predict_f(self, Xnew, full_cov=False):\n        \"\"\"\n        Compute the mean 
and variance of the latent function at some new points\n        Xnew. \n        \"\"\"\n\n        # jitter = gpflow.default_jitter()\n        jitter = gpflow.utilities.to_default_float(1e-4)\n\n        # a is old inducing points, b is new\n        # f is training points\n        # s is test points\n        Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)\n        (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n            Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n        Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)\n        LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)\n        mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)\n\n        if full_cov:\n            Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())\n            var1 = Kss\n            var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)\n            var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)\n            var = var1 + var2 + var3\n        else:\n            var1 = self.kernel(Xnew, full_cov=False)\n            var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)\n            var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)\n            var = var1 + var2 + var3\n\n        return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.maximum_log_likelihood_objective","title":"maximum_log_likelihood_objective()","text":"

Construct a tensorflow function to compute the bound on the marginal likelihood.

Source code in sgptools/models/core/osgpr.py
def maximum_log_likelihood_objective(self):\n    \"\"\"\n    Construct a tensorflow function to compute the bound on the marginal\n    likelihood. \n    \"\"\"\n\n    Mb = self.inducing_variable.num_inducing\n    Ma = self.M_old\n    jitter = gpflow.default_jitter()\n    # jitter = gpflow.utilities.to_default_float(1e-4)\n    sigma2 = self.likelihood.variance\n    sigma = tf.sqrt(sigma2)\n    N = self.num_data\n\n    Saa = self.Su_old\n    ma = self.mu_old\n\n    # a is old inducing points, b is new\n    # f is training points\n    Kfdiag = self.kernel(self.X, full_cov=False)\n    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n    LSa = tf.linalg.cholesky(Saa)\n    Lainv_ma = tf.linalg.triangular_solve(LSa, ma, lower=True)\n\n    # constant term\n    bound = -0.5 * N * np.log(2 * np.pi)\n    # quadratic term\n    bound += -0.5 * tf.reduce_sum(tf.square(err)) / sigma2\n    # bound += -0.5 * tf.reduce_sum(ma * Sainv_ma)\n    bound += -0.5 * tf.reduce_sum(tf.square(Lainv_ma))\n    bound += 0.5 * tf.reduce_sum(tf.square(LDinv_Lbinv_c))\n    # log det term\n    bound += -0.5 * N * tf.reduce_sum(tf.math.log(sigma2))\n    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LD)))\n\n    # delta 1: trace term\n    bound += -0.5 * tf.reduce_sum(Kfdiag) / sigma2\n    bound += 0.5 * tf.reduce_sum(tf.linalg.diag_part(Qff))\n\n    # delta 2: a and b difference\n    bound += tf.reduce_sum(tf.math.log(tf.linalg.diag_part(La)))\n    bound += - tf.reduce_sum(tf.math.log(tf.linalg.diag_part(LSa)))\n\n    Kaadiff = Kaa_cur - tf.matmul(Lbinv_Kba, Lbinv_Kba, transpose_a=True)\n    Sainv_Kaadiff = tf.linalg.solve(Saa, Kaadiff)\n    Kainv_Kaadiff = tf.linalg.solve(Kaa, Kaadiff)\n\n    bound += -0.5 * tf.reduce_sum(\n        tf.linalg.diag_part(Sainv_Kaadiff) - tf.linalg.diag_part(Kainv_Kaadiff))\n\n    return bound\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.predict_f","title":"predict_f(Xnew, full_cov=False)","text":"

Compute the mean and variance of the latent function at some new points Xnew.

Source code in sgptools/models/core/osgpr.py
def predict_f(self, Xnew, full_cov=False):\n    \"\"\"\n    Compute the mean and variance of the latent function at some new points\n    Xnew. \n    \"\"\"\n\n    # jitter = gpflow.default_jitter()\n    jitter = gpflow.utilities.to_default_float(1e-4)\n\n    # a is old inducing points, b is new\n    # f is training points\n    # s is test points\n    Kbs = covariances.Kuf(self.inducing_variable, self.kernel, Xnew)\n    (Kbf, Kba, Kaa, Kaa_cur, La, Kbb, Lb, D, LD,\n        Lbinv_Kba, LDinv_Lbinv_c, err, Qff) = self._common_terms()\n\n    Lbinv_Kbs = tf.linalg.triangular_solve(Lb, Kbs, lower=True)\n    LDinv_Lbinv_Kbs = tf.linalg.triangular_solve(LD, Lbinv_Kbs, lower=True)\n    mean = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_c, transpose_a=True)\n\n    if full_cov:\n        Kss = self.kernel(Xnew) + jitter * tf.eye(tf.shape(Xnew)[0], dtype=gpflow.default_float())\n        var1 = Kss\n        var2 = - tf.matmul(Lbinv_Kbs, Lbinv_Kbs, transpose_a=True)\n        var3 = tf.matmul(LDinv_Lbinv_Kbs, LDinv_Lbinv_Kbs, transpose_a=True)\n        var = var1 + var2 + var3\n    else:\n        var1 = self.kernel(Xnew, full_cov=False)\n        var2 = -tf.reduce_sum(tf.square(Lbinv_Kbs), axis=0)\n        var3 = tf.reduce_sum(tf.square(LDinv_Lbinv_Kbs), axis=0)\n        var = var1 + var2 + var3\n\n    return mean + self.mean_function(Xnew), var\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.OSGPR_VFE.update","title":"update(data)","text":"

Configure the OSGPR to adapt to a new batch of data. Note: The OSGPR needs to be trained using gradient-based approaches after update.

Parameters:

Name Type Description Default data tuple

(X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)

required Source code in sgptools/models/core/osgpr.py
def update(self, data):\n    \"\"\"Configure the OSGPR to adapt to a new batch of data. \n    Note: The OSGPR needs to be trained using gradient-based approaches after update.\n\n    Args:\n        data (tuple): (X, y) ndarrays with new batch of inputs (n, d) and labels (n, 1)\n    \"\"\"\n    self.X, self.Y = self.data = gpflow.models.util.data_input_to_tensor(data)\n    self.num_data = self.X.shape[0]\n\n    self.Z_old = tf.Variable(self.inducing_variable.Z.numpy(), \n                             shape=tf.TensorShape(None), \n                             trainable=False)\n\n    # Get posterior mean and covariance for the old inducing points\n    mu_old, Su_old = self.predict_f(self.Z_old, full_cov=True)\n    self.mu_old = tf.Variable(mu_old, shape=tf.TensorShape(None), trainable=False)\n    self.Su_old = tf.Variable(Su_old, shape=tf.TensorShape(None), trainable=False)\n\n    # Get the prior covariance matrix for the old inducing points\n    Kaa_old = self.kernel(self.Z_old)\n    self.Kaa_old = tf.Variable(Kaa_old, shape=tf.TensorShape(None), trainable=False)\n
"},{"location":"API-reference.html#sgptools.models.core.osgpr.init_osgpr","title":"init_osgpr(X_train, num_inducing=10, lengthscales=1.0, variance=1.0, noise_variance=0.001)","text":"

Initialize a VFE OSGPR model with an RBF kernel with unit variance and lengthscales, and 0.001 noise variance. Used in the Online Continuous SGP approach.

Parameters:

Name Type Description Default X_train ndarray

(n, d); Unlabeled random sampled training points. They only affect the initial inducing point locations, i.e., limit them to the bounds of the data

required num_inducing int

Number of inducing points

10 lengthscales ndarray or list

Kernel lengthscale of each dimension of the data

1.0 variance float

Kernel variance

1.0 noise_variance float

Data variance

0.001

Returns:

Name Type Description online_param OSGPR_VFE

Initialized online sparse Gaussian process model

Source code in sgptools/models/core/osgpr.py
def init_osgpr(X_train, \n               num_inducing=10, \n               lengthscales=1.0, \n               variance=1.0,\n               noise_variance=0.001):\n    \"\"\"Initialize a VFE OSGPR model with an RBF kernel with \n    unit variance and lengthcales, and 0.001 noise variance.\n    Used in the Online Continuous SGP approach. \n\n    Args:\n        X_train (ndarray): (n, d); Unlabeled random sampled training points. \n                        They only effect the initial inducing point locations, \n                        i.e., limits them to the bounds of the data\n        num_inducing (int): Number of inducing points\n        lengthscales (ndarray or list): Kernel lengthscale of each dimension of the data\n        variance (float): Kernel variance\n        noise_variance (float): Data variance\n\n    Returns:\n        online_param (OSGPR_VFE): Initialized online sparse Gaussian process model\n    \"\"\"\n\n    y_train = np.zeros((len(X_train), 1), dtype=X_train.dtype)\n    Z_init = get_inducing_pts(X_train, num_inducing)\n    init_param = gpflow.models.SGPR((X_train, y_train),\n                                    gpflow.kernels.RBF(variance=variance, \n                                                       lengthscales=lengthscales), \n                                    inducing_variable=Z_init, \n                                    noise_variance=noise_variance)\n\n    # Initialize the OSGPR model using the parameters from the SGPR model\n    # The X_train and y_train here will be overwritten in the online phase \n    X_train = np.array([[0, 0], [0, 0]])\n    y_train = np.array([0, 0]).reshape(-1, 1)\n    Zopt = init_param.inducing_variable.Z.numpy()\n    mu, Su = init_param.predict_f(Zopt, full_cov=True)\n    Kaa = init_param.kernel(Zopt)\n    online_param = OSGPR_VFE((X_train[:2], y_train[:2]),\n                             init_param.kernel,\n                             mu, Su[0], Kaa,\n                             Zopt, Zopt)\n    
online_param.likelihood.variance.assign(init_param.likelihood.variance)\n\n    return online_param\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform","title":"IPPTransform","text":"

Bases: Transform

Transform to model IPP problems

Usage details
  • For point sensing, set sampling_rate = 2
  • For continuous sensing, set sampling_rate > 2 (approx the data collected along the path)
  • For multi-robot case, set num_robots > 1
  • For onlineIPP use update_fixed to freeze the visited waypoints

Parameters:

Name Type Description Default sampling_rate int

Number of points to sample between each pair of inducing points

2 distance_budget float

Distance budget for the path

None num_robots int

Number of robots

1 Xu_fixed ndarray

(num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized

None num_dim int

Dimension of the data collection environment

2 sensor_model Transform

Transform object to expand each inducing point to p points approximating each sensor's FoV

None Source code in sgptools/models/core/transformations.py
class IPPTransform(Transform):\n    \"\"\"Transform to model IPP problems\n\n    Usage details: \n        * For point sensing, set `sampling_rate = 2`\n        * For continuous sensing, set `sampling_rate > 2` (approx the data collected along the path)\n        * For multi-robot case, set `num_robots > 1`\n        * For onlineIPP use `update_fixed` to freeze the visited waypoints\n\n    Args:\n        sampling_rate (int): Number of points to sample between each pair of inducing points\n        distance_budget (float): Distance budget for the path\n        num_robots (int): Number of robots\n        Xu_fixed (ndarray): (num_robots, num_visited, num_dim); Visited waypoints that don't need to be optimized\n        num_dim (int): Dimension of the data collection environment\n        sensor_model (Transform): Transform object to expand each inducing point to `p` points \n                                  approximating each sensor's FoV\n    \"\"\"\n    def __init__(self, \n                 sampling_rate=2, \n                 distance_budget=None, \n                 num_robots=1,\n                 Xu_fixed=None,\n                 num_dim=2,\n                 sensor_model=None,\n                 **kwargs):\n        super().__init__(**kwargs)\n        if sampling_rate < 2:\n            raise ValueError('Sampling rate must be greater than 2.')\n\n        self.sampling_rate = sampling_rate\n        self.distance_budget = distance_budget\n        self.num_robots = num_robots\n        self.num_dim = num_dim\n        self.sensor_model = sensor_model\n\n        # Disable aggregation if aggregation size was explicitly set to 0\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        # Set aggregation size to sampling rate if aggregation size was not set\n        # and sampling rate is enabled (greater than 2)\n        elif self.aggregation_size is None and sampling_rate > 2:\n            self.aggregation_size = sampling_rate\n\n        # 
Initilize variable to store visited waypoints for onlineIPP\n        if Xu_fixed is not None:\n            self.Xu_fixed = tf.Variable(Xu_fixed, \n                                        shape=tf.TensorShape(None), \n                                        trainable=False)\n        else:\n            self.Xu_fixed = None\n\n    def update_Xu_fixed(self, Xu_fixed):\n        \"\"\"Function to update the visited waypoints\n\n        Args:\n            Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)\n        \"\"\"\n        self.num_fixed = Xu_fixed.shape[1]\n        if self.Xu_fixed is not None:\n            self.Xu_fixed.assign(Xu_fixed)\n        else:\n            # ToDo: Use binary mask of fixed size to avoid retracing\n            self.Xu_fixed = tf.Variable(Xu_fixed, \n                                        shape=tf.TensorShape(None), \n                                        trainable=False)\n\n    def expand(self, Xu, expand_sensor_model=True):\n        \"\"\"Sample points between each pair of inducing points to form the path\n\n        Args:\n            Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space\n            expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, \n                                        used for online IPP\n\n        Returns:\n            Xu (ndarray): Expansion transformed inducing points\n        \"\"\"\n        # If using single-robot offline IPP with point sensing, return inducing points as is.\n        if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:\n            return Xu\n\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n        # If using online IPP, add visited waypoints that won't be optimized anymore\n        if self.Xu_fixed is not None:\n            Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)\n\n        if not 
expand_sensor_model:\n            return tf.reshape(Xu, (-1, self.num_dim))\n\n        # Interpolate additional inducing points between waypoints to approximate \n        # the continuous data sensing model\n        if self.sampling_rate > 2:\n            Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)\n            Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])\n            Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n        if self.sensor_model is not None:\n            Xu = self.sensor_model.expand(Xu)\n            return Xu\n\n        Xu = tf.reshape(Xu, (-1, self.num_dim))\n        return Xu\n\n    def aggregate(self, k):\n        \"\"\"Applies the aggregation transform to kernel matrices. Checks `sensor_model` \n           and uses the appropriate aggregation transform. \n\n        Args:\n            k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                        `m` is the number of inducing points,\n                        `p` is the number of points each inducing point is mapped,\n                        `n` is the number of training data points.\n\n        Returns:\n            k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n        \"\"\"\n        if self.sensor_model is not None:\n            return self.sensor_model.aggregate(k)\n        else:\n            return super().aggregate(k)\n\n    def constraints(self, Xu):\n        \"\"\"Computes the distance constraint term that is added to the SGP's optimization function.\n        Each robot can be assigned a different distance budget.\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the distance constraints\n\n        Returns:\n            loss (float): distance constraint term\n        \"\"\"\n        if self.distance_budget is None:\n            return 0.\n        else:\n            Xu = self.expand(Xu, expand_sensor_model=False)\n            dist = self.distance(Xu)-self.distance_budget\n            dist = 
tf.reduce_sum(tf.nn.relu(dist))\n            loss = -dist*self.constraint_weight\n            return loss\n\n    def distance(self, Xu):\n        \"\"\"Computes the distance incured by sequentially visiting the inducing points\n        ToDo: Change distance from 2d to nd. Currently limited to 2d \n            to ensure the rotation angle is not included when using\n            a square FoV sensor.\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the path lengths\n\n        Returns:\n            dist (float): path lengths\n        \"\"\"\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n        dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)\n        dist = tf.reduce_sum(dist, axis=1)\n        return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.aggregate","title":"aggregate(k)","text":"

Applies the aggregation transform to kernel matrices. Checks sensor_model and uses the appropriate aggregation transform.

Parameters:

Name Type Description Default k tensor

(mp, mp)/(mp, n); Kernel matrix. m is the number of inducing points, p is the number of points each inducing point is mapped, n is the number of training data points.

required

Returns:

Name Type Description k tensor

(m, m)/(m, n); Aggregated kernel matrix

Source code in sgptools/models/core/transformations.py
def aggregate(self, k):\n    \"\"\"Applies the aggregation transform to kernel matrices. Checks `sensor_model` \n       and uses the appropriate aggregation transform. \n\n    Args:\n        k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                    `m` is the number of inducing points,\n                    `p` is the number of points each inducing point is mapped,\n                    `n` is the number of training data points.\n\n    Returns:\n        k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n    \"\"\"\n    if self.sensor_model is not None:\n        return self.sensor_model.aggregate(k)\n    else:\n        return super().aggregate(k)\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.constraints","title":"constraints(Xu)","text":"

Computes the distance constraint term that is added to the SGP's optimization function. Each robot can be assigned a different distance budget.

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the distance constraints

required

Returns:

Name Type Description loss float

distance constraint term

Source code in sgptools/models/core/transformations.py
def constraints(self, Xu):\n    \"\"\"Computes the distance constraint term that is added to the SGP's optimization function.\n    Each robot can be assigned a different distance budget.\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the distance constraints\n\n    Returns:\n        loss (float): distance constraint term\n    \"\"\"\n    if self.distance_budget is None:\n        return 0.\n    else:\n        Xu = self.expand(Xu, expand_sensor_model=False)\n        dist = self.distance(Xu)-self.distance_budget\n        dist = tf.reduce_sum(tf.nn.relu(dist))\n        loss = -dist*self.constraint_weight\n        return loss\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.distance","title":"distance(Xu)","text":"

Computes the distance incurred by sequentially visiting the inducing points ToDo: Change distance from 2d to nd. Currently limited to 2d to ensure the rotation angle is not included when using a square FoV sensor.

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the path lengths

required

Returns:

Name Type Description dist float

path lengths

Source code in sgptools/models/core/transformations.py
def distance(self, Xu):\n    \"\"\"Computes the distance incured by sequentially visiting the inducing points\n    ToDo: Change distance from 2d to nd. Currently limited to 2d \n        to ensure the rotation angle is not included when using\n        a square FoV sensor.\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the path lengths\n\n    Returns:\n        dist (float): path lengths\n    \"\"\"\n    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n    dist = tf.norm(Xu[:, 1:, :2] - Xu[:, :-1, :2], axis=-1)\n    dist = tf.reduce_sum(dist, axis=1)\n    return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.expand","title":"expand(Xu, expand_sensor_model=True)","text":"

Sample points between each pair of inducing points to form the path

Parameters:

Name Type Description Default Xu ndarray

(num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space

required expand_sensor_model bool

Only add the fixed inducing points without other sensor/path transforms, used for online IPP

True

Returns:

Name Type Description Xu ndarray

Expansion transformed inducing points

Source code in sgptools/models/core/transformations.py
def expand(self, Xu, expand_sensor_model=True):\n    \"\"\"Sample points between each pair of inducing points to form the path\n\n    Args:\n        Xu (ndarray): (num_robots x num_inducing, num_dim); Inducing points in the num_dim dimensional space\n        expand_sensor_model (bool): Only add the fixed inducing points without other sensor/path transforms, \n                                    used for online IPP\n\n    Returns:\n        Xu (ndarray): Expansion transformed inducing points\n    \"\"\"\n    # If using single-robot offline IPP with point sensing, return inducing points as is.\n    if self.sampling_rate == 2 and self.Xu_fixed is None and self.sensor_model is None:\n        return Xu\n\n    Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n    # If using online IPP, add visited waypoints that won't be optimized anymore\n    if self.Xu_fixed is not None:\n        Xu = tf.concat([self.Xu_fixed, Xu[:, self.num_fixed:]], axis=1)\n\n    if not expand_sensor_model:\n        return tf.reshape(Xu, (-1, self.num_dim))\n\n    # Interpolate additional inducing points between waypoints to approximate \n    # the continuous data sensing model\n    if self.sampling_rate > 2:\n        Xu = tf.linspace(Xu[:, :-1], Xu[:, 1:], self.sampling_rate)\n        Xu = tf.transpose(Xu, perm=[1, 2, 0, 3])\n        Xu = tf.reshape(Xu, (self.num_robots, -1, self.num_dim))\n\n    if self.sensor_model is not None:\n        Xu = self.sensor_model.expand(Xu)\n        return Xu\n\n    Xu = tf.reshape(Xu, (-1, self.num_dim))\n    return Xu\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.IPPTransform.update_Xu_fixed","title":"update_Xu_fixed(Xu_fixed)","text":"

Function to update the visited waypoints

Parameters:

Name Type Description Default Xu_fixed ndarray

numpy array (num_robots, num_visited_waypoints, num_dim)

required Source code in sgptools/models/core/transformations.py
def update_Xu_fixed(self, Xu_fixed):\n    \"\"\"Function to update the visited waypoints\n\n    Args:\n        Xu_fixed (ndarray): numpy array (num_robots, num_visited_waypoints, num_dim)\n    \"\"\"\n    self.num_fixed = Xu_fixed.shape[1]\n    if self.Xu_fixed is not None:\n        self.Xu_fixed.assign(Xu_fixed)\n    else:\n        # ToDo: Use binary mask of fixed size to avoid retracing\n        self.Xu_fixed = tf.Variable(Xu_fixed, \n                                    shape=tf.TensorShape(None), \n                                    trainable=False)\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareHeightTransform","title":"SquareHeightTransform","text":"

Bases: Transform

Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform

Parameters:

Name Type Description Default num_points int

Number of points along each side of the FoV

required distance_budget float

Distance budget for the path

None Source code in sgptools/models/core/transformations.py
class SquareHeightTransform(Transform):\n    \"\"\"Non-point Transform to model a height-dependent square FoV. Only works for single robot cases. \n    ToDo: Convert from single to multi-robot setup and make it compatible with IPPTransform\n\n    Args:\n        num_points (int): Number of points along each side of the FoV\n        distance_budget (float): Distance budget for the path\n    \"\"\"\n    def __init__(self, num_points, distance_budget=None, **kwargs):\n        super().__init__(**kwargs)\n        self.num_points = num_points\n        self.distance_budget = distance_budget\n\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        elif self.aggregation_size is None:\n            self.aggregation_size = num_points**2\n\n    def expand(self, Xu):     \n        \"\"\"\n        Applies the expansion transform to the inducing points\n\n        Args:\n            Xu (ndarray): (m, 3); Inducing points in the 3D position space.\n                        `m` is the number of inducing points,\n                        `3` is the dimension of the space (x, y, z)\n\n        Returns:\n            Xu (ndarray): (mp, 2); Inducing points in input space.\n                        `p` is the number of points each inducing point is mapped \n                        to in order to form the FoV.\n        \"\"\"\n        x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)\n        x = tf.squeeze(x)\n        y = tf.squeeze(y)\n        h = tf.squeeze(h)\n\n        delta = h / (self.num_points - 1)\n\n        pts = []\n        for i in range(self.num_points):\n            pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], \n                                   [x + h/2, y - (h/2) + (delta * i)], \n                                   self.num_points, \n                                   axis=1))\n        xy = tf.concat(pts, axis=1)\n        xy = tf.transpose(xy, [2, 1, 0])\n        xy = tf.reshape(xy, [-1, 2])\n        xy = self._reshape(xy, 
tf.shape(Xu)[0])\n        return xy\n\n    def _reshape(self, X, num_inducing):\n        \"\"\"Reorder the inducing points to be in the correct order for aggregation with square height FoV\n\n        Args:\n            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each \n                        inducing point is mapped to in order to form the FoV.\n\n        Returns:\n            Xu (ndarray): (mp, 2); Reorder inducing points\n        \"\"\"\n        X = tf.reshape(X, (num_inducing, -1, self.num_points, self.num_points, 2))\n        X = tf.transpose(X, (0, 2, 1, 3, 4))\n        X = tf.reshape(X, (-1, 2))\n        return X\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareHeightTransform.expand","title":"expand(Xu)","text":"

Applies the expansion transform to the inducing points

Parameters:

Name Type Description Default Xu ndarray

(m, 3); Inducing points in the 3D position space. m is the number of inducing points, 3 is the dimension of the space (x, y, z)

required

Returns:

Name Type Description Xu ndarray

(mp, 2); Inducing points in input space. p is the number of points each inducing point is mapped to in order to form the FoV.

Source code in sgptools/models/core/transformations.py
def expand(self, Xu):     \n    \"\"\"\n    Applies the expansion transform to the inducing points\n\n    Args:\n        Xu (ndarray): (m, 3); Inducing points in the 3D position space.\n                    `m` is the number of inducing points,\n                    `3` is the dimension of the space (x, y, z)\n\n    Returns:\n        Xu (ndarray): (mp, 2); Inducing points in input space.\n                    `p` is the number of points each inducing point is mapped \n                    to in order to form the FoV.\n    \"\"\"\n    x, y, h = tf.split(Xu, num_or_size_splits=3, axis=1)\n    x = tf.squeeze(x)\n    y = tf.squeeze(y)\n    h = tf.squeeze(h)\n\n    delta = h / (self.num_points - 1)\n\n    pts = []\n    for i in range(self.num_points):\n        pts.append(tf.linspace([x - h/2, y - (h/2) + (delta * i)], \n                               [x + h/2, y - (h/2) + (delta * i)], \n                               self.num_points, \n                               axis=1))\n    xy = tf.concat(pts, axis=1)\n    xy = tf.transpose(xy, [2, 1, 0])\n    xy = tf.reshape(xy, [-1, 2])\n    xy = self._reshape(xy, tf.shape(Xu)[0])\n    return xy\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareTransform","title":"SquareTransform","text":"

Bases: Transform

Non-point Transform to model a square FoV. Only works for single robot cases. ToDo: update expand function to handle multi-robot case.

Parameters:

Name Type Description Default length float

Length of the square FoV

required num_side int

Number of points along each side of the FoV

required Source code in sgptools/models/core/transformations.py
class SquareTransform(Transform):\n    \"\"\"Non-point Transform to model a square FoV. Only works for single robot cases. \n    ToDo: update expand function to handle multi-robot case.\n\n    Args:\n        length (float): Length of the square FoV\n        num_side (int): Number of points along each side of the FoV\n    \"\"\"\n    def __init__(self, length, num_side, **kwargs):\n        super().__init__(**kwargs)\n        self.length = length\n        self.num_side = num_side\n        self.length_factor=length/(self.num_side)\n        self.num_length = int(length/self.length_factor)\n\n        if self.aggregation_size == 0:\n            self.aggregation_size = None\n        elif self.aggregation_size is None:\n            self.aggregation_size = num_side**2\n\n    def expand(self, Xu):\n        \"\"\"Applies the expansion transformation to the inducing points\n\n        Args:\n            Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.\n                            `m` is the number of inducing points,\n                            `3` is the dimension of the space (x, y, angle in radians)\n\n        Returns:\n            Xu (ndarray): (mp, 2); Inducing points in input space.\n                        `p` is the number of points each inducing point is mapped \n                         to in order to form the FoV.\n        \"\"\"\n        x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)\n        x = tf.squeeze(x)\n        y = tf.squeeze(y)\n        theta = tf.squeeze(theta)\n\n        points = []\n        for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):\n            points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), \n                                       (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], \n                                      [(x + (i * self.length_factor) * tf.cos(theta)) 
+ self.length/2 * tf.cos(theta+np.pi/2), \n                                       (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], \n                                      self.num_side, axis=1))\n        xy = tf.concat(points, axis=1)\n        xy = tf.transpose(xy, [2, 1, 0])\n        xy = tf.reshape(xy, [-1, 2])\n        xy = self._reshape(xy, tf.shape(Xu)[1])\n        return xy\n\n    def _reshape(self, X, num_inducing):\n        \"\"\"Reorder the inducing points to be in the correct order for aggregation with square FoV.\n\n        Args:\n            X (ndarray): (mp, 2); Inducing points in input space. `p` is the number of points each \n                        inducing point is mapped to in order to form the FoV.\n\n        Returns:\n            Xu (ndarray): (mp, 2); Reorder inducing points\n        \"\"\"\n        X = tf.reshape(X, (num_inducing, -1, self.num_side, self.num_side, 2))\n        X = tf.transpose(X, (0, 2, 1, 3, 4))\n        X = tf.reshape(X, (-1, 2))\n        return X\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.SquareTransform.expand","title":"expand(Xu)","text":"

Applies the expansion transformation to the inducing points

Parameters:

Name Type Description Default Xu ndarray

(1, m, 3); Inducing points in the position and orientation space. m is the number of inducing points, 3 is the dimension of the space (x, y, angle in radians)

required

Returns:

Name Type Description Xu ndarray

(mp, 2); Inducing points in input space. p is the number of points each inducing point is mapped to in order to form the FoV.

Source code in sgptools/models/core/transformations.py
def expand(self, Xu):\n    \"\"\"Applies the expansion transformation to the inducing points\n\n    Args:\n        Xu (ndarray): (1, m, 3); Inducing points in the position and orientation space.\n                        `m` is the number of inducing points,\n                        `3` is the dimension of the space (x, y, angle in radians)\n\n    Returns:\n        Xu (ndarray): (mp, 2); Inducing points in input space.\n                    `p` is the number of points each inducing point is mapped \n                     to in order to form the FoV.\n    \"\"\"\n    x, y, theta = tf.split(Xu, num_or_size_splits=3, axis=2)\n    x = tf.squeeze(x)\n    y = tf.squeeze(y)\n    theta = tf.squeeze(theta)\n\n    points = []\n    for i in range(-int(np.floor((self.num_side)/2)), int(np.ceil((self.num_side)/2))):\n        points.append(tf.linspace([(x + (i * self.length_factor) * tf.cos(theta)) - self.length/2 * tf.cos(theta+np.pi/2), \n                                   (y + (i * self.length_factor) * tf.sin(theta)) - self.length/2 * tf.sin(theta+np.pi/2)], \n                                  [(x + (i * self.length_factor) * tf.cos(theta)) + self.length/2 * tf.cos(theta+np.pi/2), \n                                   (y + (i * self.length_factor) * tf.sin(theta)) + self.length/2 * tf.sin(theta+np.pi/2)], \n                                  self.num_side, axis=1))\n    xy = tf.concat(points, axis=1)\n    xy = tf.transpose(xy, [2, 1, 0])\n    xy = tf.reshape(xy, [-1, 2])\n    xy = self._reshape(xy, tf.shape(Xu)[1])\n    return xy\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform","title":"Transform","text":"

Base class for transformations of the inducing points, including expansion and aggregation transforms.

Refer to the following papers for more details
  • Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]
  • Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]

Parameters:

Name Type Description Default aggregation_size int

Number of consecutive inducing points to aggregate

None constraint_weight float

Weight term that controls the importance of the constraint terms in the SGP's optimization objective

1.0 Source code in sgptools/models/core/transformations.py
class Transform:\n    \"\"\"Base class for transformations of the inducing points, including expansion and aggregation transforms.\n\n    Refer to the following papers for more details:\n        - Efficient Sensor Placement from Regression with Sparse Gaussian Processes in Continuous and Discrete Spaces [Jakkala and Akella, 2023]\n        - Multi-Robot Informative Path Planning from Regression with Sparse Gaussian Processes [Jakkala and Akella, 2024]\n\n    Args:\n        aggregation_size (int): Number of consecutive inducing points to aggregate\n        constraint_weight (float): Weight term that controls the importance of the \n                                   constraint terms in the SGP's optimization objective \n    \"\"\"\n    def __init__(self, \n                 aggregation_size=None, \n                 constraint_weight=1.0,\n                 **kwargs):\n        self.aggregation_size = aggregation_size\n        self.constraint_weight = constraint_weight\n\n    def expand(self, Xu):\n        \"\"\"Applies the expansion transform to the inducing points\n\n        Args:\n            Xu (ndarray): Expansion transformed inducing points\n        \"\"\"\n        return Xu\n\n    def aggregate(self, k):\n        \"\"\"Applies the aggregation transform to kernel matrices\n\n        Args:\n            k (tensor): (mp, mp)/(mp, n); Kernel matrix. 
\n                        `m` is the number of inducing points,\n                        `p` is the number of points each inducing point is mapped,\n                        `n` is the number of training data points.\n\n        Returns:\n            k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n        \"\"\"\n        if self.aggregation_size is None:\n            return k\n\n        if k.shape[0] == k.shape[1]:\n            # Handle Kuu which is a square matrix\n            k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)\n            k = tf.nn.avg_pool(k,\n                               ksize=[1, self.aggregation_size, self.aggregation_size, 1],\n                               strides=[1, self.aggregation_size, self.aggregation_size, 1],\n                               padding='VALID')\n            k = tf.squeeze(k, axis=[0, -1])\n        else:\n            # Handle Kuf which is a rectangular matrix\n            k = tf.expand_dims(k, axis=0)\n            k = tf.nn.avg_pool(k,\n                               ksize=[1, self.aggregation_size, 1],\n                               strides=[1, self.aggregation_size, 1],\n                               padding='VALID')\n            k = tf.squeeze(k, axis=[0])\n        return k\n\n    def constraints(self, Xu):\n        \"\"\"Computes the constraint terms that are added to the SGP's optimization function\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the constraints\n\n        Returns:\n            c (float): constraint terms (eg., distance constraint)\n        \"\"\"\n        return 0.\n\n    def distance(self, Xu):\n        \"\"\"Computes the distance incured by sequentially visiting the inducing points\n\n        Args:\n            Xu (ndarray): Inducing points from which to compute the path length\n\n        Returns:\n            dist (float): path length\n        \"\"\"\n        dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))\n        return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.aggregate","title":"aggregate(k)","text":"

Applies the aggregation transform to kernel matrices

Parameters:

Name Type Description Default k tensor

(mp, mp)/(mp, n); Kernel matrix. m is the number of inducing points, p is the number of points each inducing point is mapped, n is the number of training data points.

required

Returns:

Name Type Description k tensor

(m, m)/(m, n); Aggregated kernel matrix

Source code in sgptools/models/core/transformations.py
def aggregate(self, k):\n    \"\"\"Applies the aggregation transform to kernel matrices\n\n    Args:\n        k (tensor): (mp, mp)/(mp, n); Kernel matrix. \n                    `m` is the number of inducing points,\n                    `p` is the number of points each inducing point is mapped,\n                    `n` is the number of training data points.\n\n    Returns:\n        k (tensor): (m, m)/(m, n); Aggregated kernel matrix\n    \"\"\"\n    if self.aggregation_size is None:\n        return k\n\n    if k.shape[0] == k.shape[1]:\n        # Handle Kuu which is a square matrix\n        k = tf.expand_dims(tf.expand_dims(k, axis=0), axis=-1)\n        k = tf.nn.avg_pool(k,\n                           ksize=[1, self.aggregation_size, self.aggregation_size, 1],\n                           strides=[1, self.aggregation_size, self.aggregation_size, 1],\n                           padding='VALID')\n        k = tf.squeeze(k, axis=[0, -1])\n    else:\n        # Handle Kuf which is a rectangular matrix\n        k = tf.expand_dims(k, axis=0)\n        k = tf.nn.avg_pool(k,\n                           ksize=[1, self.aggregation_size, 1],\n                           strides=[1, self.aggregation_size, 1],\n                           padding='VALID')\n        k = tf.squeeze(k, axis=[0])\n    return k\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.constraints","title":"constraints(Xu)","text":"

Computes the constraint terms that are added to the SGP's optimization function

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the constraints

required

Returns:

Name Type Description c float

constraint terms (eg., distance constraint)

Source code in sgptools/models/core/transformations.py
def constraints(self, Xu):\n    \"\"\"Computes the constraint terms that are added to the SGP's optimization function\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the constraints\n\n    Returns:\n        c (float): constraint terms (eg., distance constraint)\n    \"\"\"\n    return 0.\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.distance","title":"distance(Xu)","text":"

Computes the distance incurred by sequentially visiting the inducing points

Parameters:

Name Type Description Default Xu ndarray

Inducing points from which to compute the path length

required

Returns:

Name Type Description dist float

path length

Source code in sgptools/models/core/transformations.py
def distance(self, Xu):\n    \"\"\"Computes the distance incured by sequentially visiting the inducing points\n\n    Args:\n        Xu (ndarray): Inducing points from which to compute the path length\n\n    Returns:\n        dist (float): path length\n    \"\"\"\n    dist = tf.math.reduce_sum(tf.norm(Xu[1:]-Xu[:-1], axis=1))\n    return dist\n
"},{"location":"API-reference.html#sgptools.models.core.transformations.Transform.expand","title":"expand(Xu)","text":"

Applies the expansion transform to the inducing points

Parameters:

Name Type Description Default Xu ndarray

Expansion transformed inducing points

required Source code in sgptools/models/core/transformations.py
def expand(self, Xu):\n    \"\"\"Applies the expansion transform to the inducing points\n\n    Args:\n        Xu (ndarray): Expansion transformed inducing points\n    \"\"\"\n    return Xu\n
"},{"location":"API-reference.html#_________________________1","title":"________________________","text":"

General utilities to support the functionalities of this package:

  • data: Provides utilities to preprocess datasets
  • gpflow: Provides utilities to interface with GPflow
  • metrics: Provides utilities to quantify the solution quality
  • misc: Provides miscellaneous helper functions
  • tsp: Provides utilities to run TSP/VRP solver
"},{"location":"API-reference.html#sgptools.utils.tsp.resample_path","title":"resample_path(waypoints, num_inducing=10)","text":"

Function to map path with arbitrary number of waypoints to inducing points path with fixed number of waypoints

Parameters:

Name Type Description Default waypoints ndarray

(num_waypoints, n_dim); waypoints of path from vrp solver

required num_inducing int

Number of inducing points (waypoints) in the returned path

10

Returns:

Name Type Description points ndarray

(num_inducing, n_dim); Resampled path

Source code in sgptools/utils/tsp.py
def resample_path(waypoints, num_inducing=10):\n    \"\"\"Function to map path with arbitrary number of waypoints to \n    inducing points path with fixed number of waypoints\n\n    Args:\n        waypoints (ndarray): (num_waypoints, n_dim); waypoints of path from vrp solver\n        num_inducing (int): Number of inducing points (waypoints) in the returned path\n\n    Returns:\n        points (ndarray): (num_inducing, n_dim); Resampled path\n    \"\"\"\n    line = LineString(waypoints)\n    distances = np.linspace(0, line.length, num_inducing)\n    points = [line.interpolate(distance) for distance in distances]\n    points = np.array([[p.x, p.y] for p in points])\n    return points\n
"},{"location":"API-reference.html#sgptools.utils.tsp.run_tsp","title":"run_tsp(nodes, num_vehicles=1, max_dist=25, depth=1, resample=None, start_idx=None, end_idx=None)","text":"

Method to run TSP/VRP with arbitrary start and end nodes, and without any distance constraint

Parameters:

Name Type Description Default nodes ndarray

(# nodes, n_dim); Nodes to visit

required num_vehicles int

Number of robots/vehicles

1 max_dist float

Maximum distance allowed for each path when handling the multi-robot case

25 depth int

Internal parameter used to track re-try recursion depth

1 resample int

Each solution path will be resampled to have resample number of points

None start_idx list

Optional list of start node indices from which to start the solution path

None end_idx list

Optional list of end node indices at which to end the solution path

None

Returns:

Name Type Description paths ndarray

Solution paths

distances list

List of path lengths

Source code in sgptools/utils/tsp.py
def run_tsp(nodes, \n            num_vehicles=1, \n            max_dist=25, \n            depth=1, \n            resample=None, \n            start_idx=None,\n            end_idx=None):\n    \"\"\"Method to run TSP/VRP with arbitrary start and end nodes, \n    and without any distance constraint\n\n    Args:\n        nodes (ndarray): (# nodes, n_dim); Nodes to visit \n        num_vehicles (int): Number of robots/vehicles\n        max_dist (float): Maximum distance allowed for each path when handling mutli-robot case\n        depth (int): Internal parameter used to track re-try recursion depth\n        resample (int): Each solution path will be resampled to have\n                        `resample` number of points\n        start_idx (list): Optionl list of start node indices from which to start the solution path \n        end_idx (list): Optionl list of end node indices from which to start the solution path \n\n    Returns:\n        paths (ndarray): Solution paths\n        distances (list): List of path lengths\n    \"\"\"\n    if depth > 5:\n        print('Warning: Max depth reached')\n        return None, None\n\n    # Add dummy 0 location to get arbitrary start and end node sols\n    if start_idx is None or end_idx is None:\n        distance_mat = np.zeros((len(nodes)+1, len(nodes)+1))\n        distance_mat[1:, 1:] = pairwise_distances(nodes, nodes)*1e4\n        trim_paths = True\n    else:\n        distance_mat = pairwise_distances(nodes, nodes)*1e4\n        trim_paths = False\n    distance_mat = distance_mat.astype(int)\n    max_dist = int(max_dist*1e4)\n\n    if start_idx is None:\n        start_idx = [0]*num_vehicles\n    elif trim_paths:\n        start_idx = [i+1 for i in start_idx]\n\n    if end_idx is None:\n        end_idx = [0]*num_vehicles\n    elif trim_paths:\n        end_idx = [i+1 for i in end_idx]\n\n    def distance_callback(from_index, to_index):\n        from_node = manager.IndexToNode(from_index)\n        to_node = 
manager.IndexToNode(to_index)\n        return distance_mat[from_node][to_node]\n\n    # num_locations, num vehicles, start, end\n    manager = pywrapcp.RoutingIndexManager(len(distance_mat), \n                                           num_vehicles, \n                                           start_idx,\n                                           end_idx)\n    routing = pywrapcp.RoutingModel(manager)\n    transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n    if num_vehicles > 1:\n        # Dummy distaance constraint to ensure all paths have similar length\n        dimension_name = \"Distance\"\n        routing.AddDimension(\n            transit_callback_index,\n            0,  # no slack\n            max_dist,  # vehicle maximum travel distance\n            True,  # start cumul to zero\n            dimension_name,\n        )\n        distance_dimension = routing.GetDimensionOrDie(dimension_name)\n        distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n    search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n    search_parameters.first_solution_strategy = (\n        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n    )\n    search_parameters.local_search_metaheuristic = (\n        routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n    )\n    search_parameters.time_limit.seconds = 10\n    solution = routing.SolveWithParameters(search_parameters)\n\n    paths = None\n    if solution is not None:\n        paths, distances = get_routes(manager, routing, \n                                      solution, num_vehicles, \n                                      start_idx, end_idx, trim_paths)\n        for path in paths:\n            if len(path) < 2:\n                print('TSP Warning: Empty path detected')\n                return run_tsp(nodes, num_vehicles, int(np.mean(distances)*(1.5/depth)), depth+1)\n    else:\n        print('TSP 
Warning: No solution found')\n        return run_tsp(nodes, num_vehicles, int(max_dist*1.5), depth+1)\n\n    # Map paths from node indices to node locations\n    paths = [nodes[path] for path in paths]\n\n    # Resample each solution path to have resample number of points\n    if resample is not None:\n        paths = np.array([resample_path(path, resample) for path in paths])\n\n    # Convert distances back to floats in the original scale of the nodes\n    distances = np.array(distances)/1e4\n    return paths, distances\n
"},{"location":"API-reference.html#sgptools.utils.misc.cont2disc","title":"cont2disc(Xu, candidates, candidate_labels=None)","text":"

Map continuous space locations to a discrete set of candidate locations

Parameters:

Name Type Description Default Xu ndarray

(m, 2); Continuous space points

required candidates ndarray

(n, 2); Discrete set of candidate locations

required candidate_labels ndarray

(n, 1); Labels corresponding to the discrete set of candidate locations

None

Returns:

Name Type Description Xu_x ndarray

Discrete space points' locations

Xu_y ndarray

Labels of the discrete space points. Returned only if candidate_labels was passed to the function

Source code in sgptools/utils/misc.py
def cont2disc(Xu, candidates, candidate_labels=None):\n    \"\"\"Map continuous space locations to a discrete set of candidate location\n\n    Args:\n        Xu (ndarray): (m, 2); Continuous space points\n        candidates (ndarray): (n, 2); Discrete set of candidate locations\n        candidate_labels (ndarray): (n, 1); Labels corresponding to the discrete set of candidate locations\n\n    Returns:\n        Xu_x (ndarray): Discrete space points' locations \n        Xu_y (ndarray): Labels of the discrete space points. Returned only if `candidate_labels`\n                        was passed to the function\n\n    \"\"\"\n    # Sanity check to ensure that there are candidates to match\n    if len(candidates)==0:\n        return []\n    dists = pairwise_distances(candidates, Y=Xu, metric='euclidean')\n    row_ind, _ = linear_sum_assignment(dists)\n    Xu_X = candidates[row_ind].copy()\n    if candidate_labels is not None:\n        Xu_y = candidate_labels[row_ind].copy()\n        return Xu_X, Xu_y\n    else:\n        return Xu_X\n
"},{"location":"API-reference.html#sgptools.utils.misc.get_inducing_pts","title":"get_inducing_pts(data, num_inducing, orientation=False, random=False)","text":"

Selects a subset of the data points to be used as inducing points. The default approach uses kmeans to select the subset.

Parameters:

Name Type Description Default data ndarray

(n, 2); Data points to select the inducing points from

required num_inducing int

Number of inducing points

required orientation bool

If True, add an additional dimension to model the sensor FoV rotation angle

False random bool

If True, the subset of inducing points are selected randomly instead of using kmeans

False

Returns:

Name Type Description Xu ndarray

(m, d); Inducing points in the position and orientation space. m is the number of inducing points, d is the dimension of the space (x, y, optional - angle in radians)

Source code in sgptools/utils/misc.py
def get_inducing_pts(data, num_inducing, orientation=False, random=False):\n    \"\"\"Selects a subset of the data points to be used as inducing points. \n    The default approach uses kmeans to select the subset. \n\n    Args:\n        data (ndarray): (n, 2); Data points to select the inducing points from \n        num_inducing (int): Number of inducing points\n        orientation (bool): If True, add an additional dimension to model the sensor \n                            FoV rotation angle\n        random (bool): If True, the subset of inducing points are selected randomly \n                       instead of using kmeans\n\n    Returns:\n        Xu (ndarray): (m, d); Inducing points in the position and orientation space.\n                        `m` is the number of inducing points, \n                        `d` is the dimension of the space (x, y, optional - angle in radians)\n    \"\"\"\n    if random:\n        idx = np.random.randint(len(data), size=num_inducing)\n        Xu = data[idx]\n    else:\n        Xu = kmeans2(data, num_inducing, minit=\"points\")[0]\n    if orientation:\n        thetas = np.random.uniform(0, 2 * np.pi, size=(Xu.shape[0], 1))\n        Xu = np.concatenate([Xu, thetas], axis=1)\n    return Xu\n
"},{"location":"API-reference.html#sgptools.utils.misc.interpolate_path","title":"interpolate_path(waypoints, sampling_rate=0.05)","text":"

Interpolate additional points between the given waypoints to simulate continuous sensing robots

Parameters:

Name Type Description Default waypoints (n, d)

Waypoints of the robot's path

required sampling_rate float

Distance between each pair of interpolated points

0.05

Returns:

Name Type Description path ndarray

(p, d) Interpolated path; p depends on the sampling_rate

Source code in sgptools/utils/misc.py
def interpolate_path(waypoints, sampling_rate=0.05):\n    \"\"\"Interpolate additional points between the given waypoints to simulate continuous sensing robots\n\n    Args:\n        waypoints (n, d): Waypoints of the robot's path\n        sampling_rate (float): Distance between each pair of interpolated points\n\n    Returns:\n        path (ndarray): (p, d) Interpolated path, `p` depends on the sampling_rate rate\n    \"\"\"\n    interpolated_path = []\n    for i in range(2, len(waypoints)+1):\n        dist = get_distance(waypoints[i-2:i])\n        num_samples = int(dist / sampling_rate)\n        points = np.linspace(waypoints[i-1], waypoints[i-2], num_samples)\n        interpolated_path.extend(points)\n    return np.array(interpolated_path)\n
"},{"location":"API-reference.html#sgptools.utils.misc.plot_paths","title":"plot_paths(paths, candidates=None, title=None)","text":"

Function to plot the IPP solution paths

Parameters:

Name Type Description Default paths ndarray

(r, m, 2); r paths with m waypoints each

required candidates ndarray

(n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach

None title str

Title of the plot

None Source code in sgptools/utils/misc.py
def plot_paths(paths, candidates=None, title=None):\n    \"\"\"Function to plot the IPP solution paths\n\n    Args:\n        paths (ndarray): (r, m, 2); `r` paths with `m` waypoints each\n        candidates (ndarray): (n, 2); Candidate unlabeled locations used in the SGP-based sensor placement approach\n        title (str): Title of the plot\n    \"\"\"\n    plt.figure()\n    for i, path in enumerate(paths):\n        plt.plot(path[:, 0], path[:, 1], \n                    c='r', label='Path', zorder=1, marker='o')\n        plt.scatter(path[0, 0], path[0, 1], \n                    c='g', label='Start', zorder=2, marker='o')\n        if candidates is not None:\n            plt.scatter(candidates[:, 0], candidates[:, 1], \n                        c='k', s=1, label='Unlabeled Train-Set Points', zorder=0)\n        if i==0:\n            plt.legend(bbox_to_anchor=(1.0, 1.02))\n    if title is not None:\n        plt.title(title)\n    plt.xlabel('X')\n    plt.ylabel('Y')\n
"},{"location":"API-reference.html#sgptools.utils.misc.project_waypoints","title":"project_waypoints(waypoints, candidates)","text":"

Project the waypoints back to the candidate set while retaining the waypoint visitation order.

Parameters:

Name Type Description Default waypoints (n, d)

Waypoints of the robot's path

required candidates ndarray

(n, 2); Discrete set of candidate locations

required

Returns:

Name Type Description waypoints (n, d)

Projected waypoints of the robot's path

Source code in sgptools/utils/misc.py
def project_waypoints(waypoints, candidates):\n    \"\"\"Project the waypoints back to the candidate set while retaining the \n    waypoint visitation order.\n\n    Args:\n        waypoints (n, d): Waypoints of the robot's path\n        candidates (ndarray): (n, 2); Discrete set of candidate locations\n\n    Returns:\n        waypoints (n, d): Projected waypoints of the robot's path\n    \"\"\"\n    waypoints_disc = cont2disc(waypoints, candidates)\n    waypoints_valid = _reoder_path(waypoints, waypoints_disc)\n    return waypoints_valid\n
"},{"location":"API-reference.html#sgptools.utils.metrics.gaussian_entropy","title":"gaussian_entropy(K)","text":"

Computes GP-based entropy from a kernel matrix

Parameters:

Name Type Description Default K ndarray

(n, n); kernel matrix

required

Returns:

Name Type Description entropy float

Entropy computed from the kernel matrix

Source code in sgptools/utils/metrics.py
def gaussian_entropy(K):\n    \"\"\"Computes GP-based entropy from a kernel matrix\n\n    Args:\n        K (ndarray): (n, n); kernel matrix\n\n    Returns:\n        entropy (float): Entropy computed from the kernel matrix\n    \"\"\"\n    return multivariate_normal(mean=None, cov=K, allow_singular=True).entropy()\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_distance","title":"get_distance(X)","text":"

Compute the length of a path (L2-norm)

Parameters:

Name Type Description Default X ndarray

(m, d); Waypoints of a path

required

Returns:

Name Type Description dist float

Total path length

Source code in sgptools/utils/metrics.py
def get_distance(X):\n    \"\"\"Compute the length of a path (L2-norm)\n\n    Args:\n        X (ndarray): (m, d); Waypoints of a path\n\n    Returns:\n        dist (float): Total path length\n    \"\"\"\n    dist = np.linalg.norm(X[1:] - X[:-1], axis=-1)\n    dist = np.sum(dist)\n    return dist\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_elbo","title":"get_elbo(Xu, X_env, noise_variance, kernel, baseline=False)","text":"

Computes the ELBO of the SGP, corrected to be positive

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required X_env ndarray

(n, d); Data points used to approximate the bounds of the environment

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required baseline bool

If True, the ELBO is adjusted to be positive

False

Returns:

Name Type Description elbo float

ELBO of the SGP

Source code in sgptools/utils/metrics.py
def get_elbo(Xu, X_env, noise_variance, kernel, baseline=False):\n    \"\"\"Computes the ELBO of the SGP, corrected to be positive\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n        baseline (bool): If True, the ELBO is adjusted to be positive\n\n    Returns:\n        elbo (float): ELBO of the SGP\n    \"\"\"\n    if baseline:\n        sgpr = gpflow.models.SGPR(X_env,\n                                  noise_variance=noise_variance,\n                                  kernel=kernel,\n                                  inducing_variable=[[0, 0]])\n        baseline = sgpr.elbo().numpy()\n    else:\n        baseline = 0.0\n\n    sgpr = gpflow.models.SGPR(X_env,\n                              noise_variance=noise_variance,\n                              kernel=kernel, \n                              inducing_variable=Xu)\n    return (sgpr.elbo() - baseline).numpy()\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_kl","title":"get_kl(Xu, X_env, noise_variance, kernel)","text":"

Computes the KL divergence between the SGP and the GP

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required X_env ndarray

(n, d); Data points used to approximate the bounds of the environment

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description kl float

KL divergence between the SGP and the GP

Source code in sgptools/utils/metrics.py
def get_kl(Xu, X_env, noise_variance, kernel):\n    \"\"\"Computes the KL divergence between the SGP and the GP\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        X_env (ndarray): (n, d); Data points used to approximate the bounds of the environment\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        kl (float): KL divergence between the SGP and the GP\n    \"\"\"\n    sgpr = gpflow.models.SGPR(X_env,\n                              noise_variance=noise_variance,\n                              kernel=kernel,\n                              inducing_variable=Xu)\n\n    common = sgpr._common_calculation()\n    sigma_sq = common.sigma_sq\n    AAT = common.AAT\n\n    x, _ = sgpr.data\n    kdiag = sgpr.kernel(x, full_cov=False)\n\n    # tr(K) / \u03c3\u00b2\n    trace_k = tf.reduce_sum(kdiag / sigma_sq)\n    # tr(Q) / \u03c3\u00b2\n    trace_q = tf.reduce_sum(tf.linalg.diag_part(AAT))\n    # tr(K - Q) / \u03c3\u00b2\n    trace = trace_k - trace_q\n    trace = 0.5 * trace\n\n    return float(trace.numpy())\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_mi","title":"get_mi(Xu, candidate_locs, noise_variance, kernel)","text":"

Computes mutual information between the sensing locations and the candidate locations

Parameters:

Name Type Description Default Xu ndarray

(m, d); Sensing locations

required candidate_locs ndarray

(n, d); Candidate sensing locations

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description mi float

Mutual information computed using a GP

Source code in sgptools/utils/metrics.py
def get_mi(Xu, candidate_locs, noise_variance, kernel):\n    \"\"\"Computes mutual information between the sensing locations and the candidate locations\n\n    Args:\n        Xu (ndarray): (m, d); Sensing locations\n        candidate_locs (ndarray): (n, d); Candidate sensing locations \n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        mi (float): Mutual information computed using a GP\n    \"\"\"\n    Xu = np.array(Xu)\n    candidate_locs = np.array(candidate_locs)\n\n    gp = gpflow.models.GPR(data=(Xu, np.zeros((len(Xu), 1))),\n                           kernel=kernel,\n                           noise_variance=noise_variance)\n    _, sigma_a = gp.predict_f(candidate_locs, full_cov=True)\n    sigma_a = sigma_a.numpy()[0]\n    cond_entropy = gaussian_entropy(sigma_a)\n\n    K = kernel(candidate_locs, full_cov=True).numpy()\n    K += noise_variance * np.eye(len(candidate_locs))\n    entropy = gaussian_entropy(K)\n\n    return float(entropy - cond_entropy)\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_reconstruction","title":"get_reconstruction(Xu, X_test, noise_variance, kernel)","text":"

Computes the GP-based data field estimates with the solution placements as the training set

Parameters:

Name Type Description Default Xu tuple

(ndarray (m, d); ndarray (m, 1)); Sensing locations' input and corresponding ground truth labels

required X_test ndarray

(n, d); Testing data input locations

required noise_variance float

data variance

required kernel Kernel

gpflow kernel function

required

Returns:

Name Type Description y_pred ndarray

(n, 1); Predicted data field estimates

y_var ndarray

(n, 1); Prediction variance at each location in the data field

Source code in sgptools/utils/metrics.py
def get_reconstruction(Xu, X_test, noise_variance, kernel):\n    \"\"\"Computes the GP-based data field estimates with the solution placements as the training set\n\n    Args:\n        Xu (tuple): (ndarray (m, d); ndarray (m, 1)); Sensing locations' input \n                    and corresponding ground truth labels\n        X_test (ndarray): (n, d); Testing data input locations\n        noise_variance (float): data variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        y_pred (ndarray): (n, 1); Predicted data field estimates\n        y_var (ndarray): (n, 1); Prediction variance at each location in the data field\n    \"\"\"\n    Xu_X, Xu_y = Xu\n\n    # Get the GP predictions\n    gpr = gpflow.models.GPR((Xu_X, Xu_y),\n                            noise_variance=noise_variance,\n                            kernel=kernel)\n    y_pred, y_var = gpr.predict_f(X_test)\n    y_pred = y_pred.numpy().reshape(-1, 1)\n\n    return y_pred, y_var\n
"},{"location":"API-reference.html#sgptools.utils.metrics.get_rmse","title":"get_rmse(y_pred, y_test)","text":"

Computes the root-mean-square error between y_pred and y_test

Parameters:

Name Type Description Default y_pred ndarray

(n, 1); Predicted data field estimate

required y_test ndarray

(n, 1); Ground truth data field

required

Returns:

Name Type Description rmse float

Computed RMSE

Source code in sgptools/utils/metrics.py
def get_rmse(y_pred, y_test):\n    \"\"\"Computes the root-mean-square error between `y_pred` and `y_test`\n\n    Args:\n        y_pred (ndarray): (n, 1); Predicted data field estimate\n        y_test (ndarray): (n, 1); Ground truth data field \n\n    Returns:\n        rmse (float): Computed RMSE\n    \"\"\"\n    return np.sqrt(np.mean(np.square(y_pred - y_test)))\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.get_model_params","title":"get_model_params(X_train, y_train, max_steps=1500, lr=0.01, print_params=True, lengthscales=1.0, variance=1.0, noise_variance=0.1, kernel=None, **kwargs)","text":"

Train a GP on the given training set

Parameters:

Name Type Description Default X_train ndarray

(n, d); Training set inputs

required y_train ndarray

(n, 1); Training set labels

required max_steps int

Maximum number of optimization steps

1500 lr float

Optimization learning rate

0.01 print_params bool

If True, prints the optimized GP parameters

True lengthscales float or list

Kernel lengthscale(s), if passed as a list, each element corresponds to each data dimension

1.0 variance float

Kernel variance

1.0 noise_variance float

Data noise variance

0.1 kernel Kernel

gpflow kernel function

None

Returns:

Name Type Description loss list

Loss values obtained during training

variance float

Optimized data noise variance

kernel Kernel

Optimized gpflow kernel function

Source code in sgptools/utils/gpflow.py
def get_model_params(X_train, y_train, \n                     max_steps=1500, \n                     lr=1e-2, \n                     print_params=True, \n                     lengthscales=1.0, \n                     variance=1.0, \n                     noise_variance=0.1,\n                     kernel=None,\n                     **kwargs):\n    \"\"\"Train a GP on the given training set\n\n    Args:\n        X_train (ndarray): (n, d); Training set inputs\n        y_train (ndarray): (n, 1); Training set labels\n        max_steps (int): Maximum number of optimization steps\n        lr (float): Optimization learning rate\n        print_params (bool): If True, prints the optimized GP parameters\n        lengthscales (float or list): Kernel lengthscale(s), if passed as a list, \n                                each element corresponds to each data dimension\n        variance (float): Kernel variance\n        noise_variance (float): Data noise variance\n        kernel (gpflow.kernels.Kernel): gpflow kernel function\n\n    Returns:\n        loss (list): Loss values obtained during training\n        variance (float): Optimized data noise variance\n        kernel (gpflow.kernels.Kernel): Optimized gpflow kernel function\n    \"\"\"\n    if kernel is None:\n        kernel = gpflow.kernels.SquaredExponential(lengthscales=lengthscales, \n                                                   variance=variance)\n\n    gpr_gt = gpflow.models.GPR(data=(X_train, y_train), \n                               kernel=kernel,\n                               noise_variance=noise_variance)\n\n    if max_steps > 0:\n        loss = optimize_model(gpr_gt, max_steps=max_steps, lr=lr, **kwargs)\n    else:\n        loss = 0\n\n    if print_params:\n        print_summary(gpr_gt)\n\n    return loss, gpr_gt.likelihood.variance, kernel\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.optimize_model","title":"optimize_model(model, max_steps=2000, kernel_grad=True, lr=0.01, optimizer='tf', method=None, verbose=False, trace_fn=None, convergence_criterion=True, trainable_variables=None, tol=None)","text":"

Trains a GP/SGP model

Parameters:

Name Type Description Default model models

GPflow GP/SGP model to train

required max_steps int

Maximum number of training steps

2000 kernel_grad bool

If False, the kernel parameters will not be optimized

True lr float

Optimization learning rate

0.01 optimizer str

Optimizer to use for training (scipy or tf)

'tf' method str

Optimization method refer to scipy minimize and tf optimizers for full list

None verbose bool

If true, the training progress will be printed

False trace_fn str

Function to trace metrics during training. If None, the loss values are traced; if traceXu, the inducing point states at each optimization step are traced

None convergence_criterion bool

If True, enables early stopping when the loss plateaus

True trainable_variables list

List of model variables to train (can be used to limit training to a subset of variables)

None tol float

Convergence tolerance to decide when to stop optimization

None Source code in sgptools/utils/gpflow.py
def optimize_model(model, \n                   max_steps=2000, \n                   kernel_grad=True, \n                   lr=1e-2, \n                   optimizer='tf', \n                   method=None,\n                   verbose=False, \n                   trace_fn=None,\n                   convergence_criterion=True,\n                   trainable_variables=None,\n                   tol=None):\n    \"\"\"\n    Trains a GP/SGP model\n\n    Args:\n        model (gpflow.models): GPflow GP/SGP model to train\n        max_steps (int): Maximum number of training steps\n        kernel_grad (bool): If False, the kernel parameters will not be optimized\n        lr (float): Optimization learning rate\n        optimizer (str): Optimizer to use for training (`scipy` or `tf`)\n        method (str): Optimization method refer to scipy minimize and tf optimizers for full list\n        verbose (bool): If true, the training progress will be printed\n        trace_fn (str): Function to trace metrics during training. 
\n                        If `None`, the loss values are traced;\n                        if `traceXu`, it the inducing points states at each optimization step are traced\n        convergence_criterion (bool): It True, enables early stopping when the loss plateaus\n        trainable_variables (list): List of model variables to train \n                                    (can be used to limit training to a subset of variables)\n        tol (float): Convergence tolerance to decide when to stop optimization\n    \"\"\"\n    # Train all variables if trainable_variables are not provided\n    # If kernel_gradient is False, disable the kernel parameter gradient updates\n    if trainable_variables is None and kernel_grad:\n        trainable_variables=model.trainable_variables\n    elif trainable_variables is None and not kernel_grad:\n        trainable_variables=model.trainable_variables[:1]\n\n    if optimizer == 'scipy':\n        if method is None:\n            method = 'L-BFGS-B'\n        opt = gpflow.optimizers.Scipy()\n        losses = opt.minimize(model.training_loss,\n                              trainable_variables,\n                              method=method,\n                              options=dict(disp=verbose, maxiter=max_steps),\n                              tol=tol)\n        losses = losses.fun\n    else:\n        if trace_fn is None:\n            trace_fn = lambda x: x.loss\n        elif trace_fn == 'traceXu':\n            def trace_fn(traceable_quantities):\n                return model.inducing_variable.Z.numpy()\n\n        if method is None:\n            method = 'adam'\n        opt = tf.keras.optimizers.get(method)\n        opt.learning_rate = lr\n        loss_fn = model.training_loss\n        if convergence_criterion:\n            convergence_criterion = tfp.optimizer.convergence_criteria.LossNotDecreasing(\n                                            atol=1e-5, \n                                            window_size=50,\n                        
                    min_num_steps=int(max_steps*0.1))\n        else:\n            convergence_criterion = None\n        losses = tfp.math.minimize(loss_fn,\n                                   trainable_variables=trainable_variables,\n                                   num_steps=max_steps,\n                                   optimizer=opt,\n                                   convergence_criterion=convergence_criterion,\n                                   trace_fn=trace_fn)\n        losses = losses.numpy()\n\n    return losses\n
"},{"location":"API-reference.html#sgptools.utils.gpflow.plot_loss","title":"plot_loss(losses, save_file=None)","text":"

Helper function to plot the training loss

Parameters:

Name Type Description Default losses list

list of loss values

required save_file str

If passed, the loss plot will be saved to the save_file

None Source code in sgptools/utils/gpflow.py
def plot_loss(losses, save_file=None):\n    \"\"\"Helper function to plot the training loss\n\n    Args:\n        losses (list): list of loss values\n        save_file (str): If passed, the loss plot will be saved to the `save_file`\n    \"\"\"\n    plt.plot(losses)\n    plt.title('Log Likelihood')\n    plt.xlabel('Iteration')\n    plt.ylabel('Log Likelihood')\n    ax = plt.gca()\n    ax.ticklabel_format(useOffset=False)\n\n    if save_file is not None:\n        plt.savefig(save_file, bbox_inches='tight')\n        plt.close()\n    else:\n        plt.show()\n
"},{"location":"API-reference.html#sgptools.utils.data.get_dataset","title":"get_dataset(dataset_type, dataset_path=None, num_train=1000, num_test=2500, num_candidates=150)","text":"

Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to generate train and test sets.

Parameters:

Name Type Description Default dataset_type str

'tif' or 'synthetic'. 'tif' will load and preprocess data from a GeoTIFF file. 'synthetic' will use the diamond square algorithm to generate synthetic elevation data.

required dataset_path str

Path to the dataset file, used only when dataset_type is 'tif'.

None num_train int

Number of training samples to generate.

1000 num_test int

Number of testing samples to generate.

2500 num_candidates int

Number of candidate locations to generate.

150

Returns:

Name Type Description X_train ndarray

(n, d); Training set inputs

y_train ndarray

(n, 1); Training set labels

X_test ndarray

(n, d); Testing set inputs

y_test ndarray

(n, 1); Testing set labels

candidates ndarray

(n, d); Candidate sensor placement locations

X

(n, d); Full dataset inputs

y

(n, 1); Full dataset labels

Source code in sgptools/utils/data.py
def get_dataset(dataset_type, dataset_path=None,\n                num_train=1000,\n                num_test=2500, \n                num_candidates=150):\n    \"\"\"Method to generate/load datasets and preprocess them for SP/IPP. The method uses kmeans to \n    generate train and test sets.\n\n    Args:\n        dataset_type (str): 'tif' or 'synthetic'. 'tif' will load and proprocess data from a GeoTIFF file. \n                        'synthetic' will use the diamond square algorithm to generate synthetic elevation data.\n        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.\n        num_train (int): Number of training samples to generate.\n        num_test (int): Number of testing samples to generate.\n        num_candidates (int): Number of candidate locations to generate.\n\n    Returns:\n       X_train (ndarray): (n, d); Training set inputs\n       y_train (ndarray): (n, 1); Training set labels\n       X_test (ndarray): (n, d); Testing set inputs\n       y_test (ndarray): (n, 1); Testing set labels\n       candidates (ndarray): (n, d); Candidate sensor placement locations\n       X: (n, d); Full dataset inputs\n       y: (n, 1); Full dataset labels\n    \"\"\"\n    # Load the data\n    if dataset_type == 'tif':\n        X, y = prep_tif_dataset(dataset_path=dataset_path)\n    elif dataset_type == 'synthetic':\n        X, y = prep_synthetic_dataset()\n\n    X_train = get_inducing_pts(X, num_train)\n    X_train, y_train = cont2disc(X_train, X, y)\n\n    X_test = get_inducing_pts(X, num_test)\n    X_test, y_test = cont2disc(X_test, X, y)\n\n    candidates = get_inducing_pts(X, num_candidates)\n    candidates = cont2disc(candidates, X)\n\n    # Standardize data\n    X_scaler = StandardScaler()\n    X_scaler.fit(X_train)\n    X_train = X_scaler.transform(X_train)*10.0\n    X_test = X_scaler.transform(X_test)*10.0\n    X = X_scaler.transform(X)*10.0\n\n    y_scaler = StandardScaler()\n    y_scaler.fit(y_train)\n    y_train = 
y_scaler.transform(y_train)\n    y_test = y_scaler.transform(y_test)\n    y = y_scaler.transform(y)\n\n    return X_train, y_train, X_test, y_test, candidates, X, y\n
"},{"location":"API-reference.html#sgptools.utils.data.point_pos","title":"point_pos(point, d, theta)","text":"

Generate a point at a distance d from a point at angle theta.

Parameters:

Name Type Description Default point ndarray

(N, 2); array of points

required d float

distance

required theta float

angle in radians

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def point_pos(point, d, theta):\n    '''\n    Generate a point at a distance d from a point at angle theta.\n\n    Args:\n        point (ndarray): (N, 2); array of points\n        d (float): distance\n        theta (float): angle in radians\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    return np.c_[point[:, 0] + d*np.cos(theta), point[:, 1] + d*np.sin(theta)]\n
"},{"location":"API-reference.html#sgptools.utils.data.prep_synthetic_dataset","title":"prep_synthetic_dataset()","text":"

Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm. https://github.com/buckinha/DiamondSquare

Args:

Returns: X: (n, d); Dataset input features y: (n, 1); Dataset labels

Source code in sgptools/utils/data.py
def prep_synthetic_dataset():\n    '''Generates a 50x50 grid of synthetic elevation data using the diamond square algorithm.\n    ```https://github.com/buckinha/DiamondSquare```\n\n    Args:\n\n    Returns:\n       X: (n, d); Dataset input features\n       y: (n, 1); Dataset labels\n    '''\n    data = diamond_square(shape=(50,50), \n                          min_height=0, \n                          max_height=30, \n                          roughness=0.5)\n\n    # create x and y coordinates from the extent\n    x_coords = np.arange(0, data.shape[0])/10\n    y_coords = np.arange(0, data.shape[1])/10\n    xx, yy = np.meshgrid(x_coords, y_coords)\n    X = np.c_[xx.ravel(), yy.ravel()]\n    y = data.ravel()\n    y = y.reshape(-1, 1)\n\n    return X.astype(float), y.astype(float)\n
"},{"location":"API-reference.html#sgptools.utils.data.prep_tif_dataset","title":"prep_tif_dataset(dataset_path)","text":"

Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features are set to the x and y pixel block coordinates and the labels are read from the file. The method also removes all invalid points.

Large tif files need to be downsampled using the following command: gdalwarp -tr 50 50 <input>.tif <output>.tif

Args: dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.

Returns: X: (n, d); Dataset input features y: (n, 1); Dataset labels

Source code in sgptools/utils/data.py
def prep_tif_dataset(dataset_path):\n    '''Load and preprocess a dataset from a GeoTIFF file (.tif file). The input features \n    are set to the x and y pixel block coordinates and the labels are read from the file.\n    The method also removes all invalid points.\n\n    Large tif files \n    need to be downsampled using the following command: \n    ```gdalwarp -tr 50 50 <input>.tif <output>.tif```\n\n    Args:\n        dataset_path (str): Path to the dataset file, used only when dataset_type is 'tif'.\n\n    Returns:\n       X: (n, d); Dataset input features\n       y: (n, 1); Dataset labels\n    '''\n    data = PIL.Image.open(dataset_path)\n    data = np.array(data)\n\n    # create x and y coordinates from the extent\n    x_coords = np.arange(0, data.shape[1])/10\n    y_coords = np.arange(data.shape[0], 0, -1)/10\n    xx, yy = np.meshgrid(x_coords, y_coords)\n    X = np.c_[xx.ravel(), yy.ravel()]\n    y = data.ravel()\n\n    # Remove invalid labels\n    y[np.where(y==-999999.0)] = np.nan\n    X = X[~np.isnan(y)]\n    y = y[~np.isnan(y)]\n\n    X = X.reshape(-1, 2)\n    y = y.reshape(-1, 1)\n\n    return X.astype(float), y.astype(float)\n
"},{"location":"API-reference.html#sgptools.utils.data.remove_circle_patches","title":"remove_circle_patches(X, Y, circle_patches)","text":"

Remove points inside circle patches.

Parameters:

Name Type Description Default X (ndarray

(N,); array of x-coordinate

required Y (ndarray

(N,); array of y-coordinate

required polygons list of matplotlib circle patches

Circle patches to remove from the X, Y points

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def remove_circle_patches(X, Y, circle_patches):\n    '''\n    Remove points inside polycircle patchesgons.\n\n    Args:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n        polygons (list of matplotlib circle patches): Circle patches to remove from the X, Y points\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    points = np.array([X.flatten(), Y.flatten()]).T\n    for circle_patch in circle_patches:\n        points = points[~circle_patch.contains_points(points)]\n    return points[:, 0], points[:, 1]\n
"},{"location":"API-reference.html#sgptools.utils.data.remove_polygons","title":"remove_polygons(X, Y, polygons)","text":"

Remove points inside polygons.

Parameters:

Name Type Description Default X (ndarray

(N,); array of x-coordinate

required Y (ndarray

(N,); array of y-coordinate

required polygons list of matplotlib path polygon

Polygons to remove from the X, Y points

required

Returns:

Name Type Description X ndarray

(N,); array of x-coordinate

Y ndarray

(N,); array of y-coordinate

Source code in sgptools/utils/data.py
def remove_polygons(X, Y, polygons):\n    '''\n    Remove points inside polygons.\n\n    Args:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n        polygons (list of matplotlib path polygon): Polygons to remove from the X, Y points\n\n    Returns:\n        X  (ndarray): (N,); array of x-coordinate\n        Y  (ndarray): (N,); array of y-coordinate\n    '''\n    points = np.array([X.flatten(), Y.flatten()]).T\n    for polygon in polygons:\n        p = path.Path(polygon)\n        points = points[~p.contains_points(points)]\n    return points[:, 0], points[:, 1]\n
"},{"location":"API-reference.html#_________________________2","title":"________________________","text":"

Provides a neural spectral kernel function along with an initialization function

"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.NeuralSpectralKernel","title":"NeuralSpectralKernel","text":"

Bases: Kernel

Neural Spectral Kernel function (non-stationary kernel function). Based on the implementation from the following repo

Refer to the following papers for more details
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]

Parameters:

Name Type Description Default input_dim int

Number of data dimensions

required active_dims int

Number of data dimensions that are used for computing the covariances

None Q int

Number of MLP mixture components used in the kernel function

1 hidden_sizes list

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

[32, 32] Source code in sgptools/kernels/neural_kernel.py
class NeuralSpectralKernel(gpflow.kernels.Kernel):\n    \"\"\"Neural Spectral Kernel function (non-stationary kernel function). \n    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)\n\n    Refer to the following papers for more details:\n        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]\n\n    Args:\n        input_dim (int): Number of data dimensions\n        active_dims (int): Number of data dimensions that are used for computing the covariances\n        Q (int): Number of MLP mixture components used in the kernel function\n        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.\n    \"\"\"\n    def __init__(self, input_dim, active_dims=None, Q=1, hidden_sizes=[32, 32]):\n        super().__init__(active_dims=active_dims)\n\n        self.input_dim = input_dim\n        self.Q = Q\n        self.num_hidden = len(hidden_sizes)\n\n        self.freq = []\n        self.length = []\n        self.var = []\n        for q in range(self.Q):\n            freq = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] + \n                                    [layers.Dense(input_dim, activation='softplus')])\n            length = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +\n                                   [layers.Dense(input_dim, activation='softplus')])\n            var = keras.Sequential([layers.Dense(hidden_sizes[i], activation='selu') for i in range(self.num_hidden)] +\n                                   [layers.Dense(1, activation='softplus')])\n            self.freq.append(freq)\n            self.length.append(length)\n            self.var.append(var)\n\n    def K(self, X, X2=None):\n        \"\"\"Computes the covariances between/amongst the input variables\n\n        Args:\n            X (ndarray): Variables to 
compute the covariance matrix\n            X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, \n                          the covariance between X and X is computed.\n\n        Returns:\n            cov (ndarray): covariance matrix\n        \"\"\"\n        if X2 is None:\n            X2 = X\n            equal = True\n        else:\n            equal = False\n\n        kern = 0.0\n        for q in range(self.Q):\n            # compute latent function values by the neural network\n            freq, freq2 = self.freq[q](X), self.freq[q](X2)\n            lens, lens2 = self.length[q](X), self.length[q](X2)\n            var, var2 = self.var[q](X), self.var[q](X2)\n\n            # compute length-scale term\n            Xr = tf.expand_dims(X, 1)  # N1 1 D\n            X2r = tf.expand_dims(X2, 0)  # 1 N2 D\n            l1 = tf.expand_dims(lens, 1)  # N1 1 D\n            l2 = tf.expand_dims(lens2, 0)  # 1 N2 D\n            L = tf.square(l1) + tf.square(l2)  # N1 N2 D\n            #D = tf.square((Xr - X2r) / L)  # N1 N2 D\n            D = tf.square(Xr - X2r) / L  # N1 N2 D\n            D = tf.reduce_sum(D, 2)  # N1 N2\n            det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D\n            det = tf.reduce_prod(det, 2)  # N1 N2\n            E = det * tf.exp(-D)  # N1 N2\n\n            # compute cosine term\n            muX = (tf.reduce_sum(freq * X, 1, keepdims=True)\n                   - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))\n            COS = tf.cos(2 * np.pi * muX)\n\n            # compute kernel variance term\n            WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T\n\n            # compute the q'th kernel component\n            kern += WW * E * COS\n        if equal:\n            return robust_kernel(kern, tf.shape(X)[0])\n        else:\n            return kern\n\n    def K_diag(self, X):\n        kd = default_jitter()\n        for q in range(self.Q):\n            kd += tf.square(self.var[q](X))\n        return 
tf.squeeze(kd)\n
"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.NeuralSpectralKernel.K","title":"K(X, X2=None)","text":"

Computes the covariances between/amongst the input variables

Parameters:

Name Type Description Default X ndarray

Variables to compute the covariance matrix

required X2 ndarray

If passed, the covariance between X and X2 is computed. Otherwise, the covariance between X and X is computed.

None

Returns:

Name Type Description cov ndarray

covariance matrix

Source code in sgptools/kernels/neural_kernel.py
def K(self, X, X2=None):\n    \"\"\"Computes the covariances between/amongst the input variables\n\n    Args:\n        X (ndarray): Variables to compute the covariance matrix\n        X2 (ndarray): If passed, the covariance between X and X2 is computed. Otherwise, \n                      the covariance between X and X is computed.\n\n    Returns:\n        cov (ndarray): covariance matrix\n    \"\"\"\n    if X2 is None:\n        X2 = X\n        equal = True\n    else:\n        equal = False\n\n    kern = 0.0\n    for q in range(self.Q):\n        # compute latent function values by the neural network\n        freq, freq2 = self.freq[q](X), self.freq[q](X2)\n        lens, lens2 = self.length[q](X), self.length[q](X2)\n        var, var2 = self.var[q](X), self.var[q](X2)\n\n        # compute length-scale term\n        Xr = tf.expand_dims(X, 1)  # N1 1 D\n        X2r = tf.expand_dims(X2, 0)  # 1 N2 D\n        l1 = tf.expand_dims(lens, 1)  # N1 1 D\n        l2 = tf.expand_dims(lens2, 0)  # 1 N2 D\n        L = tf.square(l1) + tf.square(l2)  # N1 N2 D\n        #D = tf.square((Xr - X2r) / L)  # N1 N2 D\n        D = tf.square(Xr - X2r) / L  # N1 N2 D\n        D = tf.reduce_sum(D, 2)  # N1 N2\n        det = tf.sqrt(2 * l1 * l2 / L)  # N1 N2 D\n        det = tf.reduce_prod(det, 2)  # N1 N2\n        E = det * tf.exp(-D)  # N1 N2\n\n        # compute cosine term\n        muX = (tf.reduce_sum(freq * X, 1, keepdims=True)\n               - tf.transpose(tf.reduce_sum(freq2 * X2, 1, keepdims=True)))\n        COS = tf.cos(2 * np.pi * muX)\n\n        # compute kernel variance term\n        WW = tf.matmul(var, var2, transpose_b=True)  # w*w'^T\n\n        # compute the q'th kernel component\n        kern += WW * E * COS\n    if equal:\n        return robust_kernel(kern, tf.shape(X)[0])\n    else:\n        return kern\n
"},{"location":"API-reference.html#sgptools.kernels.neural_kernel.init_neural_kernel","title":"init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None)","text":"

Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). Based on the implementation from the following repo

Refer to the following papers for more details
  • Neural Non-Stationary Spectral Kernel [Remes et al., 2018]

Parameters:

Name Type Description Default x ndarray

(n, d); Input training set points

required y ndarray

(n, 1); Training set labels

required inducing_variable ndarray

(m, d); Initial inducing points

required Q int

Number of MLP mixture components used in the kernel function

required n_inits int

Number of times to initialize the kernel function (returns the best model)

1 hidden_sizes list

Number of hidden units in each MLP layer. Length of the list determines the number of layers.

None Source code in sgptools/kernels/neural_kernel.py
def init_neural_kernel(x, y, inducing_variable, Q, n_inits=1, hidden_sizes=None):\n    \"\"\"Helper function to initialize a Neural Spectral Kernel function (non-stationary kernel function). \n    Based on the implementation from the following [repo](https://github.com/sremes/nssm-gp/tree/master?tab=readme-ov-file)\n\n    Refer to the following papers for more details:\n        - Neural Non-Stationary Spectral Kernel [Remes et al., 2018]\n\n    Args:\n        x (ndarray): (n, d); Input training set points\n        y (ndarray): (n, 1); Training set labels\n        inducing_variable (ndarray): (m, d); Initial inducing points\n        Q (int): Number of MLP mixture components used in the kernel function\n        n_inits (int): Number of times to initalize the kernel function (returns the best model)\n        hidden_sizes (list): Number of hidden units in each MLP layer. Length of the list determines the number of layers.\n    \"\"\"\n    x, y = data_input_to_tensor((x, y))\n\n    print('Initializing neural spectral kernel...')\n    best_loglik = -np.inf\n    best_m = None\n    N, input_dim = x.shape\n\n    for k in range(n_inits):\n        # gpflow.reset_default_graph_and_session()\n        k = NeuralSpectralKernel(input_dim=input_dim, Q=Q, \n                                    hidden_sizes=hidden_sizes)\n        model = SGPR((x, y), inducing_variable=inducing_variable, \n                        kernel=k)\n        loglik = model.elbo()\n        if loglik > best_loglik:\n            best_loglik = loglik\n            best_m = model\n        del model\n        gc.collect()\n    print('Best init: %f' % best_loglik)\n\n    return best_m\n
"}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..937c2ef --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,13 @@ + + + + https://www.itskalvik.com/sgp-tools/index.html + 2024-08-17 + daily + + + https://www.itskalvik.com/sgp-tools/API-reference.html + 2024-08-17 + daily + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000..678be84 Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/stylesheets/extra.css b/stylesheets/extra.css new file mode 100644 index 0000000..130824f --- /dev/null +++ b/stylesheets/extra.css @@ -0,0 +1,25 @@ +:root { + --md-primary-fg-color: #be8afc; + --md-primary-bg-color: #000000; + --md-accent-fg-color: #be8afc; + --md-accent-bg-color: #be8afc; +} + + +.video-con { + position: relative; + padding-bottom: 56.25%; + padding-top: 30px; + height: 0; + overflow: hidden; +} + +.video-con iframe, +.video-con object, +.video-con embed { + position: absolute; + top: 0; + left: 0; + width: 100%; + height: 100%; +}