From 8c383ca19596e1d47284cc7ff2328adb8d68c85d Mon Sep 17 00:00:00 2001 From: tonifinger <129007376+tonifinger@users.noreply.github.com> Date: Tue, 19 Nov 2024 23:17:36 +0100 Subject: [PATCH 01/11] Enable compliance tests to use plugins for cluster provisioning (#753) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Toni Finger Signed-off-by: Matthias Büchse Co-authored-by: Matthias Büchse --- Tests/config.toml | 40 ++++++++++ Tests/kaas/clusterspec.yaml | 11 +++ Tests/kaas/kind_config.yaml | 5 ++ Tests/kaas/plugin/README.md | 38 +++++++++ Tests/kaas/plugin/interface.py | 54 +++++++++++++ Tests/kaas/plugin/plugin_kind.py | 50 ++++++++++++ Tests/kaas/plugin/plugin_static.py | 19 +++++ Tests/kaas/plugin/requirements.in | 2 + Tests/kaas/plugin/requirements.txt | 60 ++++++++++++++ Tests/kaas/plugin/run_plugin.py | 58 ++++++++++++++ Tests/scs-compatible-kaas.yaml | 22 ++---- Tests/scs-test-runner.py | 122 +++++++++++++++++++++++------ 12 files changed, 443 insertions(+), 38 deletions(-) create mode 100644 Tests/kaas/clusterspec.yaml create mode 100644 Tests/kaas/kind_config.yaml create mode 100644 Tests/kaas/plugin/README.md create mode 100644 Tests/kaas/plugin/interface.py create mode 100644 Tests/kaas/plugin/plugin_kind.py create mode 100644 Tests/kaas/plugin/plugin_static.py create mode 100644 Tests/kaas/plugin/requirements.in create mode 100644 Tests/kaas/plugin/requirements.txt create mode 100755 Tests/kaas/plugin/run_plugin.py diff --git a/Tests/config.toml b/Tests/config.toml index a0173c25d..6b7a5c71f 100644 --- a/Tests/config.toml +++ b/Tests/config.toml @@ -34,11 +34,51 @@ subjects = [ workers = 4 +[presets.kaas-dev] +scopes = [ + "scs-compatible-kaas", +] +subjects = [ + "kind-current", + "kind-current-1", + "kind-current-2", +] +workers = 1 # better restrict this with clusters running on local machine + + [scopes.scs-compatible-iaas] spec = "./scs-compatible-iaas.yaml" +[scopes.scs-compatible-kaas] +spec = "./scs-compatible-kaas.yaml" + + # default subject (not a real subject, but used to declare a default mapping) # (this is the only mapping declaration that supports using Python string interpolation) [subjects._.mapping] os_cloud = "{subject}" +subject_root = "{subject}" + + +[subjects._.kubernetes_setup] +clusterspec = "kaas/clusterspec.yaml" + + +[subjects.kind-current.kubernetes_setup] +kube_plugin = "kind" +kube_plugin_config = "kaas/kind_config.yaml" +clusterspec_cluster = "current-k8s-release" + + +[subjects.kind-current-1.kubernetes_setup] +kube_plugin = "kind" +kube_plugin_config = "kaas/kind_config.yaml" +clusterspec_cluster = "current-k8s-release-1" + + +[subjects.kind-current-2.kubernetes_setup] +kube_plugin = "kind" +kube_plugin_config = "kaas/kind_config.yaml" +clusterspec_cluster = "current-k8s-release-2" + diff --git a/Tests/kaas/clusterspec.yaml b/Tests/kaas/clusterspec.yaml new file mode 100644 index 000000000..c8439a89f --- /dev/null +++ b/Tests/kaas/clusterspec.yaml @@ -0,0 +1,11 @@ +# this file specifies all clusters that have to be provisioned for the tests to run +clusters: + current-k8s-release: + branch: "1.31" + kubeconfig: kubeconfig.yaml + current-k8s-release-1: + branch: "1.30" + kubeconfig: kubeconfig.yaml + current-k8s-release-2: + branch: "1.29" + kubeconfig: kubeconfig.yaml diff --git a/Tests/kaas/kind_config.yaml b/Tests/kaas/kind_config.yaml new file mode 100644 index 000000000..ead21eb72 --- /dev/null +++ b/Tests/kaas/kind_config.yaml @@ -0,0 +1,5 @@ +kind: Cluster +apiVersion: 
kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: worker diff --git a/Tests/kaas/plugin/README.md b/Tests/kaas/plugin/README.md new file mode 100644 index 000000000..e54cf1864 --- /dev/null +++ b/Tests/kaas/plugin/README.md @@ -0,0 +1,38 @@ +# Plugin for provisioning k8s clusters and performing conformance tests on these clusters + +## Development environment + +### requirements + +* [docker](https://docs.docker.com/engine/install/) +* [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) + +### setup for development + +1. Generate python 3.10 env + + ```bash + sudo apt-get install python3.10-dev + virtualenv -p /usr/bin/python3.10 venv + echo "*" >> venv/.gitignore + source venv/bin/activate + (venv) curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 + (venv) python3.10 -m pip install --upgrade pip + (venv) python3.10 -m pip --version + + ``` + +2. Install dependencies: + + ```bash + (venv) pip install pip-tools + (venv) pip-compile requirements.in + (venv) pip-sync requirements.txt + ``` + +3. Set environment variables and launch the process: + + ```bash + (venv) export CLUSTER_PROVIDER="kind" + (venv) python run.py + ``` diff --git a/Tests/kaas/plugin/interface.py b/Tests/kaas/plugin/interface.py new file mode 100644 index 000000000..f62e3b3e2 --- /dev/null +++ b/Tests/kaas/plugin/interface.py @@ -0,0 +1,54 @@ + + +class KubernetesClusterPlugin(): + """ + An abstract base class for custom Kubernetes cluster provider plugins. + It represents an interface class from which the api provider-specific + plugins must be derived as child classes + + To implement fill the methods `create_cluster` and `delete_cluster` with + api provider-specific functionalities for creating and deleting clusters. + The `create_cluster` method must ensure that the kubeconfigfile is provided + at the position in the file system defined by the parameter + `kubeconfig_filepath` + + - Implement `create_cluster` and `delete_cluster` methods + - Create `__init__(self, config_file)` method to handle api specific + configurations. + + Example: + .. code:: python + + from interface import KubernetesClusterPlugin + from apiX_library import cluster_api_class as ClusterAPI + + class PluginX(KubernetesClusterPlugin): + + def __init__(self, config_file): + self.config = config_file + + def create_cluster(self, cluster_name, version, kubeconfig_filepath): + self.cluster = ClusterAPI(name=cluster_name, image=cluster_image, kubeconfig_filepath) + self.cluster.create(self.config) + + def delete_cluster(self, cluster_name): + self.cluster = ClusterAPI(cluster_name) + self.cluster.delete() + .. 
+ """ + + def create_cluster(self, cluster_name, version, kubeconfig_filepath): + """ + This method is to be called to create a k8s cluster + :param: cluster_name: + :param: version: + :param: kubeconfig_filepath: + """ + raise NotImplementedError + + def delete_cluster(self, cluster_name): + """ + This method is to be called in order to unprovision a cluster + :param: cluster_name: + """ + raise NotImplementedError diff --git a/Tests/kaas/plugin/plugin_kind.py b/Tests/kaas/plugin/plugin_kind.py new file mode 100644 index 000000000..26cd3f23d --- /dev/null +++ b/Tests/kaas/plugin/plugin_kind.py @@ -0,0 +1,50 @@ +import logging +import os +import os.path +from pathlib import Path + +from interface import KubernetesClusterPlugin +from pytest_kind import KindCluster + +logger = logging.getLogger(__name__) + + +class PluginKind(KubernetesClusterPlugin): + """ + Plugin to handle the provisioning of kubernetes cluster for + conformance testing purpose with the use of Kind + """ + def __init__(self, config_path): + logger.info("Init PluginKind") + self.config = config_path + logger.debug(self.config) + self.working_directory = os.getcwd() + logger.debug(f"Working from {self.working_directory}") + + def create_cluster(self, cluster_name, version, kubeconfig): + """ + This method is to be called to create a k8s cluster + :param: kubernetes_version: + :return: kubeconfig_filepath + """ + cluster_version = version + if cluster_version == '1.29': + cluster_version = 'v1.29.8' + elif cluster_version == '1.30': + cluster_version = 'v1.30.4' + elif cluster_version == '1.31' or cluster_version == 'default': + cluster_version = 'v1.31.1' + cluster_image = f"kindest/node:{cluster_version}" + kubeconfig_filepath = Path(kubeconfig) + if kubeconfig_filepath is None: + raise ValueError("kubeconfig_filepath is missing") + else: + self.cluster = KindCluster(name=cluster_name, image=cluster_image, kubeconfig=kubeconfig_filepath) + if self.config is None: + self.cluster.create() + else: + self.cluster.create(self.config) + + def delete_cluster(self, cluster_name): + self.cluster = KindCluster(cluster_name) + self.cluster.delete() diff --git a/Tests/kaas/plugin/plugin_static.py b/Tests/kaas/plugin/plugin_static.py new file mode 100644 index 000000000..0bd24707e --- /dev/null +++ b/Tests/kaas/plugin/plugin_static.py @@ -0,0 +1,19 @@ +import shutil + +from interface import KubernetesClusterPlugin + + +class PluginStatic(KubernetesClusterPlugin): + """ + Plugin to handle the provisioning of kubernetes + using a kubeconfig file + """ + + def __init__(self, config_path): + self.kubeconfig_path = config_path + + def create_cluster(self, cluster_name, version, kubeconfig): + shutil.copyfile(self.kubeconfig_path, kubeconfig) + + def delete_cluster(self, cluster_name, version): + pass diff --git a/Tests/kaas/plugin/requirements.in b/Tests/kaas/plugin/requirements.in new file mode 100644 index 000000000..0a60c3c3c --- /dev/null +++ b/Tests/kaas/plugin/requirements.in @@ -0,0 +1,2 @@ +pytest-kind +kubernetes diff --git a/Tests/kaas/plugin/requirements.txt b/Tests/kaas/plugin/requirements.txt new file mode 100644 index 000000000..a04a03167 --- /dev/null +++ b/Tests/kaas/plugin/requirements.txt @@ -0,0 +1,60 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile requirements.in +# +cachetools==5.5.0 + # via google-auth +certifi==2024.8.30 + # via + # kubernetes + # requests +charset-normalizer==3.3.2 + # via requests +google-auth==2.34.0 + # via kubernetes +idna==3.8 + 
# via requests +kubernetes==30.1.0 + # via -r requirements.in +oauthlib==3.2.2 + # via + # kubernetes + # requests-oauthlib +pyasn1==0.6.0 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.0 + # via google-auth +pykube-ng==23.6.0 + # via pytest-kind +pytest-kind==22.11.1 + # via -r requirements.in +python-dateutil==2.9.0.post0 + # via kubernetes +pyyaml==6.0.2 + # via + # kubernetes + # pykube-ng +requests==2.32.3 + # via + # kubernetes + # pykube-ng + # requests-oauthlib +requests-oauthlib==2.0.0 + # via kubernetes +rsa==4.9 + # via google-auth +six==1.16.0 + # via + # kubernetes + # python-dateutil +urllib3==2.2.2 + # via + # kubernetes + # pykube-ng + # requests +websocket-client==1.8.0 + # via kubernetes diff --git a/Tests/kaas/plugin/run_plugin.py b/Tests/kaas/plugin/run_plugin.py new file mode 100755 index 000000000..7b4084107 --- /dev/null +++ b/Tests/kaas/plugin/run_plugin.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +import logging +import os.path + +import click +import yaml + +from plugin_kind import PluginKind +from plugin_static import PluginStatic + +PLUGIN_LOOKUP = { + "kind": PluginKind, + "static": PluginStatic, +} + + +def init_plugin(plugin_kind, config_path): + plugin_maker = PLUGIN_LOOKUP.get(plugin_kind) + if plugin_maker is None: + raise ValueError(f"unknown plugin '{plugin_kind}'") + return plugin_maker(config_path) + + +def load_spec(clusterspec_path): + with open(clusterspec_path, "rb") as fileobj: + return yaml.load(fileobj, Loader=yaml.SafeLoader) + + +@click.group() +def cli(): + pass + + +@cli.command() +@click.argument('plugin_kind', type=click.Choice(list(PLUGIN_LOOKUP), case_sensitive=False)) +@click.argument('plugin_config', type=click.Path(exists=True, dir_okay=False)) +@click.argument('clusterspec_path', type=click.Path(exists=True, dir_okay=False)) +@click.argument('cluster_id', type=str, default="default") +def create(plugin_kind, plugin_config, clusterspec_path, cluster_id): + clusterspec = load_spec(clusterspec_path)['clusters'] + plugin = init_plugin(plugin_kind, plugin_config) + clusterinfo = clusterspec[cluster_id] + plugin.create_cluster(cluster_id, clusterinfo['branch'], os.path.abspath(clusterinfo['kubeconfig'])) + + +@cli.command() +@click.argument('plugin_kind', type=click.Choice(list(PLUGIN_LOOKUP), case_sensitive=False)) +@click.argument('plugin_config', type=click.Path(exists=True, dir_okay=False)) +@click.argument('clusterspec_path', type=click.Path(exists=True, dir_okay=False)) +@click.argument('cluster_id', type=str, default="default") +def delete(plugin_kind, plugin_config, clusterspec_path, cluster_id): + plugin = init_plugin(plugin_kind, plugin_config) + plugin.delete_cluster(cluster_id) + + +if __name__ == '__main__': + logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) + cli() diff --git a/Tests/scs-compatible-kaas.yaml b/Tests/scs-compatible-kaas.yaml index 4aa540999..a4010c64e 100644 --- a/Tests/scs-compatible-kaas.yaml +++ b/Tests/scs-compatible-kaas.yaml @@ -2,7 +2,9 @@ name: SCS-compatible KaaS uuid: 1fffebe6-fd4b-44d3-a36c-fc58b4bb0180 url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Tests/scs-compatible-kaas.yaml variables: - - kubeconfig + - subject_root + # directory containing the kubeconfig file for the subject under test + # (note that we consider each kubernetes branch a test subject of its own) modules: - id: cncf-k8s-conformance name: CNCF Kubernetes conformance @@ -12,38 +14,30 @@ modules: tags: [mandatory] - id: scs-0210-v2 name: Kubernetes version policy - 
url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0210-v2-k8s-version-policy.md + url: https://docs.scs.community/standards/scs-0210-v2-k8s-version-policy run: - executable: ./kaas/k8s-version-policy/k8s_version_policy.py - args: -k {kubeconfig} + args: -k {subject_root}/kubeconfig.yaml testcases: - id: version-policy-check tags: [mandatory] - id: scs-0214-v2 name: Kubernetes node distribution and availability - url: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Standards/scs-0214-v1-k8s-node-distribution.md + url: https://docs.scs.community/standards/scs-0214-v2-k8s-node-distribution run: - executable: ./kaas/k8s-node-distribution/k8s_node_distribution_check.py - args: -k {kubeconfig} + args: -k {subject_root}/kubeconfig.yaml testcases: - id: node-distribution-check tags: [mandatory] timeline: - # empty timeline might confuse tools, so put one "dummy" entry here - date: 2024-02-28 versions: v1: draft - v2: draft versions: - - version: v2 - include: - - cncf-k8s-conformance - - scs-0210-v2 - - scs-0214-v2 - targets: - main: mandatory - version: v1 include: + - cncf-k8s-conformance - scs-0210-v2 - scs-0214-v2 targets: diff --git a/Tests/scs-test-runner.py b/Tests/scs-test-runner.py index de7152428..780601e96 100755 --- a/Tests/scs-test-runner.py +++ b/Tests/scs-test-runner.py @@ -17,16 +17,23 @@ import click import tomli - logger = logging.getLogger(__name__) MONITOR_URL = "https://compliance.sovereignit.cloud/" +def ensure_dir(path): + try: + os.makedirs(path) + except FileExistsError: + pass + + class Config: def __init__(self): self.cwd = os.path.abspath(os.path.dirname(sys.argv[0]) or os.getcwd()) self.scs_compliance_check = os.path.join(self.cwd, 'scs-compliance-check.py') self.cleanup_py = os.path.join(self.cwd, 'cleanup.py') + self.run_plugin_py = os.path.join(self.cwd, 'kaas', 'plugin', 'run_plugin.py') self.ssh_keygen = shutil.which('ssh-keygen') self.curl = shutil.which('curl') self.secrets = {} @@ -58,42 +65,80 @@ def get_subject_mapping(self, subject): mapping.update(self.subjects.get(subject, {}).get('mapping', {})) return mapping + def get_kubernetes_setup(self, subject): + default_kubernetes_setup = self.subjects.get('_', {}).get('kubernetes_setup', {}) + kubernetes_setup = dict(default_kubernetes_setup) + kubernetes_setup.update(self.subjects.get(subject, {}).get('kubernetes_setup', {})) + return kubernetes_setup + def abspath(self, path): return os.path.join(self.cwd, path) def build_check_command(self, scope, subject, output): # TODO figure out when to supply --debug here (but keep separated from our --debug) - cmd = [ + args = [ sys.executable, self.scs_compliance_check, self.abspath(self.scopes[scope]['spec']), '--debug', '-C', '-o', output, '-s', subject, ] for key, value in self.get_subject_mapping(subject).items(): - cmd.extend(['-a', f'{key}={value}']) - return cmd + args.extend(['-a', f'{key}={value}']) + return {'args': args} + + def build_provision_command(self, subject): + kubernetes_setup = self.get_kubernetes_setup(subject) + subject_root = self.abspath(self.get_subject_mapping(subject).get('subject_root') or '.') + ensure_dir(subject_root) + return { + 'args': [ + sys.executable, self.run_plugin_py, + 'create', + kubernetes_setup['kube_plugin'], + self.abspath(kubernetes_setup['kube_plugin_config']), + self.abspath(kubernetes_setup['clusterspec']), + kubernetes_setup['clusterspec_cluster'], + ], + 'cwd': subject_root, + } + + def build_unprovision_command(self, subject): + kubernetes_setup = 
self.get_kubernetes_setup(subject) + subject_root = self.abspath(self.get_subject_mapping(subject).get('subject_root') or '.') + ensure_dir(subject_root) + return { + 'args': [ + sys.executable, self.run_plugin_py, + 'delete', + kubernetes_setup['kube_plugin'], + self.abspath(kubernetes_setup['kube_plugin_config']), + self.abspath(kubernetes_setup['clusterspec']), + kubernetes_setup['clusterspec_cluster'], + ], + 'cwd': subject_root, + } def build_cleanup_command(self, subject): # TODO figure out when to supply --debug here (but keep separated from our --debug) - return [ + return {'args': [ sys.executable, self.cleanup_py, '-c', self.get_subject_mapping(subject)['os_cloud'], '--prefix', '_scs-', '--ipaddr', '10.1.0.', '--debug', - ] + ]} def build_sign_command(self, target_path): - return [ + return {'args': [ self.ssh_keygen, '-Y', 'sign', '-f', self.abspath(self.secrets['keyfile']), '-n', 'report', target_path, - ] + ]} def build_upload_command(self, target_path, monitor_url): if not monitor_url.endswith('/'): monitor_url += '/' - return [ + return {'args': [ self.curl, '--fail-with-body', '--data-binary', f'@{target_path}.sig', @@ -101,7 +146,7 @@ def build_upload_command(self, target_path, monitor_url): '-H', 'Content-Type: application/x-signed-yaml', '-H', f'Authorization: Basic {self.auth_token}', f'{monitor_url}reports', - ] + ]} @click.group() @@ -123,7 +168,7 @@ def _run_commands(commands, num_workers=5): processes = [] while commands or processes: while commands and len(processes) < num_workers: - processes.append(subprocess.Popen(commands.pop())) + processes.append(subprocess.Popen(**commands.pop())) processes[:] = [p for p in processes if p.poll() is None] time.sleep(0.5) @@ -180,22 +225,14 @@ def run(cfg, scopes, subjects, preset, num_workers, monitor_url, report_yaml): commands = [cfg.build_check_command(job[0], job[1], output) for job, output in zip(jobs, outputs)] _run_commands(commands, num_workers=num_workers) _concat_files(outputs, report_yaml_tmp) - subprocess.run(cfg.build_sign_command(report_yaml_tmp)) - subprocess.run(cfg.build_upload_command(report_yaml_tmp, monitor_url)) + subprocess.run(**cfg.build_sign_command(report_yaml_tmp)) + subprocess.run(**cfg.build_upload_command(report_yaml_tmp, monitor_url)) if report_yaml is not None: _move_file(report_yaml_tmp, report_yaml) return 0 -@cli.command() -@click.option('--subject', 'subjects', type=str) -@click.option('--preset', 'preset', type=str) -@click.option('--num-workers', 'num_workers', type=int, default=5) -@click.pass_obj -def cleanup(cfg, subjects, preset, num_workers): - """ - clean up any lingering resources - """ +def _run_command_for_subjects(cfg, subjects, preset, num_workers, command): if not subjects and not preset: preset = 'default' if preset: @@ -208,12 +245,49 @@ def cleanup(cfg, subjects, preset, num_workers): subjects = [subject.strip() for subject in subjects.split(',')] if subjects else [] if not subjects: raise click.UsageError('subject(s) must be non-empty') - logger.debug(f'cleaning up for subject(s) {", ".join(subjects)}, num_workers: {num_workers}') - commands = [cfg.build_cleanup_command(subject) for subject in subjects] + logger.debug(f'running {command} for subject(s) {", ".join(subjects)}, num_workers: {num_workers}') + m = getattr(cfg, f'build_{command}_command') + commands = [m(subject) for subject in subjects] _run_commands(commands, num_workers=num_workers) return 0 +@cli.command() +@click.option('--subject', 'subjects', type=str) +@click.option('--preset', 'preset', type=str) 
+@click.option('--num-workers', 'num_workers', type=int, default=5) +@click.pass_obj +def cleanup(cfg, subjects, preset, num_workers): + """ + clean up any lingering IaaS resources + """ + return _run_command_for_subjects(cfg, subjects, preset, num_workers, "cleanup") + + +@cli.command() +@click.option('--subject', 'subjects', type=str) +@click.option('--preset', 'preset', type=str) +@click.option('--num-workers', 'num_workers', type=int, default=5) +@click.pass_obj +def provision(cfg, subjects, preset, num_workers): + """ + create k8s clusters + """ + return _run_command_for_subjects(cfg, subjects, preset, num_workers, "provision") + + +@cli.command() +@click.option('--subject', 'subjects', type=str) +@click.option('--preset', 'preset', type=str) +@click.option('--num-workers', 'num_workers', type=int, default=5) +@click.pass_obj +def unprovision(cfg, subjects, preset, num_workers): + """ + clean up k8s clusters + """ + return _run_command_for_subjects(cfg, subjects, preset, num_workers, "unprovision") + + if __name__ == '__main__': logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) cli(obj=Config()) From a3bc95c797f9099d908048c489d1dbcfc12063c0 Mon Sep 17 00:00:00 2001 From: josephineSei <128813814+josephineSei@users.noreply.github.com> Date: Wed, 20 Nov 2024 10:50:58 +0100 Subject: [PATCH 02/11] Create a standard for the security of iaas service software (#765) * Create scs-XXXX-v1-minimum-iaas-service-version.md * First Draft of the Standard and Proposal of tests * change link * Update and rename scs-XXXX-v1-minimum-iaas-service-version.md to scs-XXXX-v1-security-of-iaas-service-software.md * Create scs-XXXX-w1-security-of-iaas-service-software.md * rework glossary * Multiple updates scs-XXXX-v1-security-of-iaas-service-software.md * Multiple applications of suggestions from code review * Update scs-XXXX-w1-security-of-iaas-service-software.md Signed-off-by: josephineSei <128813814+josephineSei@users.noreply.github.com> Co-authored-by: Markus Hentsch <129268441+markus-hentsch@users.noreply.github.com> Co-authored-by: anjastrunk <119566837+anjastrunk@users.noreply.github.com> Co-authored-by: Kurt Garloff --- ...XX-v1-security-of-iaas-service-software.md | 138 ++++++++++++++++++ ...XX-w1-security-of-iaas-service-software.md | 45 ++++++ 2 files changed, 183 insertions(+) create mode 100644 Standards/scs-XXXX-v1-security-of-iaas-service-software.md create mode 100644 Standards/scs-XXXX-w1-security-of-iaas-service-software.md diff --git a/Standards/scs-XXXX-v1-security-of-iaas-service-software.md b/Standards/scs-XXXX-v1-security-of-iaas-service-software.md new file mode 100644 index 000000000..94b1200dd --- /dev/null +++ b/Standards/scs-XXXX-v1-security-of-iaas-service-software.md @@ -0,0 +1,138 @@ +--- +title: Standard for the security of IaaS service software +type: Standard +status: Draft +track: IaaS +--- + +## Introduction + +Software security relies on bug patches and security updates being available for specific versions of the software. +The services, which build the IaaS Layer should be updated on a regular basis based on updates provided by their respective authors or distributors. +But older releases or versions of the software of these services may not receive updates anymore. +Unpatched versions should not be used in deployments as they are a security risk, so this standard will define how CSPs should deal with software versions and security updates. 
+ +## Terminology + +| Term | Explanation | +| ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| CSP | Cloud Service Provider, provider managing the OpenStack infrastructure. | +| SLURP | Skip Level Upgrade Release Process - A Process that allows upgrades between two releases, while skipping the one in between them. | +| OSSN | [OpenStack Security Note](https://wiki.openstack.org/wiki/Security_Notes) - security issues from 3rd parties or due to misconfigurations. | +| OSSA | [OpenStack Security Advisories](https://security.openstack.org/ossalist.html) - security issues and advices for OpenStack. | + +## Motivation + +On the IaaS Layer the software, that needs to be considered in the scope of this standard, is mainly the APIs of IaaS Services. +Also there might be shared libraries and other dependencies, that could be considered part of the IaaS Layer. +In software projects like e.g. OpenStack that provide the main services and all APIs, the software will be modified and receive bug fixes continuously and will receive releases of new versions on a regular basis. +Older releases will at some point not receive updates anymore, because maintaining more and more releases simultaneously requires too much manpower. +Thus older versions will also eventually not receive security updates anymore. +Using versions which do not receive updates anymore threatens the baseline security of deployments and should be avoided under all circumstances. + +## Design Considerations + +It would be possible to define a minimum version of IaaS Layer software to avoid security risks. +In the following paragraphs several options of defining a minimum version or dealing with security patches otherwise are discussed. + +### Options considered + +#### Only Allow the current versions of Software + +Considering that OpenStack as one provider of IaaS Layer Software has two releases per year, with one SLURP release per year, this option would require CSPs to update their deployment once or twice a year. +Updating a whole deployment is a lot of work and requires also good life-cycle management. +Following only the SLURP releases would reduce this work to once per year. + +While following new releases closely already provides a deployment with recent bug fixes and new features, it also makes developing standards easier. +Differences between releases will accumulate eventually and may render older releases non-compliant to the SCS standards at some point. + +On the other hand on the IaaS Level there aren't many breaking changes introduced by releases and also most standards will also work with older releases. +Security updates and bug fixes are also provided by OpenStack for a few older releases with the state `maintained` according to the OpenStack releases overview[^2]. +Additionally the [SCS reference implementation](https://github.com/SovereignCloudStack/release-notes/blob/main/Release7.md) is integrating OpenStack releases after half a year - so about the time when a new release is published by OpenStack. +Considering a CSP that wants to use only SLURP releases and waits for the reference implementation to adopt them, will already lag over a year (i.e. 2 OpenStack releases) behind the latest release, this cannot be considered as using the current version of IaaS Layer Software. +Thus this option can be discarded. 
+ +#### Allow only maintained versions of Software + +While following closely to the newest releases could be advised, there are several downsides to requiring this workflow, even if it would be only for SLURP releases. +Following the SCS reference implementation for example would also lead into being a little bit behind the newest OpenStack release. +But this is not as bad as it may seem to be, because security related fixes and bug fixes are backported to older but still `maintained` releases. +All releases that are still maintained can be looked up at the releases page from OpenStack[^2]. + +Allowing maintained versions would give CSPs a little bit more time to update and test their environments, while still receiving relevant security updates and bug fixes. +Also CSPs that want to become SCS-compliant will not have to take on the burden to upgrade their deployments to very recent releases immediately, but can instead test with an existing release before an upgrade and identify where they need to put in additional work to become SCS-compliant. + +One problem is, that there might be new features implemented in the newest versions of the software, which are desired by other SCS standards to be SCS-compliant. +In that case allowing all maintained versions would lead to a two-year timespan customers would need to wait for before such a feature becomes available in all SCS-compliant deployments. +In case of security relevant features this is not advisable. + +#### Standards implicitly define the minimum versions of Software + +Instead of requiring a defined minimum software version centrally, it could be derived from the individual standards. +Because: Whenever there is a new wanted behavior a standard should be created and a resonable timeframe given to CSPs to adopt a software version that can fulfill the new standard. +Through the combination of all standards that are in place, the minimum version for the IaaS service software is implicitly given. + +This would avoid to have conflicting versions of software in terms of feature parity, while also allowing older software. +Using this approach requires an additional advise to CSPs to update or implement patches for security issues. + +#### Advise CSPs to integrate software updates + +As long as maintained versions of software are used, updates with security patches are available and only need to be integrated. +This can and should be done in a reasonable short timeframe. + +But CSPs may even use releases of IaaS software, that are either not maintained anymore by an open source community or may be even closed source implementations of the mandatory IaaS APIs. +Allowing older versions or closed source software would only be acceptable, when CSPs assure (e.g. in documentation), that they themself will patch the software within their deployments. +Security bug fixes must be implemented and proof of the fix then provided. +Only under these circumstances deployments with older or alternative IaaS Layer software may be handled as compliant. + +This option could be taken for granted, but to actually advise using it may encourage CSPs to take a closer look on their life-cycle management and security risk handling. +And CSPs using OpenStack could even be encouraged to upgrade their deployments. + +#### Dependencies of the IaaS Layer Software + +While the IaaS service software like OpenStack itself is monitored and security issues announced in OSSNs and OSSAs, these services have lots of dependecies, that are not monitored by the same entity. 
+When dependencies have security issues, there might be no OSSN or OSSA, so CSPs also need to watch CVEs concerning these dependencies themselves. +Those dependencies must also be updated in a reasonable timeframe, when a security issue is disclosed. + +#### What timeframe is needed to fix the issue? + +CSPs should be encouraged to fix security issues as fast as possible. +Some security issues are very easy to exploit so as soon as the vulnerability is disclosed attacks on deployments will start. +Other vulnerabilities may need much knowledge and more time to be exploited. +Also the impact of different vulnerabilities will differ. + +So it can be concluded that some security issues need to be fixed immediately while for others it is okay to take some time. +The BSI already has some guidance[^1] on how fast CSPs should respond. +From the moment a vulnerability is disclosed these are the advised reaction times ranked by the severity of the vulnerability: + +1. Critical (CVSS = 9.0 – 10.0): 3 hours +2. High (CVSS = 7.0 – 8.9): 3 days +3. Mid (CVSS = 4.0 – 6.9): 1 month +4. Low (CVSS = 0.1 – 3.9): 3 months + +[^1]: [C5 criteria catalog with timeframes for responses on page 70.](https://www.bsi.bund.de/SharedDocs/Downloads/EN/BSI/CloudComputing/ComplianceControlsCatalogue/2020/C5_2020.pdf?__blob=publicationFile&v=3) + +This standard will follow this guidance and refer to these timeframes as "reasonable timeframes". + +## Standard for a minimum IaaS Layer Software version + +If a deployment is affected by a security issue and a maintained[^2] version of OpenStack is used as implementation for IaaS Layer software, security patches noted in OSSNs and OSSAs MUST be integrated within a reasonable timeframe according to the severity of the security issue[^1]. +Otherwise the CSP MUST implement security bug fixes themself within a reasonable timeframe, when the deplyoment is affected by a security issue according to the severity of the security issue[^1]. + +In both cases a notice of the update MUST be send to the OSBA, so that the compliance will not be revoked. + +If a deployment uses a dependency of the IaaS service software which is affected by a security issue, this software also MUST be updated with security patches within a reasonable timeframe[^1]. + +An open SBOM list MAY be used to propagate the current version of the software and may be used as proof of updates. + +[^2]: [OpenStack versions and their current status](https://releases.openstack.org) + +## Conformance Tests + +In case of provided SBOMs the version numbers of the software could be checked. +But this is not a requirement, so there cannot be such a test. +Tests on the integration of security patches itself are difficult. +And even if tests for certain security issues are possible, then those might be interpreted as an attack. +This is the reason there will be no conformance test. + +Rather the standard requires that CSPs provide notice of the fixed vulnerabilites themselves. 
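+
+For orientation only (an illustrative sketch, not a conformance test), the reaction times referenced in this standard can be expressed as a small lookup helper; the thresholds below simply mirror the C5-based classification cited above, with "1 month" and "3 months" approximated as 30 and 90 days:
+
+```python
+from datetime import timedelta
+
+
+def reaction_deadline(cvss_score: float) -> timedelta:
+    """Return the maximum reaction time for a given CVSS base score.
+
+    Thresholds follow the C5-based classification in this standard;
+    the month-based deadlines are approximated as 30 and 90 days.
+    """
+    if cvss_score >= 9.0:
+        return timedelta(hours=3)   # critical
+    if cvss_score >= 7.0:
+        return timedelta(days=3)    # high
+    if cvss_score >= 4.0:
+        return timedelta(days=30)   # mid
+    return timedelta(days=90)       # low
+```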
diff --git a/Standards/scs-XXXX-w1-security-of-iaas-service-software.md b/Standards/scs-XXXX-w1-security-of-iaas-service-software.md new file mode 100644 index 000000000..3f0b1df8c --- /dev/null +++ b/Standards/scs-XXXX-w1-security-of-iaas-service-software.md @@ -0,0 +1,45 @@ +--- +title: "SCS Standard for the security of IaaS service software: Implementation and Testing Notes" +type: Supplement +track: IaaS +status: Draft +supplements: + - scs-XXXX-v1-security-of-iaas-service-software.md +--- + +## Testing or Detecting security updates in software + +It is not always possible to automatically test, whether the software has the newest security updates. +This is because software versions may differ or some CSPs might have added downstream code parts or using other software than the reference. +Also vulnerabilites and their fixes are quite different in testing, some might not be testable while others are. +Additionally testing might be perceived as an attack on the infrastructure. +So this standard will rely on the work and information CSPs must provide. +There are different cases and procedures which are addressed in the following parts, that lead to compliance for this standard. + +### Procedure to become compliant to the security of IaaS service software Standard + +This is the procedure when a new deployment wants to achieve SCS-conformancy. +There are two states such a deployment can be in: + +1. When a deployment is newly build or installed it usually uses software which includes all the latest security and bug fixes. +Such deployments should be considered compliant to the standard. + +2. When a CSP wants to make an older deployment compliant to the SCS standards and thus also to this standard, it should be checked, whether the running software is up to date and all vulnerabilites are fixed. +Any updates or upgrades to even newer versions should be done before the SCS compliance for every other standard is checked. +Afterwards the CSP may provide information about the used software in an SBOM or otherwise should provide a notice about the deployment having integrated all necessary vulnerability patches. + +### Procedure when new vulnerabilites are discovered + +Whenever there are new vulnerabilities discovered in IaaS service software like OpenStack there is either an internal discussion ongoing or it is just a smaller issue. +In the first case CSPs should have someone following such discussions and may even help preparing and testing patches. +From the moment on the vulnerability is disclosed publicly, the risk of it being actively exploited increases greatly. +So CSPs MUST watch out for announcements like in the OSSAs and OSSNs and when they are affected, update their deployment within the following timeframes according to the severity of the issue: + +1. Critical (CVSS = 9.0 – 10.0): 3 hours +2. High (CVSS = 7.0 – 8.9): 3 days +3. Mid (CVSS = 4.0 – 6.9): 1 month +4. Low (CVSS = 0.1 – 3.9): 3 months + +Afterwards CSPs MUST provide a notice to the OSBA, that they are not or not anymore affected by the vulnerabilty. +This can be done through either telling, what patches were integrated or showing configuration that renders the attack impossible. +It could also be provided a list of services, when the affected service is not used in that deployment. From 5bb59b40b56504355c3f358fcf30899d6838eaf6 Mon Sep 17 00:00:00 2001 From: Kurt Garloff Date: Wed, 20 Nov 2024 18:21:03 +0100 Subject: [PATCH 03/11] Stabilize 0123 and mention need for S3 endpoints being documented. 
(#828) * Stabilize 0123 and mention need for S3 endpoints being documented. * Fix location of test script. It is in the main branch now ... * Adjust stabilization date, remove ceilometer API. - We have waited for today's IaaS team call for the final approval. - ceilometer does not offer an API (has been removed many years ago), so we should not list the API as supported - Remove one comma and one "the" in front of SCS. Signed-off-by: Kurt Garloff --- ...-0123-v1-mandatory-and-supported-IaaS-services.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md b/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md index 1d94990bc..2f7a74326 100644 --- a/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md +++ b/Standards/scs-0123-v1-mandatory-and-supported-IaaS-services.md @@ -1,7 +1,8 @@ --- title: Mandatory and Supported IaaS Services type: Standard -status: Draft +status: Stable +stabilized_at: 2024-11-20 track: IaaS --- @@ -40,7 +41,7 @@ The following IaaS APIs MUST be present in SCS-compliant IaaS deployments and co :::caution S3 API implementations may differ in certain offered features. -CSPs must publicly describe, which implementation they use in their deployment. +CSPs must publicly describe the endpoints of their S3 solutions and which implementations they use in their deployment. Users should always research whether a needed feature is supported in the offered implementation. ::: @@ -63,13 +64,12 @@ The following IaaS APIs MAY be present in SCS-compliant IaaS deployment, e.g. im | **object-store** | Swift | Object Store with different possible backends | | **orchestration** | Heat | Orchestration service | | **shared-file-systems** | Manila | Shared File Systems service | -| **telemetry** | Ceilometer | Telemetry service | | **time-series-database** | Gnocchi | Time Series Database service | ## Unsupported IaaS APIs All other OpenStack services, whose APIs are not mentioned in the mandatory or supported lists will not be tested for their compatibility and conformance in SCS clouds by the SCS community. -Those services MAY be integrated into IaaS deployments by a Cloud Service Provider on their own responsibility but the SCS will not assume they are present and potential issues that occur during deployment or usage have to be handled by the CSP on their own accord. +Those services MAY be integrated into IaaS deployments by a Cloud Service Provider on their own responsibility but SCS will not assume they are present and potential issues that occur during deployment or usage have to be handled by the CSP on their own accord. The SCS standard offers no guarantees for compatibility or reliability of services categorized as unsupported. ## Related Documents @@ -78,5 +78,5 @@ The SCS standard offers no guarantees for compatibility or reliability of servic ## Conformance Tests -The presence of the mandatory OpenStack APIs will be tested in [this test-script](https://github.com/SovereignCloudStack/standards/blob/mandatory-and-supported-IaaS-services/Tests/iaas/mandatory-services/mandatory-iaas-services.py). -The test will further check, whether the object store endpoint is compatible to s3. +The presence of the mandatory OpenStack APIs will be tested in [this test-script](https://github.com/SovereignCloudStack/standards/blob/main/Tests/iaas/mandatory-services/mandatory-iaas-services.py) +The test will further check whether the object-store endpoint is compatible to s3. 
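+
+As a rough illustration (independent of the actual test script; the endpoint URL and credentials below are placeholders), such an S3 compatibility probe can boil down to a single ListBuckets call against the object-store endpoint, here sketched with boto3:
+
+```python
+# Illustrative sketch only, not the SCS conformance test itself.
+# endpoint_url, access_key and secret_key are placeholders supplied by the caller.
+import boto3
+
+
+def is_s3_compatible(endpoint_url: str, access_key: str, secret_key: str) -> bool:
+    """Probe an object-store endpoint with a basic S3 ListBuckets request."""
+    client = boto3.client(
+        "s3",
+        endpoint_url=endpoint_url,
+        aws_access_key_id=access_key,
+        aws_secret_access_key=secret_key,
+    )
+    try:
+        client.list_buckets()
+        return True
+    except Exception:
+        return False
+```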
From b70acdae52ae0c6da0f01c092c8c3cd37828dcf2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:32:38 +0100 Subject: [PATCH 04/11] Bump cross-spawn from 7.0.3 to 7.0.6 in the npm_and_yarn group (#830) Bumps the npm_and_yarn group with 1 update: [cross-spawn](https://github.com/moxystudio/node-cross-spawn). Updates `cross-spawn` from 7.0.3 to 7.0.6 - [Changelog](https://github.com/moxystudio/node-cross-spawn/blob/master/CHANGELOG.md) - [Commits](https://github.com/moxystudio/node-cross-spawn/compare/v7.0.3...v7.0.6) --- updated-dependencies: - dependency-name: cross-spawn dependency-type: indirect dependency-group: npm_and_yarn ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Kurt Garloff --- package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/package-lock.json b/package-lock.json index e20b3925b..1fcf557e6 100644 --- a/package-lock.json +++ b/package-lock.json @@ -152,9 +152,9 @@ } }, "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", From 87f4e4b7d6ceb5a689f3d99567560889ec64705f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 18:33:30 +0100 Subject: [PATCH 05/11] Bump aiohttp in /Tests in the pip group across 1 directory (#829) Bumps the pip group with 1 update in the /Tests directory: [aiohttp](https://github.com/aio-libs/aiohttp). Updates `aiohttp` from 3.10.3 to 3.10.11 - [Release notes](https://github.com/aio-libs/aiohttp/releases) - [Changelog](https://github.com/aio-libs/aiohttp/blob/master/CHANGES.rst) - [Commits](https://github.com/aio-libs/aiohttp/compare/v3.10.3...v3.10.11) --- updated-dependencies: - dependency-name: aiohttp dependency-type: direct:production dependency-group: pip ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Kurt Garloff --- Tests/requirements.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Tests/requirements.txt b/Tests/requirements.txt index 9505a7061..bf93aff83 100644 --- a/Tests/requirements.txt +++ b/Tests/requirements.txt @@ -6,7 +6,7 @@ # aiohappyeyeballs==2.3.5 # via aiohttp -aiohttp==3.10.3 +aiohttp==3.10.11 # via # -r requirements.in # kubernetes-asyncio @@ -76,6 +76,8 @@ pbr==6.0.0 # stevedore platformdirs==4.2.2 # via openstacksdk +propcache==0.2.0 + # via yarl pycparser==2.22 # via cffi python-dateutil==2.9.0.post0 @@ -109,5 +111,5 @@ urllib3==2.2.2 # via # kubernetes-asyncio # requests -yarl==1.9.4 +yarl==1.17.2 # via aiohttp From 3274fff4fe1a45e926aa7b21e78ae96c22b85a08 Mon Sep 17 00:00:00 2001 From: Markus Hentsch <129268441+markus-hentsch@users.noreply.github.com> Date: Wed, 20 Nov 2024 19:08:48 +0100 Subject: [PATCH 06/11] Replace README.md of the Drafts folder with deprecation notice (#822) * Replace README.md of the Drafts folder with deprecation notice * Improve formatting of depreaction note. Also try to appease markdownlint ... * More markdownlint: trailing space & relative links. Signed-off-by: Markus Hentsch <129268441+markus-hentsch@users.noreply.github.com> Signed-off-by: Kurt Garloff Co-authored-by: Kurt Garloff --- Drafts/README.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Drafts/README.md b/Drafts/README.md index f4ee47aae..e3b903091 100644 --- a/Drafts/README.md +++ b/Drafts/README.md @@ -1,5 +1,11 @@ -# Design-Docs +# Drafts Archive -Design Documents, Architecture etc. for SCS and related technology +## Deprecation Notice -Here we collect docs that cover overarching SCS topics or topics that otherwise do not belong to an existing repository. +> [!CAUTION] +> Please do not create new files in this folder! + +The contents of this folder are for archival purposes only. New drafts belong +in the [`../Standards/`](https://github.com/SovereignCloudStack/standards/tree/main/Standards) +folder instead and adhere to the lifecycle described in +[scs-0001-v1-sovereign-cloud-standards](https://github.com/SovereignCloudStack/standards/blob/main/Standards/scs-0001-v1-sovereign-cloud-standards.md). From f2d6e3c9463b399051ea87eba767f7bf8522700c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matthias=20B=C3=BCchse?= Date: Wed, 20 Nov 2024 21:30:24 +0000 Subject: [PATCH 07/11] Revert any non-editorial changes to scs-0214-v1 that happened after stabilization (#834) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Matthias Büchse --- .../scs-0214-v1-k8s-node-distribution.md | 36 ------------------- 1 file changed, 36 deletions(-) diff --git a/Standards/scs-0214-v1-k8s-node-distribution.md b/Standards/scs-0214-v1-k8s-node-distribution.md index ffec30efc..ce70e605e 100644 --- a/Standards/scs-0214-v1-k8s-node-distribution.md +++ b/Standards/scs-0214-v1-k8s-node-distribution.md @@ -80,42 +80,6 @@ If the standard is used by a provider, the following decisions are binding and v can also be scaled vertically first before scaling horizontally. - Worker node distribution MUST be indicated to the user through some kind of labeling in order to enable (anti)-affinity for workloads over "failure zones". 
-- To provide metadata about the node distribution, which also enables testing of this standard, - providers MUST label their K8s nodes with the labels listed below. - - `topology.kubernetes.io/zone` - - Corresponds with the label described in [K8s labels documentation][k8s-labels-docs]. - It provides a logical zone of failure on the side of the provider, e.g. a server rack - in the same electrical circuit or multiple machines bound to the internet through a - singular network structure. How this is defined exactly is up to the plans of the provider. - The field gets autopopulated most of the time by either the kubelet or external mechanisms - like the cloud controller. - - - `topology.kubernetes.io/region` - - Corresponds with the label described in [K8s labels documentation][k8s-labels-docs]. - It describes the combination of one or more failure zones into a region or domain, therefore - showing a larger entity of logical failure zone. An example for this could be a building - containing racks that are put into such a zone, since they're all prone to failure, if e.g. - the power for the building is cut. How this is defined exactly is also up to the provider. - The field gets autopopulated most of the time by either the kubelet or external mechanisms - like the cloud controller. - - - `topology.scs.community/host-id` - - This is an SCS-specific label; it MUST contain the hostID of the physical machine running - the hypervisor (NOT: the hostID of a virtual machine). Here, the hostID is an arbitrary identifier, - which need not contain the actual hostname, but it should nonetheless be unique to the host. - This helps identify the distribution over underlying physical machines, - which would be masked if VM hostIDs were used. - -## Conformance Tests - -The script `k8s-node-distribution-check.py` checks the nodes available with a user-provided -kubeconfig file. It then determines based on the labels `kubernetes.io/hostname`, `topology.kubernetes.io/zone`, -`topology.kubernetes.io/region` and `node-role.kubernetes.io/control-plane`, if a distribution -of the available nodes is present. If this isn't the case, the script produces an error. -If also produces warnings and informational outputs, if e.g. labels don't seem to be set. 
[k8s-ha]: https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/ [k8s-large-clusters]: https://kubernetes.io/docs/setup/best-practices/cluster-large/ From 933c2357f250be7c5f1656b461802c5ae7d42981 Mon Sep 17 00:00:00 2001 From: josephineSei <128813814+josephineSei@users.noreply.github.com> Date: Thu, 21 Nov 2024 09:47:12 +0100 Subject: [PATCH 08/11] Update number for security iaa s standard (#836) * Rename scs-XXXX-v1-security-of-iaas-service-software.md to scs-0124-v1-security-of-iaas-service-software.md Signed-off-by: josephineSei <128813814+josephineSei@users.noreply.github.com> * Update and rename scs-XXXX-w1-security-of-iaas-service-software.md to scs-0124-w1-security-of-iaas-service-software.md Signed-off-by: josephineSei <128813814+josephineSei@users.noreply.github.com> --------- Signed-off-by: josephineSei <128813814+josephineSei@users.noreply.github.com> --- ...ware.md => scs-0124-v1-security-of-iaas-service-software.md} | 0 ...ware.md => scs-0124-w1-security-of-iaas-service-software.md} | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) rename Standards/{scs-XXXX-v1-security-of-iaas-service-software.md => scs-0124-v1-security-of-iaas-service-software.md} (100%) rename Standards/{scs-XXXX-w1-security-of-iaas-service-software.md => scs-0124-w1-security-of-iaas-service-software.md} (98%) diff --git a/Standards/scs-XXXX-v1-security-of-iaas-service-software.md b/Standards/scs-0124-v1-security-of-iaas-service-software.md similarity index 100% rename from Standards/scs-XXXX-v1-security-of-iaas-service-software.md rename to Standards/scs-0124-v1-security-of-iaas-service-software.md diff --git a/Standards/scs-XXXX-w1-security-of-iaas-service-software.md b/Standards/scs-0124-w1-security-of-iaas-service-software.md similarity index 98% rename from Standards/scs-XXXX-w1-security-of-iaas-service-software.md rename to Standards/scs-0124-w1-security-of-iaas-service-software.md index 3f0b1df8c..6cc7233ee 100644 --- a/Standards/scs-XXXX-w1-security-of-iaas-service-software.md +++ b/Standards/scs-0124-w1-security-of-iaas-service-software.md @@ -4,7 +4,7 @@ type: Supplement track: IaaS status: Draft supplements: - - scs-XXXX-v1-security-of-iaas-service-software.md + - scs-0124-v1-security-of-iaas-service-software.md --- ## Testing or Detecting security updates in software From 3b54ca2765e42ab54b08de196d576cff91d289a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matthias=20B=C3=BCchse?= Date: Thu, 21 Nov 2024 10:16:24 +0000 Subject: [PATCH 09/11] Stabilize scs-0219-v1 (#833) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Matthias Büchse Co-authored-by: Kurt Garloff --- Standards/scs-0219-v1-kaas-networking.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Standards/scs-0219-v1-kaas-networking.md b/Standards/scs-0219-v1-kaas-networking.md index 8f35f7925..8c58d5c7a 100644 --- a/Standards/scs-0219-v1-kaas-networking.md +++ b/Standards/scs-0219-v1-kaas-networking.md @@ -1,7 +1,8 @@ --- title: KaaS Networking Standard type: Standard -status: Draft +status: Stable +stabilized_at: 2024-11-21 track: KaaS --- From 7a2662a2ec21cbe95a402af017207e5a555f3eda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Matthias=20B=C3=BCchse?= Date: Thu, 21 Nov 2024 12:32:22 +0000 Subject: [PATCH 10/11] Feat/sonobuoy integration (#832) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Matthias Büchse Signed-off-by: Toni Finger Co-authored-by: Toni Finger --- 
Tests/kaas/{plugin => }/README.md | 4 +- Tests/kaas/{plugin => }/requirements.in | 1 + Tests/kaas/{plugin => }/requirements.txt | 2 + Tests/kaas/sonobuoy_handler/run_sonobuoy.py | 26 ++++ .../kaas/sonobuoy_handler/sonobuoy_handler.py | 133 ++++++++++++++++++ Tests/scs-compatible-kaas.yaml | 14 ++ 6 files changed, 178 insertions(+), 2 deletions(-) rename Tests/kaas/{plugin => }/README.md (89%) rename Tests/kaas/{plugin => }/requirements.in (65%) rename Tests/kaas/{plugin => }/requirements.txt (95%) create mode 100755 Tests/kaas/sonobuoy_handler/run_sonobuoy.py create mode 100644 Tests/kaas/sonobuoy_handler/sonobuoy_handler.py diff --git a/Tests/kaas/plugin/README.md b/Tests/kaas/README.md similarity index 89% rename from Tests/kaas/plugin/README.md rename to Tests/kaas/README.md index e54cf1864..16697d3fd 100644 --- a/Tests/kaas/plugin/README.md +++ b/Tests/kaas/README.md @@ -1,4 +1,4 @@ -# Plugin for provisioning k8s clusters and performing conformance tests on these clusters +# Test suite for SCS-compatible KaaS ## Development environment @@ -6,6 +6,7 @@ * [docker](https://docs.docker.com/engine/install/) * [kind](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) +* [sonobuoy](https://sonobuoy.io/docs/v0.57.1/#installation) ### setup for development @@ -19,7 +20,6 @@ (venv) curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10 (venv) python3.10 -m pip install --upgrade pip (venv) python3.10 -m pip --version - ``` 2. Install dependencies: diff --git a/Tests/kaas/plugin/requirements.in b/Tests/kaas/requirements.in similarity index 65% rename from Tests/kaas/plugin/requirements.in rename to Tests/kaas/requirements.in index 0a60c3c3c..640831e54 100644 --- a/Tests/kaas/plugin/requirements.in +++ b/Tests/kaas/requirements.in @@ -1,2 +1,3 @@ pytest-kind kubernetes +junitparser diff --git a/Tests/kaas/plugin/requirements.txt b/Tests/kaas/requirements.txt similarity index 95% rename from Tests/kaas/plugin/requirements.txt rename to Tests/kaas/requirements.txt index a04a03167..c36ca21d1 100644 --- a/Tests/kaas/plugin/requirements.txt +++ b/Tests/kaas/requirements.txt @@ -16,6 +16,8 @@ google-auth==2.34.0 # via kubernetes idna==3.8 # via requests +junitparser==3.2.0 + # via -r requirements.in kubernetes==30.1.0 # via -r requirements.in oauthlib==3.2.2 diff --git a/Tests/kaas/sonobuoy_handler/run_sonobuoy.py b/Tests/kaas/sonobuoy_handler/run_sonobuoy.py new file mode 100755 index 000000000..50ef4249c --- /dev/null +++ b/Tests/kaas/sonobuoy_handler/run_sonobuoy.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# vim: set ts=4 sw=4 et: +# +import logging +import sys + +import click + +from sonobuoy_handler import SonobuoyHandler + +logger = logging.getLogger(__name__) + + +@click.command() +@click.option("-k", "--kubeconfig", "kubeconfig", required=True, type=click.Path(exists=True), help="path/to/kubeconfig_file.yaml",) +@click.option("-r", "--result_dir_name", "result_dir_name", type=str, default="sonobuoy_results", help="directory name to store results at",) +@click.option("-c", "--check", "check_name", type=str, default="sonobuoy_executor", help="this MUST be the same name as the id in 'scs-compatible-kaas.yaml'",) +@click.option("-a", "--arg", "args", multiple=True) +def sonobuoy_run(kubeconfig, result_dir_name, check_name, args): + sonobuoy_handler = SonobuoyHandler(check_name, kubeconfig, result_dir_name, args) + sys.exit(sonobuoy_handler.run()) + + +if __name__ == "__main__": + logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG) + sonobuoy_run() diff --git 
a/Tests/kaas/sonobuoy_handler/sonobuoy_handler.py b/Tests/kaas/sonobuoy_handler/sonobuoy_handler.py new file mode 100644 index 000000000..65593a411 --- /dev/null +++ b/Tests/kaas/sonobuoy_handler/sonobuoy_handler.py @@ -0,0 +1,133 @@ +from collections import Counter +import json +import logging +import os +import shlex +import shutil +import subprocess + +from junitparser import JUnitXml + +logger = logging.getLogger(__name__) + + +class SonobuoyHandler: + """ + A class that handles both the execution of sonobuoy and + the generation of the results for a test report + """ + + kubeconfig_path = None + working_directory = None + + def __init__( + self, + check_name="sonobuoy_handler", + kubeconfig=None, + result_dir_name="sonobuoy_results", + args=(), + ): + self.check_name = check_name + logger.debug(f"kubeconfig: {kubeconfig} ") + if kubeconfig is None: + raise RuntimeError("No kubeconfig provided") + self.kubeconfig_path = kubeconfig + self.working_directory = os.getcwd() + self.result_dir_name = result_dir_name + self.sonobuoy = shutil.which('sonobuoy') + logger.debug(f"working from {self.working_directory}") + logger.debug(f"placing results at {self.result_dir_name}") + logger.debug(f"sonobuoy executable at {self.sonobuoy}") + self.args = (arg0 for arg in args for arg0 in shlex.split(str(arg))) + + def _invoke_sonobuoy(self, *args, **kwargs): + inv_args = (self.sonobuoy, "--kubeconfig", self.kubeconfig_path) + args + logger.debug(f'invoking {" ".join(inv_args)}') + return subprocess.run(args=inv_args, capture_output=True, check=True, **kwargs) + + def _sonobuoy_run(self): + self._invoke_sonobuoy("run", "--wait", *self.args) + + def _sonobuoy_delete(self): + self._invoke_sonobuoy("delete", "--wait") + + def _sonobuoy_status_result(self): + process = self._invoke_sonobuoy("status", "--json") + json_data = json.loads(process.stdout) + counter = Counter() + for entry in json_data["plugins"]: + logger.debug(f"plugin:{entry['plugin']}:{entry['result-status']}") + for result, count in entry["result-counts"].items(): + counter[result] += count + return counter + + def _eval_result(self, counter): + """evaluate test results and return return code""" + result_str = ', '.join(f"{counter[key]} {key}" for key in ('passed', 'failed', 'skipped')) + result_message = f"sonobuoy reports {result_str}" + if counter['failed']: + logger.error(result_message) + return 3 + logger.info(result_message) + return 0 + + def _preflight_check(self): + """ + Preflight test to ensure that everything is set up correctly for execution + """ + if not self.sonobuoy: + raise RuntimeError("sonobuoy executable not found; is it in PATH?") + + def _sonobuoy_retrieve_result(self): + """ + This method invokes sonobuoy to store the results in a subdirectory of + the working directory. 
The Junit results file contained in it is then
+        analyzed in order to interpret the relevant information it contains
+        """
+        logger.debug(f"retrieving results to {self.result_dir_name}")
+        result_dir = os.path.join(self.working_directory, self.result_dir_name)
+        if os.path.exists(result_dir):
+            raise Exception("result directory already exists")
+        os.mkdir(result_dir)
+
+        # XXX use self._invoke_sonobuoy
+        os.system(
+            # ~ f"sonobuoy retrieve {result_dir} -x --filename='{result_dir}' --kubeconfig='{self.kubeconfig_path}'"
+            f"sonobuoy retrieve {result_dir} --kubeconfig='{self.kubeconfig_path}'"
+        )
+        logger.debug(
+            f"parsing JUnit result from {result_dir + '/plugins/e2e/results/global/junit_01.xml'} "
+        )
+        xml = JUnitXml.fromfile(result_dir + "/plugins/e2e/results/global/junit_01.xml")
+        counter = Counter()
+        for suite in xml:
+            for case in suite:
+                if case.is_passed is True:  # XXX why `is True`???
+                    counter['passed'] += 1
+                elif case.is_skipped is True:
+                    counter['skipped'] += 1
+                else:
+                    counter['failed'] += 1
+                    logger.error(f"{case.name}")
+        return counter
+
+    def run(self):
+        """
+        This method is to be called to run the plugin
+        """
+        logger.info(f"running sonobuoy for testcase {self.check_name}")
+        self._preflight_check()
+        try:
+            self._sonobuoy_run()
+            return_code = self._eval_result(self._sonobuoy_status_result())
+            print(self.check_name + ": " + ("PASS", "FAIL")[min(1, return_code)])
+            return return_code
+
+            # ERROR: currently disabled due to: "error retrieving results: unexpected EOF"
+            # might be related to following bug: https://github.com/vmware-tanzu/sonobuoy/issues/1633
+            # self._sonobuoy_retrieve_result(self)
+        except BaseException:
+            logger.exception("something went wrong")
+            return 112
+        finally:
+            self._sonobuoy_delete()
diff --git a/Tests/scs-compatible-kaas.yaml b/Tests/scs-compatible-kaas.yaml
index a4010c64e..7cb2fbd58 100644
--- a/Tests/scs-compatible-kaas.yaml
+++ b/Tests/scs-compatible-kaas.yaml
@@ -9,6 +9,10 @@ modules:
   - id: cncf-k8s-conformance
     name: CNCF Kubernetes conformance
     url: https://github.com/cncf/k8s-conformance/tree/master
+    run:
+      - executable: ./kaas/sonobuoy_handler/run_sonobuoy.py
+        args: -k {subject_root}/kubeconfig.yaml -r {subject_root}/sono-results -c 'cncf-k8s-conformance' -a '--mode=certified-conformance'
+        #~ args: -k {subject_root}/kubeconfig.yaml -r {subject_root}/sono-results -c 'cncf-k8s-conformance' -a '--plugin-env e2e.E2E_DRYRUN=true'
     testcases:
       - id: cncf-k8s-conformance
         tags: [mandatory]
@@ -30,6 +34,15 @@ modules:
     testcases:
       - id: node-distribution-check
         tags: [mandatory]
+  - id: scs-0219-v1
+    name: KaaS networking
+    url: https://docs.scs.community/standards/scs-0219-v1-kaas-networking
+    run:
+      - executable: ./kaas/sonobuoy_handler/run_sonobuoy.py
+        args: -k {subject_root}/kubeconfig.yaml -r {subject_root}/sono-results -c 'kaas-networking-check' -a '--e2e-focus "NetworkPolicy"'
+    testcases:
+      - id: kaas-networking-check
+        tags: [mandatory]
 timeline:
   - date: 2024-02-28
     versions:
@@ -40,5 +53,6 @@ versions:
       - cncf-k8s-conformance
       - scs-0210-v2
       - scs-0214-v2
+      - scs-0219-v1
     targets:
       main: mandatory

From ebfaa1af4fdb1d7a3dbeb3c88261777db2bdb664 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Matthias=20B=C3=BCchse?=
Date: Thu, 21 Nov 2024 13:55:49 +0000
Subject: [PATCH 11/11] Stabilize SCS-compatible IaaS v5 (#824)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Stabilize SCS-compatible IaaS v5

make it effective from 2024-11-16 and turn v4 into warn from 2025-01-01

* Include scs-0123-v1 and
adapt test script accordingly * move manual docs check to target preview for the time being * Make it effective today, not in the past. Signed-off-by: Matthias Büchse Signed-off-by: Kurt Garloff Co-authored-by: Kurt Garloff --- .../mandatory-iaas-services.py | 122 +++++++----------- Tests/requirements.in | 5 +- Tests/requirements.txt | 21 ++- Tests/scs-compatible-iaas.yaml | 28 +++- 4 files changed, 95 insertions(+), 81 deletions(-) mode change 100644 => 100755 Tests/iaas/mandatory-services/mandatory-iaas-services.py diff --git a/Tests/iaas/mandatory-services/mandatory-iaas-services.py b/Tests/iaas/mandatory-services/mandatory-iaas-services.py old mode 100644 new mode 100755 index ab5cc0a2f..41d67960c --- a/Tests/iaas/mandatory-services/mandatory-iaas-services.py +++ b/Tests/iaas/mandatory-services/mandatory-iaas-services.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Mandatory APIs checker This script retrieves the endpoint catalog from Keystone using the OpenStack SDK and checks whether all mandatory APi endpoints, are present. @@ -26,28 +27,8 @@ block_storage_service = ["volume", "volumev3", "block-storage"] -def connect(cloud_name: str) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - :returns: openstack.connnection.Connection - """ - return openstack.connect( - cloud=cloud_name, - ) - - -def check_presence_of_mandatory_services(cloud_name: str, s3_credentials=None): - try: - connection = connect(cloud_name) - services = connection.service_catalog - except Exception as e: - print(str(e)) - raise Exception( - f"Connection to cloud '{cloud_name}' was not successfully. " - f"The Catalog endpoint could not be accessed. " - f"Please check your cloud connection and authorization." 
- ) +def check_presence_of_mandatory_services(conn: openstack.connection.Connection, s3_credentials=None): + services = conn.service_catalog if s3_credentials: mandatory_services.remove("object-store") @@ -55,25 +36,21 @@ def check_presence_of_mandatory_services(cloud_name: str, s3_credentials=None): svc_type = svc['type'] if svc_type in mandatory_services: mandatory_services.remove(svc_type) - continue - if svc_type in block_storage_service: + elif svc_type in block_storage_service: block_storage_service.remove(svc_type) bs_service_not_present = 0 if len(block_storage_service) == 3: # neither block-storage nor volume nor volumev3 is present # we must assume, that there is no volume service - logger.error("FAIL: No block-storage (volume) endpoint found.") + logger.error("No block-storage (volume) endpoint found.") mandatory_services.append(block_storage_service[0]) bs_service_not_present = 1 - if not mandatory_services: - # every mandatory service API had an endpoint - return 0 + bs_service_not_present - else: - # there were multiple mandatory APIs not found - logger.error(f"FAIL: The following endpoints are missing: " - f"{mandatory_services}") - return len(mandatory_services) + bs_service_not_present + if mandatory_services: + # some mandatory APIs were not found + logger.error(f"The following endpoints are missing: " + f"{', '.join(mandatory_services)}.") + return len(mandatory_services) + bs_service_not_present def list_containers(conn): @@ -167,8 +144,8 @@ def s3_from_ostack(creds, conn, endpoint): # pass -def check_for_s3_and_swift(cloud_name: str, s3_credentials=None): - # If we get credentials we assume, that there is no Swift and only test s3 +def check_for_s3_and_swift(conn: openstack.connection.Connection, s3_credentials=None): + # If we get credentials, we assume that there is no Swift and only test s3 if s3_credentials: try: s3 = s3_conn(s3_credentials) @@ -183,58 +160,46 @@ def check_for_s3_and_swift(cloud_name: str, s3_credentials=None): if s3_buckets == [TESTCONTNAME]: del_bucket(s3, TESTCONTNAME) # everything worked, and we don't need to test for Swift: - print("SUCCESS: S3 exists") + logger.info("SUCCESS: S3 exists") return 0 # there were no credentials given, so we assume s3 is accessable via # the service catalog and Swift might exist too - try: - connection = connect(cloud_name) - connection.authorize() - except Exception as e: - print(str(e)) - raise Exception( - f"Connection to cloud '{cloud_name}' was not successfully. " - f"The Catalog endpoint could not be accessed. " - f"Please check your cloud connection and authorization." - ) s3_creds = {} try: - endpoint = connection.object_store.get_endpoint() - except Exception as e: - logger.error( - f"FAIL: No object store endpoint found in cloud " - f"'{cloud_name}'. No testing for the s3 service possible. " - f"Details: %s", e + endpoint = conn.object_store.get_endpoint() + except Exception: + logger.exception( + "No object store endpoint found. No testing for the s3 service possible." 
) return 1 # Get S3 endpoint (swift) and ec2 creds from OpenStack (keystone) - s3_from_ostack(s3_creds, connection, endpoint) + s3_from_ostack(s3_creds, conn, endpoint) # Overrides (var names are from libs3, in case you wonder) s3_from_env(s3_creds, "HOST", "S3_HOSTNAME", "https://") s3_from_env(s3_creds, "AK", "S3_ACCESS_KEY_ID") s3_from_env(s3_creds, "SK", "S3_SECRET_ACCESS_KEY") - s3 = s3_conn(s3_creds, connection) + s3 = s3_conn(s3_creds, conn) s3_buckets = list_s3_buckets(s3) if not s3_buckets: s3_buckets = create_bucket(s3, TESTCONTNAME) assert s3_buckets # If we got till here, s3 is working, now swift - swift_containers = list_containers(connection) + swift_containers = list_containers(conn) # if not swift_containers: - # swift_containers = create_container(connection, TESTCONTNAME) + # swift_containers = create_container(conn, TESTCONTNAME) result = 0 if Counter(s3_buckets) != Counter(swift_containers): - print("WARNING: S3 buckets and Swift Containers differ:\n" - f"S3: {sorted(s3_buckets)}\nSW: {sorted(swift_containers)}") + logger.warning("S3 buckets and Swift Containers differ:\n" + f"S3: {sorted(s3_buckets)}\nSW: {sorted(swift_containers)}") result = 1 else: - print("SUCCESS: S3 and Swift exist and agree") + logger.info("SUCCESS: S3 and Swift exist and agree") # Clean up # FIXME: Cleanup created EC2 credential # if swift_containers == [TESTCONTNAME]: - # del_container(connection, TESTCONTNAME) + # del_container(conn, TESTCONTNAME) # Cleanup created S3 bucket if s3_buckets == [TESTCONTNAME]: del_bucket(s3, TESTCONTNAME) @@ -266,34 +231,47 @@ def main(): help="Enable OpenStack SDK debug logging" ) args = parser.parse_args() + logging.basicConfig( + format="%(levelname)s: %(message)s", + level=logging.DEBUG if args.debug else logging.INFO, + ) openstack.enable_logging(debug=args.debug) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your cloud " - "name or pass it via --os-cloud" - ) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + raise RuntimeError( + "You need to have the OS_CLOUD environment variable set to your " + "cloud name or pass it via --os-cloud" + ) s3_credentials = None if args.s3_endpoint: if (not args.s3_access) or (not args.s3_access_secret): - print("WARNING: test for external s3 needs access key and access secret.") + logger.warning("test for external s3 needs access key and access secret.") s3_credentials = { "AK": args.s3_access, "SK": args.s3_access_secret, "HOST": args.s3_endpoint } elif args.s3_access or args.s3_access_secret: - print("WARNING: access to s3 was given, but no endpoint provided.") + logger.warning("access to s3 was given, but no endpoint provided.") - result = check_presence_of_mandatory_services(cloud, s3_credentials) - result = result + check_for_s3_and_swift(cloud, s3_credentials) + with openstack.connect(cloud) as conn: + result = check_presence_of_mandatory_services(conn, s3_credentials) + result += check_for_s3_and_swift(conn, s3_credentials) + + print('service-apis-check: ' + ('PASS', 'FAIL')[min(1, result)]) return result if __name__ == "__main__": - main() + try: + sys.exit(main()) + except SystemExit: + raise + except BaseException as exc: + logging.debug("traceback", exc_info=True) + logging.critical(str(exc)) + sys.exit(1) diff --git a/Tests/requirements.in b/Tests/requirements.in index e2113c5b7..cb3c3bac0 100644 --- a/Tests/requirements.in 
+++ b/Tests/requirements.in @@ -1,8 +1,9 @@ aiohttp +boto3 # TODO: move into iaas/requirements.in click -kubernetes_asyncio +kubernetes_asyncio # TODO: move into kaas/requirements.in python-dateutil PyYAML -openstacksdk +openstacksdk # TODO: move into iaas/requirements.in requests tomli diff --git a/Tests/requirements.txt b/Tests/requirements.txt index bf93aff83..19f9e1990 100644 --- a/Tests/requirements.txt +++ b/Tests/requirements.txt @@ -1,5 +1,5 @@ # -# This file is autogenerated by pip-compile with Python 3.10 +# This file is autogenerated by pip-compile with Python 3.12 # by the following command: # # pip-compile requirements.in @@ -12,10 +12,14 @@ aiohttp==3.10.11 # kubernetes-asyncio aiosignal==1.3.1 # via aiohttp -async-timeout==4.0.3 - # via aiohttp attrs==24.2.0 # via aiohttp +boto3==1.35.65 + # via -r requirements.in +botocore==1.35.65 + # via + # boto3 + # s3transfer certifi==2024.7.4 # via # kubernetes-asyncio @@ -47,7 +51,10 @@ iso8601==2.1.0 # keystoneauth1 # openstacksdk jmespath==1.0.1 - # via openstacksdk + # via + # boto3 + # botocore + # openstacksdk jsonpatch==1.33 # via openstacksdk jsonpointer==3.0.0 @@ -83,6 +90,7 @@ pycparser==2.22 python-dateutil==2.9.0.post0 # via # -r requirements.in + # botocore # kubernetes-asyncio pyyaml==6.0.2 # via @@ -95,6 +103,8 @@ requests==2.32.3 # keystoneauth1 requestsexceptions==1.4.0 # via openstacksdk +s3transfer==0.10.3 + # via boto3 six==1.16.0 # via # kubernetes-asyncio @@ -105,10 +115,9 @@ stevedore==5.2.0 # keystoneauth1 tomli==2.0.1 # via -r requirements.in -typing-extensions==4.12.2 - # via dogpile-cache urllib3==2.2.2 # via + # botocore # kubernetes-asyncio # requests yarl==1.17.2 diff --git a/Tests/scs-compatible-iaas.yaml b/Tests/scs-compatible-iaas.yaml index 5ad119fbf..2d9b8b192 100644 --- a/Tests/scs-compatible-iaas.yaml +++ b/Tests/scs-compatible-iaas.yaml @@ -206,6 +206,21 @@ modules: tags: [availability-zones] description: > Note: manual check! Must fulfill all requirements of + - id: scs-0123-v1 + name: Mandatory and Supported IaaS Services + url: https://docs.scs.community/standards/scs-0123-v1-mandatory-and-supported-IaaS-services + run: + - executable: ./iaas/mandatory-services/mandatory-iaas-services.py + args: --os-cloud {os_cloud} --debug + testcases: + - id: service-apis-check + tags: [mandatory] + description: > + Must fulfill all requirements of (except for documentation requirements, which are tested manually with service-apis-docs-check). + - id: service-apis-docs-check + tags: [service-apis-docs] + description: > + Note: manual check! Must fulfill documentation requirements of . - id: scs-0302-v1 name: Domain Manager Role url: https://docs.scs.community/standards/scs-0302-v1-domain-manager-role @@ -218,6 +233,16 @@ modules: description: > Note: manual check! Must fulfill all requirements of timeline: + - date: 2025-01-01 + versions: + v5: effective + v4: warn + v3: deprecated + - date: 2024-11-21 + versions: + v5: effective + v4: effective + v3: deprecated - date: 2024-11-08 versions: v5: draft @@ -261,6 +286,7 @@ timeline: v1: effective versions: - version: v5 + stabilized_at: 2024-11-14 include: - opc-v2022.11 - scs-0100-v3.1 @@ -278,7 +304,7 @@ versions: - scs-0302-v1 targets: main: mandatory - preview: domain-manager/availability-zones + preview: domain-manager/availability-zones/service-apis-docs - version: v4 stabilized_at: 2024-02-28 include: