diff --git a/.github/scs-compliance-check/openstack/clouds.yaml b/.github/scs-compliance-check/openstack/clouds.yaml index 325f6e33b..63a2b9805 100644 --- a/.github/scs-compliance-check/openstack/clouds.yaml +++ b/.github/scs-compliance-check/openstack/clouds.yaml @@ -89,6 +89,14 @@ clouds: auth: auth_url: https://identity.l1a.cloudandheat.com/v3 application_credential_id: "7ab4e3339ea04255bc131868974cfe63" + scaleup-occ2: + auth_type: v3applicationcredential + auth: + auth_url: https://keystone.occ2.scaleup.cloud + application_credential_id: "5d2eea4e8bf8448092490b4190d4430a" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 syseleven-dus2: interface: public identity_api_verion: 3 diff --git a/.github/workflows/check-scaleup-occ2-v4.yml b/.github/workflows/check-scaleup-occ2-v4.yml new file mode 100644 index 000000000..b5bf70a2d --- /dev/null +++ b/.github/workflows/check-scaleup-occ2-v4.yml @@ -0,0 +1,23 @@ +name: "Compliance IaaS v4 of scaleup-occ2" + +on: + # Trigger compliance check every day at 4:30 UTC + schedule: + - cron: '30 4 * * *' + # Trigger compliance check after Docker image has been built + workflow_run: + workflows: [Build and publish scs-compliance-check Docker image] + types: + - completed + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + check-scaleup-occ2: + uses: ./.github/workflows/scs-compliance-check-with-application-credential.yml + with: + version: v4 + layer: iaas + cloud: scaleup-occ2 + secret_name: OS_PASSWORD_SCALEUP_OCC2 + secrets: inherit diff --git a/.gitignore b/.gitignore index 4595d59ea..2b83a0983 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,7 @@ **/__pycache__/ .venv/ .idea +.sandbox .DS_Store node_modules Tests/kaas/results/ diff --git a/.zuul.d/secure.yaml b/.zuul.d/secure.yaml index 318a1b6b0..78dbb906f 100644 --- a/.zuul.d/secure.yaml +++ b/.zuul.d/secure.yaml @@ -233,6 +233,28 @@ VCsXjf0qBBMrzz6HP9z95Bk44fiJ3L/LkA3Iij961dYrQXbZKDrKOiX/QPwrcSrVmjmew UbPexJFHgvTCqjadoLejSt9cUd9lVzhuzLJ8CS+CcCMbZOno6qathrd2B88riQaPNIGNu gfkNT9R63ZzKB1qIA2n5RZi7SH9DPIUd0AwLMn2bhp3uok5pNAPP/4/1RkQiCA= + scaleup_occ2_ac_id: !encrypted/pkcs1-oaep + - N2duwkcMdOXw6wF0deE/0BPM1M/URt3eWmrnBJ89VHeCDENGfTfDHcWPYs3wW4rSRCG6t + gqgNuA049OvOhL7rtjNHZ6yIj6xEHH/YdqT4UxjXPS9GFwoJXDtE8rIGjK3KU8GfUgKnG + DLplyyzGzx5j39rJAS628InmC56aip47rO1J4HQE9Ku25Wb06R7ykx+0ZOWr0HXjV/VsV + uwfyL+DPgewbL+4u8/XkcI0FwAM9/KkF/CcYUq5aVMdQS2foatTQW0C2idg+pffSTRaau + VF44rkVfzsCOz4MYAFpLIaL9Zxx1FifaPOd0oi6rEFjGd6vFtFCHk1BRpKmOITLyx3Te5 + zVffSkQAsqpn/4er8800bjQzxXvqmQmR0QwPM7dhvRnrNbTSCA/Awm5BPaUgeCZFN3MPN + Mc0XIaEwjuJvDK6fqj5tJrVIs5bxAmqRDj8d76AlJcOdDxHicTHgR3aUG4AKOWkUsskgQ + 3xR8lPh31O/HgzG9tq6o/DCPA1O9wyyOyT7KwJAaRASPCA1O80ZAzhZUNUVyut6dYEwaS + QXP4IaEJOxP8EkxR7FDEuO99UFZ7TXQ1CF7ots4wIs5tEpQvcdLnvBjJckp0fNBFTuGMm + FCvhgBK30NC93U4DxQv6xZBhqtvHYjHcTOXvz2fryRJT2teMN+eI+RDdV1Jj8Y= + scaleup_occ2_ac_secret: !encrypted/pkcs1-oaep + - LfUHhslK41JDp3CpslWGGA4bZ3udZh4KnytcXohkdbchb8QVt8eNc4nD0ti0/XS18YKwq + DlHOWw2rDJZ8RGIXENVUYzDbECoBErE8IAqQE0q3oS/8Oq0NYOFTGvvlKuue7U4s87Pwi + YFi+Q0Rv7vO8cWFVtbRHK+Hw6pC42Biq2T+tuVBCLqylIMViXpuEy9UpFLEv59zr6EHa9 + uB3xkjnpWuabe7vrG+LQHc0pJ5tNhcLiOnJggU5Ef02FBy+t6xvuJW8f6cXCnRRj1q0fl + D/vTmC7avwHnWC+J4WLL69HCwW05I7iHftVSWOXQgRzMBd4D4ND2OXfsWElu0eOV5XG6X + JsQH8lDnVN/lqaDAOYR4fk4+9yt3RURwvNL5FUnDK1t7LAI4X0gcvLrQAfzgOlpBYDXSK + 0kbUzqwivuw1v2zO/gxQU+J28PsOfZaKf/7ZZyj3e/tiq4wBpvPb0mVBwWXigKqzr+QED + Iy2u/g3x2qdcTpXR/RPq+xiXM2B2rw1V5gdkscdL+avXtTF7hT9HrcayHx3HDZ/h6aGPD + 
RWIJ8bstl+x2Q4zExgR13amWM8ZR1iLGCN20U/ZAaqANCqjDbrSVSTjTPzYtNFwAXwxkB + 3NHhPDHZ1MIdr6IJE4IZ4TCMsIeTA2UHNfF4RCzeDSIJ+CXOQxUFWOxZkf97WY= syseleven_dus2_ac_id: !encrypted/pkcs1-oaep - SjwtIvJO7DkLJDmS+T/Z5utFBa22hmPRBd8mzonJHGgURB2W7fmXFreD9NPrLfbt7ujKi KNqJm8k1Vr1F3Mu+Osr0BWSnq5makwVt2ikBY4qPbL8iyVXsByaT/HNPLCOokqy+REpfu diff --git a/Standards/scs-0102-v1-image-metadata.md b/Standards/scs-0102-v1-image-metadata.md index 8b0ab98ba..18d42adf7 100644 --- a/Standards/scs-0102-v1-image-metadata.md +++ b/Standards/scs-0102-v1-image-metadata.md @@ -1,5 +1,5 @@ --- -title: SCS Image Metadata Standard +title: SCS Image Metadata type: Standard stabilized_at: 2022-10-31 status: Stable diff --git a/Standards/scs-0115-v1-default-rules-for-security-groups.md b/Standards/scs-0115-v1-default-rules-for-security-groups.md index b118dcf1f..57e28cd16 100644 --- a/Standards/scs-0115-v1-default-rules-for-security-groups.md +++ b/Standards/scs-0115-v1-default-rules-for-security-groups.md @@ -25,7 +25,7 @@ Administrator (abbr. Admin) ### Default Security Groups, Custom Security Groups and default Security Group Rules -To properly understand the concepts in this standard and avoid ambiguity, is very important to distinguish between the following similar-sounding but different resources in the OpenStack Networking API: +To properly understand the concepts in this standard and avoid ambiguity, it is very important to distinguish between the following similar-sounding but different resources in the OpenStack Networking API: 1. default Security Group 2. custom Security Group @@ -59,10 +59,10 @@ Therefore, this standard proposes default Security Group rules that MUST be set ## Design Considerations -Up to the 2023.1 release (antelope) the default Security Group rules are hardcoded in the OpenStack code. -We should not require to change this behavior through code changes in deployments. +Up to the 2023.1 release (Antelope) the default Security Group rules are defined in the OpenStack code. +We should not require changing this behavior through code changes in deployments. -Beginning with the 2023.2 release (bobcat) the default Security Group rules can now be edited by administrators through an API. +Beginning with the 2023.2 release (Bobcat) the default Security Group rules can now be edited by administrators through an API. All rules that should be present as default in Security Groups have to be configured by admins through this API. There are two ways to approach a standard for the default rules of Security Groups. diff --git a/Standards/scs-0116-w1-key-manager-implementation-testing.md b/Standards/scs-0116-w1-key-manager-implementation-testing.md index 0ca20bf2e..74d0d2c49 100644 --- a/Standards/scs-0116-w1-key-manager-implementation-testing.md +++ b/Standards/scs-0116-w1-key-manager-implementation-testing.md @@ -44,6 +44,11 @@ This can be done with a small change in the policy.yaml file. The `creator` has The check for the presence of a Key Manager is done with a test script, that checks the presence of a Key Manager service in the catalog endpoint of Openstack. This check can eventually be moved to the checks for the mandatory an supported service/API list, in case of a promotion of the Key Manager to the mandatory list. +### Implementation + +The script [`check-for-key-manager.py`](https://github.com/SovereignCloudStack/standards/blob/main/Tests/iaas/key-manager/check-for-key-manager.py) +connects to OpenStack and performs the checks described in this section. 
+ ## Manual Tests It is not possible to check a deployment for a correctly protected Master KEK automatically from the outside. diff --git a/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md b/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md index 069fdfc52..45f494368 100644 --- a/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md +++ b/Standards/scs-0118-v1-taxonomy-of-failsafe-levels.md @@ -1,5 +1,5 @@ --- -title: Taxonomy of Failsafe Levels +title: SCS Taxonomy of Failsafe Levels type: Decision Record status: Draft track: IaaS diff --git a/Drafts/node-to-node-encryption.md b/Standards/scs-0122-v1-node-to-node-encryption.md similarity index 99% rename from Drafts/node-to-node-encryption.md rename to Standards/scs-0122-v1-node-to-node-encryption.md index 4234b64db..f3d298706 100644 --- a/Drafts/node-to-node-encryption.md +++ b/Standards/scs-0122-v1-node-to-node-encryption.md @@ -1,7 +1,7 @@ --- title: _End-to-End Encryption between Customer Workloads_ type: Decision Record -status: Proposal +status: Draft track: IaaS --- diff --git a/Tests/config.toml b/Tests/config.toml index 0f270cd4b..a0173c25d 100644 --- a/Tests/config.toml +++ b/Tests/config.toml @@ -26,6 +26,7 @@ subjects = [ "poc-kdo", "poc-wgcloud", "regio-a", + "scaleup-occ2", "syseleven-dus2", "syseleven-ham1", "wavestack", diff --git a/Tests/iaas/key-manager/check-for-key-manager.py b/Tests/iaas/key-manager/check-for-key-manager.py old mode 100644 new mode 100755 index 6b5a5b70a..dae49acdd --- a/Tests/iaas/key-manager/check-for-key-manager.py +++ b/Tests/iaas/key-manager/check-for-key-manager.py @@ -1,135 +1,84 @@ -"""Mandatory APIs checker +#!/usr/bin/env python3 +"""Key Manager service checker for scs-0116-v1-key-manager-standard.md + This script retrieves the endpoint catalog from Keystone using the OpenStack -SDK and checks whether a key manager APi endpoint is present. +SDK and checks whether a key manager API endpoint is present. +It then checks whether a user with the maximum of a member role can create secrets. +This will only work after policy adjustments or with the new secure RBAC roles and policies. The script relies on an OpenStack SDK compatible clouds.yaml file for authentication with Keystone. """ import argparse -import json import logging import os +import sys import openstack - logger = logging.getLogger(__name__) -def connect(cloud_name: str) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - :returns: openstack.connnection.Connection - """ - return openstack.connect( - cloud=cloud_name, - ) +def initialize_logging(): + logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO) -def check_for_member_role(conn: openstack.connection.Connection - ) -> None: - """Checks whether the current user has at maximum privileges - of the member role. - :param connection: - The current connection to an OpenStack cloud. - :returns: boolean, when role with most priviledges is member - """ +def check_for_member_role(conn: openstack.connection.Connection) -> None: + """Checks whether the current user has at maximum privileges of the member role. 
- auth_data = conn.auth - auth_dict = { - "identity": { - "methods": ["password"], - "password": { - "user": { - "name": auth_data['username'], - "domain": {"name": auth_data['project_domain_name']}, - "password": auth_data['password'] - } - }, - }, - "scope": { - "project": { - "domain": {"name": auth_data['project_domain_name']}, - "name": auth_data['project_name'] - } - } - } - - has_member_role = False - request = conn.session.request(auth_data['auth_url'] + '/v3/auth/tokens', - 'POST', - json={'auth': auth_dict}) - for role in json.loads(request.content)["token"]["roles"]: - role_name = role["name"] - if role_name == "admin" or role_name == "manager": - return False - elif role_name == "member": - print("User has member role.") - has_member_role = True - elif role_name == "reader": - print("User has reader role.") - else: - print("User has custom role.") - return False - return has_member_role - - -def check_presence_of_key_manager(cloud_name: str): + :param conn: connection to an OpenStack cloud. + :returns: boolean, when role with most privileges is member + """ + role_names = set(conn.session.auth.get_access(conn.session).role_names) + if role_names & {"admin", "manager"}: + return False + if "reader" in role_names: + logger.info("User has reader role.") + custom_roles = sorted(role_names - {"reader", "member"}) + if custom_roles: + logger.info(f"User has custom roles {', '.join(custom_roles)}.") + return "member" in role_names + + +def check_presence_of_key_manager(conn: openstack.connection.Connection) -> None: try: - connection = connect(cloud_name) - services = connection.service_catalog - except Exception as e: - print(str(e)) - raise Exception( - f"Connection to cloud '{cloud_name}' was not successfully. " - f"The Catalog endpoint could not be accessed. " - f"Please check your cloud connection and authorization." - ) + services = conn.service_catalog + except Exception: + logger.critical("Could not access Catalog endpoint.") + raise for svc in services: - svc_type = svc['type'] + svc_type = svc["type"] if svc_type == "key-manager": # key-manager is present # now we want to check whether a user with member role # can create and access secrets - check_key_manager_permissions(connection) - return 0 + logger.info("Key Manager is present") + return True + - # we did not find the key-manager service - logger.warning("There is no key-manager endpoint in the cloud.") - # we do not fail, until a key-manager MUST be present - return 0 +def _find_secret(conn: openstack.connection.Connection, secret_name_or_id: str): + """Replacement method for finding secrets. + + Mimicks the behavior of Connection.key_manager.find_secret() + but fixes an issue with the internal implementation raising an + exception due to an unexpected microversion parameter. + """ + secrets = conn.key_manager.secrets() + for s in secrets: + if s.name == secret_name_or_id or s.id == secret_name_or_id: + return s -def check_key_manager_permissions(conn: openstack.connection.Connection - ) -> None: +def check_key_manager_permissions(conn: openstack.connection.Connection) -> None: """ After checking that the current user only has the member and maybe the reader role, this method verifies that the user with a member role has sufficient access to the Key Manager API functionality. """ secret_name = "scs-member-role-test-secret" - if not check_for_member_role(conn): - logger.warning("Cannot test key-manager permissions. 
" - "User has wrong roles") - return None - - def _find_secret(secret_name_or_id: str): - """Replacement method for finding secrets. - - Mimicks the behavior of Connection.key_manager.find_secret() - but fixes an issue with the internal implementation raising an - exception due to an unexpected microversion parameter. - """ - secrets = conn.key_manager.secrets() - for s in secrets: - if s.name == secret_name_or_id or s.id == secret_name_or_id: - return s - return None - try: - existing_secret = _find_secret(secret_name) + existing_secret = _find_secret(conn, secret_name) if existing_secret: conn.key_manager.delete_secret(existing_secret) @@ -137,54 +86,71 @@ def _find_secret(secret_name_or_id: str): name=secret_name, payload_content_type="text/plain", secret_type="opaque", - payload="foo" - ) - - new_secret = _find_secret(secret_name) - assert new_secret, ( - f"Secret created with name '{secret_name}' was not discoverable by " - f"the user" - ) - conn.key_manager.delete_secret(new_secret) - except openstack.exceptions.ForbiddenException as e: - print( - "Users of the 'member' role can use Key Manager API: FAIL" + payload="foo", ) - print( - f"ERROR: {str(e)}" + try: + new_secret = _find_secret(conn, secret_name) + if not new_secret: + raise ValueError(f"Secret '{secret_name}' was not discoverable by the user") + finally: + conn.key_manager.delete_secret(new_secret) + except openstack.exceptions.ForbiddenException: + logger.debug('exception details', exc_info=True) + logger.error( + "Users with the 'member' role can use Key Manager API: FAIL" ) - exit(1) - print( - "Users of the 'member' role can use Key Manager API: PASS" + return 1 + logger.info( + "Users with the 'member' role can use Key Manager API: PASS" ) def main(): - parser = argparse.ArgumentParser( - description="SCS Mandatory IaaS Service Checker") + initialize_logging() + parser = argparse.ArgumentParser(description="SCS Mandatory IaaS Service Checker") parser.add_argument( - "--os-cloud", type=str, + "--os-cloud", + type=str, help="Name of the cloud from clouds.yaml, alternative " - "to the OS_CLOUD environment variable" + "to the OS_CLOUD environment variable", ) parser.add_argument( - "--debug", action="store_true", - help="Enable OpenStack SDK debug logging" + "--debug", action="store_true", help="Enable OpenStack SDK debug logging" ) args = parser.parse_args() - openstack.enable_logging(debug=args.debug) + # @mbuechse: I think this is so much as to be unusable! + # (If necessary, a developer can always uncomment) + # openstack.enable_logging(debug=args.debug) + if args.debug: + logger.setLevel(logging.DEBUG) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your cloud " - "name or pass it via --os-cloud" - ) - - return check_presence_of_key_manager(cloud) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + logger.critical( + "You need to have the OS_CLOUD environment variable set to your cloud " + "name or pass it via --os-cloud" + ) + return 2 + + with openstack.connect(cloud=cloud) as conn: + if not check_for_member_role(conn): + logger.critical("Cannot test key-manager permissions. 
User has wrong roles") + return 2 + if check_presence_of_key_manager(conn): + return check_key_manager_permissions(conn) + else: + # not an error, because key manager is merely recommended + logger.warning("There is no key-manager endpoint in the cloud.") if __name__ == "__main__": - main() + try: + sys.exit(main() or 0) + except SystemExit as e: + if e.code < 2: + print("key-manager-check: " + ('PASS', 'FAIL')[min(1, e.code)]) + raise + except BaseException: + logger.critical("exception", exc_info=True) + sys.exit(2) diff --git a/Tests/iaas/security-groups/default-security-group-rules.py b/Tests/iaas/security-groups/default-security-group-rules.py old mode 100644 new mode 100755 index 773cf0bb8..def511956 --- a/Tests/iaas/security-groups/default-security-group-rules.py +++ b/Tests/iaas/security-groups/default-security-group-rules.py @@ -1,130 +1,181 @@ +#!/usr/bin/env python3 """Default Security Group Rules Checker This script tests the absence of any ingress default security group rule except for ingress rules from the same Security Group. Furthermore the presence of default rules for egress traffic is checked. """ +import argparse +from collections import Counter +import logging +import os +import sys import openstack -import os -import argparse +from openstack.exceptions import ResourceNotFound +logger = logging.getLogger(__name__) -def connect(cloud_name: str) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud +SG_NAME = "scs-test-default-sg" +DESCRIPTION = "scs-test-default-sg" - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - :returns: openstack.connnection.Connection +def check_default_rules(rules, short=False): """ - return openstack.connect( - cloud=cloud_name, - ) + counts all verall ingress rules and egress rules, depending on the requested testing mode - -def test_rules(cloud_name: str): - try: - connection = connect(cloud_name) - rules = connection.network.default_security_group_rules() - except Exception as e: - print(str(e)) - raise Exception( - f"Connection to cloud '{cloud_name}' was not successfully. " - f"The default Security Group Rules could not be accessed. " - f"Please check your cloud connection and authorization." - ) - - # count all overall ingress rules and egress rules. 
- ingress_rules = 0 - ingress_from_same_sg = 0 - egress_rules = 0 - egress_ipv4_default_sg = 0 - egress_ipv4_custom_sg = 0 - egress_ipv6_default_sg = 0 - egress_ipv6_custom_sg = 0 + :param bool short + if short is True, the testing mode is set on short for older OpenStack versions + """ + ingress_rules = egress_rules = 0 + egress_vars = {'IPv4': {}, 'IPv6': {}} + for key, value in egress_vars.items(): + value['default'] = 0 + if not short: + value['custom'] = 0 if not rules: - print("No default security group rules defined.") - else: - for rule in rules: - direction = rule.direction - ethertype = rule.ethertype - r_custom_sg = rule.used_in_non_default_sg - r_default_sg = rule.used_in_default_sg - if direction == "ingress": - ingress_rules += 1 + logger.info("No default security group rules defined.") + for rule in rules: + direction = rule["direction"] + ethertype = rule["ethertype"] + if direction == "ingress": + if not short: # we allow ingress from the same security group # but only for the default security group - r_group_id = rule.remote_group_id - if (r_group_id == "PARENT" and not r_custom_sg): - ingress_from_same_sg += 1 - elif direction == "egress" and ethertype == "IPv4": - egress_rules += 1 - if rule.remote_ip_prefix: - # this rule does not allow traffic to all external ips - continue - if r_custom_sg: - egress_ipv4_custom_sg += 1 - if r_default_sg: - egress_ipv4_default_sg += 1 - elif direction == "egress" and ethertype == "IPv6": - egress_rules += 1 - if rule.remote_ip_prefix: - # this rule does not allow traffic to all external ips + if rule.remote_group_id == "PARENT" and not rule["used_in_non_default_sg"]: continue - if r_custom_sg: - egress_ipv6_custom_sg += 1 - if r_default_sg: - egress_ipv6_default_sg += 1 - - # test whether there are no other than the allowed ingress rules - assert ingress_rules == ingress_from_same_sg, ( - f"Expected only ingress rules for default security groups, " - f"that allow ingress traffic from the same group. " - f"But there are more - in total {ingress_rules} ingress rules. " - f"There should be only {ingress_from_same_sg} ingress rules.") - assert egress_rules > 0, ( - f"Expected to have more than {egress_rules} egress rules present.") - var_list = [egress_ipv4_default_sg, egress_ipv4_custom_sg, - egress_ipv6_default_sg, egress_ipv6_custom_sg] - assert all([var > 0 for var in var_list]), ( - "Not all expected egress rules are present. " - "Expected rules for egress for IPv4 and IPv6 " - "both for default and custom security groups.") - - result_dict = { - "Ingress Rules": ingress_rules, - "Egress Rules": egress_rules - } - return result_dict + ingress_rules += 1 + elif direction == "egress" and ethertype in egress_vars: + egress_rules += 1 + if short: + egress_vars[ethertype]['default'] += 1 + continue + if rule.remote_ip_prefix: + # this rule does not allow traffic to all external ips + continue + # note: these two are not mutually exclusive + if rule["used_in_default_sg"]: + egress_vars[ethertype]['default'] += 1 + if rule["used_in_non_default_sg"]: + egress_vars[ethertype]['custom'] += 1 + # test whether there are no unallowed ingress rules + if ingress_rules: + logger.error(f"Expected no default ingress rules, found {ingress_rules}.") + # test whether all expected egress rules are present + missing = [(key, key2) for key, val in egress_vars.items() for key2, val2 in val.items() if not val2] + if missing: + logger.error( + "Expected rules for egress for IPv4 and IPv6 both for default and custom security groups. 
" + f"Missing rule types: {', '.join(str(x) for x in missing)}" + ) + logger.info(str({ + "Unallowed Ingress Rules": ingress_rules, + "Egress Rules": egress_rules, + })) + + +def create_security_group(conn, sg_name: str = SG_NAME, description: str = DESCRIPTION): + """Create security group in openstack + + :returns: + ~openstack.network.v2.security_group.SecurityGroup: The new security group or None + """ + sg = conn.network.create_security_group(name=sg_name, description=description) + return sg.id + + +def delete_security_group(conn, sg_id): + conn.network.delete_security_group(sg_id) + # in case of a successful delete finding the sg will throw an exception + try: + conn.network.find_security_group(name_or_id=sg_id) + except ResourceNotFound: + logger.debug(f"Security group {sg_id} was deleted successfully.") + except Exception: + logger.critical(f"Security group {sg_id} was not deleted successfully") + raise + + +def altern_test_rules(connection: openstack.connection.Connection): + sg_id = create_security_group(connection) + try: + sg = connection.network.find_security_group(name_or_id=sg_id) + check_default_rules(sg.security_group_rules, short=True) + finally: + delete_security_group(connection, sg_id) + + +def test_rules(connection: openstack.connection.Connection): + try: + rules = list(connection.network.default_security_group_rules()) + except ResourceNotFound: + logger.info( + "API call failed. OpenStack components might not be up to date. " + "Falling back to old-style test method. " + ) + logger.debug("traceback", exc_info=True) + altern_test_rules(connection) + else: + check_default_rules(rules) + + +class CountingHandler(logging.Handler): + def __init__(self, level=logging.NOTSET): + super().__init__(level=level) + self.bylevel = Counter() + + def handle(self, record): + self.bylevel[record.levelno] += 1 def main(): parser = argparse.ArgumentParser( - description="SCS Default Security Group Rules Checker") + description="SCS Default Security Group Rules Checker", + ) parser.add_argument( - "--os-cloud", type=str, + "--os-cloud", + type=str, help="Name of the cloud from clouds.yaml, alternative " - "to the OS_CLOUD environment variable" + "to the OS_CLOUD environment variable", ) parser.add_argument( - "--debug", action="store_true", - help="Enable OpenStack SDK debug logging" + "--debug", action="store_true", help="Enable debug logging", ) args = parser.parse_args() openstack.enable_logging(debug=args.debug) + logging.basicConfig( + format="%(levelname)s: %(message)s", + level=logging.DEBUG if args.debug else logging.INFO, + ) + + # count the number of log records per level (used for summary and return code) + counting_handler = CountingHandler(level=logging.INFO) + logger.addHandler(counting_handler) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your cloud " - "name or pass it via --os-cloud" - ) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + raise ValueError( + "You need to have the OS_CLOUD environment variable set to your cloud " + "name or pass it via --os-cloud" + ) - print(test_rules(cloud)) + with openstack.connect(cloud) as conn: + test_rules(conn) + + c = counting_handler.bylevel + logger.debug(f"Total critical / error / warning: {c[logging.CRITICAL]} / {c[logging.ERROR]} / {c[logging.WARNING]}") + if not c[logging.CRITICAL]: + print("security-groups-default-rules-check: " + 
('PASS', 'FAIL')[min(1, c[logging.ERROR])]) + return min(127, c[logging.CRITICAL] + c[logging.ERROR]) # cap at 127 due to OS restrictions if __name__ == "__main__": - main() + try: + sys.exit(main()) + except SystemExit: + raise + except BaseException as exc: + logging.debug("traceback", exc_info=True) + logging.critical(str(exc)) + sys.exit(1) diff --git a/Tests/iaas/volume-backup/volume-backup-tester.py b/Tests/iaas/volume-backup/volume-backup-tester.py old mode 100644 new mode 100755 index f4fa9522d..bcbb89664 --- a/Tests/iaas/volume-backup/volume-backup-tester.py +++ b/Tests/iaas/volume-backup/volume-backup-tester.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Volume Backup API tester for Block Storage API This test script executes basic operations on the Block Storage API centered @@ -14,7 +15,9 @@ import argparse import getpass +import logging import os +import sys import time import typing @@ -29,28 +32,23 @@ WAIT_TIMEOUT = 60 -def connect(cloud_name: str, password: typing.Optional[str] = None - ) -> openstack.connection.Connection: - """Create a connection to an OpenStack cloud - - :param string cloud_name: - The name of the configuration to load from clouds.yaml. - - :param string password: - Optional password override for the connection. - - :returns: openstack.connnection.Connection - """ - - if password: - return openstack.connect( - cloud=cloud_name, - password=password - ) - else: - return openstack.connect( - cloud=cloud_name, - ) +def wait_for_resource( + get_func: typing.Callable[[str], openstack.resource.Resource], + resource_id: str, + expected_status=("available", ), + timeout=WAIT_TIMEOUT, +) -> None: + seconds_waited = 0 + resource = get_func(resource_id) + while resource is None or resource.status not in expected_status: + time.sleep(1.0) + seconds_waited += 1 + if seconds_waited >= timeout: + raise RuntimeError( + f"Timed out after {seconds_waited} s: waiting for resource {resource_id} " + f"to be in status {expected_status} (current: {resource and resource.status})" + ) + resource = get_func(resource_id) def test_backup(conn: openstack.connection.Connection, @@ -64,133 +62,90 @@ def test_backup(conn: openstack.connection.Connection, """ # CREATE VOLUME - print("Creating volume ...") - volume = conn.block_storage.create_volume( - name=f"{prefix}volume", - size=1 - ) - assert volume is not None, ( - "Initial volume creation failed" - ) + volume_name = f"{prefix}volume" + logging.info(f"Creating volume '{volume_name}' ...") + volume = conn.block_storage.create_volume(name=volume_name, size=1) + if volume is None: + raise RuntimeError(f"Creation of initial volume '{volume_name}' failed") volume_id = volume.id - assert conn.block_storage.get_volume(volume_id) is not None, ( - "Retrieving initial volume by ID failed" - ) + if conn.block_storage.get_volume(volume_id) is None: + raise RuntimeError(f"Retrieving initial volume by ID '{volume_id}' failed") - print( + logging.info( f"↳ waiting for volume with ID '{volume_id}' to reach status " f"'available' ..." 
) - seconds_waited = 0 - while conn.block_storage.get_volume(volume_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for volume to reach status " - f"'available' (volume id: {volume_id}) after {seconds_waited} " - f"seconds" - ) - print("Create empty volume: PASS") + wait_for_resource(conn.block_storage.get_volume, volume_id, timeout=timeout) + logging.info("Create empty volume: PASS") # CREATE BACKUP - print("Creating backup from volume ...") - backup = conn.block_storage.create_backup( - name=f"{prefix}volume-backup", - volume_id=volume_id - ) - assert backup is not None, ( - "Backup creation failed" - ) + logging.info("Creating backup from volume ...") + backup = conn.block_storage.create_backup(name=f"{prefix}volume-backup", volume_id=volume_id) + if backup is None: + raise RuntimeError("Backup creation failed") backup_id = backup.id - assert conn.block_storage.get_backup(backup_id) is not None, ( - "Retrieving backup by ID failed" - ) + if conn.block_storage.get_backup(backup_id) is None: + raise RuntimeError("Retrieving backup by ID failed") - print(f"↳ waiting for backup '{backup_id}' to become available ...") - seconds_waited = 0 - while conn.block_storage.get_backup(backup_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for backup to reach status " - f"'available' (backup id: {backup_id}) after {seconds_waited} " - f"seconds" - ) - print("Create backup from volume: PASS") + logging.info(f"↳ waiting for backup '{backup_id}' to become available ...") + wait_for_resource(conn.block_storage.get_backup, backup_id, timeout=timeout) + logging.info("Create backup from volume: PASS") # RESTORE BACKUP - print("Restoring backup to volume ...") restored_volume_name = f"{prefix}restored-backup" - conn.block_storage.restore_backup( - backup_id, - name=restored_volume_name - ) + logging.info(f"Restoring backup to volume '{restored_volume_name}' ...") + conn.block_storage.restore_backup(backup_id, name=restored_volume_name) - print( + logging.info( f"↳ waiting for restoration target volume '{restored_volume_name}' " f"to be created ..." ) - seconds_waited = 0 - while conn.block_storage.find_volume(restored_volume_name) is None: - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for restored volume to be created " - f"(volume name: {restored_volume_name}) after {seconds_waited} " - f"seconds" - ) + wait_for_resource(conn.block_storage.find_volume, restored_volume_name, timeout=timeout) # wait for the volume restoration to finish - print( + logging.info( f"↳ waiting for restoration target volume '{restored_volume_name}' " f"to reach 'available' status ..." 
) volume_id = conn.block_storage.find_volume(restored_volume_name).id - while conn.block_storage.get_volume(volume_id).status != "available": - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for restored volume reach status " - f"'available' (volume id: {volume_id}) after {seconds_waited} " - f"seconds" - ) - print("Restore volume from backup: PASS") + wait_for_resource(conn.block_storage.get_volume, volume_id, timeout=timeout) + logging.info("Restore volume from backup: PASS") def cleanup(conn: openstack.connection.Connection, prefix=DEFAULT_PREFIX, - timeout=WAIT_TIMEOUT): + timeout=WAIT_TIMEOUT) -> bool: """ Looks up volume and volume backup resources matching the given prefix and deletes them. + Returns False if there were any errors during cleanup which might leave + resources behind. Otherwise returns True to indicate cleanup success. """ - def wait_for_resource(resource_type: str, resource_id: str, - expected_status="available") -> None: - seconds_waited = 0 - get_func = getattr(conn.block_storage, f"get_{resource_type}") - while get_func(resource_id).status != expected_status: - time.sleep(1.0) - seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for {resource_type} during " - f"cleanup to be in status '{expected_status}' " - f"({resource_type} id: {resource_id}) after {seconds_waited} " - f"seconds" - ) - - print(f"\nPerforming cleanup for resources with the " - f"'{prefix}' prefix ...") + logging.info(f"Performing cleanup for resources with the '{prefix}' prefix ...") + cleanup_issues = 0 # count failed cleanup operations backups = conn.block_storage.backups() for backup in backups: - if backup.name.startswith(prefix): - try: - wait_for_resource("backup", backup.id) - except openstack.exceptions.ResourceNotFound: - # if the resource has vanished on - # its own in the meantime ignore it - continue - print(f"↳ deleting volume backup '{backup.id}' ...") + if not backup.name.startswith(prefix): + continue + try: + # we can only delete if status is available or error, so try and wait + wait_for_resource( + conn.block_storage.get_backup, + backup.id, + expected_status=("available", "error"), + timeout=timeout, + ) + logging.info(f"↳ deleting volume backup '{backup.id}' ...") conn.block_storage.delete_backup(backup.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on its own in the meantime ignore it + continue + except Exception as e: + # Most common exception would be a timeout in wait_for_resource. + # We do not need to increment cleanup_issues here since + # any remaining ones will be caught in the next loop down below anyway. 
+ logging.debug("traceback", exc_info=True) + logging.warning(str(e)) # wait for all backups to be cleaned up before attempting to remove volumes seconds_waited = 0 @@ -200,22 +155,42 @@ def wait_for_resource(resource_type: str, resource_id: str, ) > 0: time.sleep(1.0) seconds_waited += 1 - assert seconds_waited < timeout, ( - f"Timeout reached while waiting for all backups with prefix " - f"'{prefix}' to finish deletion" - ) + if seconds_waited >= timeout: + cleanup_issues += 1 + logging.warning( + f"Timeout reached while waiting for all backups with prefix " + f"'{prefix}' to finish deletion during cleanup after " + f"{seconds_waited} seconds" + ) + break volumes = conn.block_storage.volumes() for volume in volumes: - if volume.name.startswith(prefix): - try: - wait_for_resource("volume", volume.id) - except openstack.exceptions.ResourceNotFound: - # if the resource has vanished on - # its own in the meantime ignore it - continue - print(f"↳ deleting volume '{volume.id}' ...") + if not volume.name.startswith(prefix): + continue + try: + wait_for_resource( + conn.block_storage.get_volume, + volume.id, + expected_status=("available", "error"), + timeout=timeout, + ) + logging.info(f"↳ deleting volume '{volume.id}' ...") conn.block_storage.delete_volume(volume.id) + except openstack.exceptions.ResourceNotFound: + # if the resource has vanished on its own in the meantime ignore it + continue + except Exception as e: + logging.debug("traceback", exc_info=True) + logging.warning(str(e)) + cleanup_issues += 1 + + if cleanup_issues: + logging.info( + f"Some resources with the '{prefix}' prefix were not cleaned up!" + ) + + return not cleanup_issues def main(): @@ -257,26 +232,43 @@ def main(): ) args = parser.parse_args() openstack.enable_logging(debug=args.debug) + logging.basicConfig( + format="%(levelname)s: %(message)s", + level=logging.DEBUG if args.debug else logging.INFO, + ) # parse cloud name for lookup in clouds.yaml - cloud = os.environ.get("OS_CLOUD", None) - if args.os_cloud: - cloud = args.os_cloud - assert cloud, ( - "You need to have the OS_CLOUD environment variable set to your " - "cloud name or pass it via --os-cloud" - ) - conn = connect( - cloud, - password=getpass.getpass("Enter password: ") if args.ask else None - ) - if args.cleanup_only: - cleanup(conn, prefix=args.prefix, timeout=args.timeout) - else: - cleanup(conn, prefix=args.prefix, timeout=args.timeout) - test_backup(conn, prefix=args.prefix, timeout=args.timeout) - cleanup(conn, prefix=args.prefix, timeout=args.timeout) + cloud = args.os_cloud or os.environ.get("OS_CLOUD", None) + if not cloud: + raise Exception( + "You need to have the OS_CLOUD environment variable set to your " + "cloud name or pass it via --os-cloud" + ) + password = getpass.getpass("Enter password: ") if args.ask else None + + with openstack.connect(cloud, password=password) as conn: + if not cleanup(conn, prefix=args.prefix, timeout=args.timeout): + raise RuntimeError("Initial cleanup failed") + if args.cleanup_only: + logging.info("Cleanup-only run finished.") + return + try: + test_backup(conn, prefix=args.prefix, timeout=args.timeout) + except BaseException: + print('volume-backup-check: FAIL') + raise + else: + print('volume-backup-check: PASS') + finally: + cleanup(conn, prefix=args.prefix, timeout=args.timeout) if __name__ == "__main__": - main() + try: + sys.exit(main()) + except SystemExit: + raise + except BaseException as exc: + logging.debug("traceback", exc_info=True) + logging.critical(str(exc)) + sys.exit(1) diff --git 
a/Tests/iaas/volume-types/volume-types-check.py b/Tests/iaas/volume-types/volume-types-check.py old mode 100644 new mode 100755 index 444755816..4b1945fb8 --- a/Tests/iaas/volume-types/volume-types-check.py +++ b/Tests/iaas/volume-types/volume-types-check.py @@ -141,6 +141,8 @@ def main(argv): "Total critical / error / warning: " f"{c[logging.CRITICAL]} / {c[logging.ERROR]} / {c[logging.WARNING]}" ) + if not c[logging.CRITICAL]: + print("volume-types-check: " + ('PASS', 'FAIL')[min(1, c[logging.ERROR])]) return min(127, c[logging.CRITICAL] + c[logging.ERROR]) # cap at 127 due to OS restrictions diff --git a/Tests/iam/domain-manager/domain-manager-check.py b/Tests/iam/domain-manager/domain-manager-check.py old mode 100644 new mode 100755 index e56aad884..41040122b --- a/Tests/iam/domain-manager/domain-manager-check.py +++ b/Tests/iam/domain-manager/domain-manager-check.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 """Domain Manager policy configuration checker This script uses the OpenStack SDK to validate the proper implementation diff --git a/Tests/scs-compatible-iaas.yaml b/Tests/scs-compatible-iaas.yaml index 0d9c0ee61..5ad119fbf 100644 --- a/Tests/scs-compatible-iaas.yaml +++ b/Tests/scs-compatible-iaas.yaml @@ -154,7 +154,75 @@ modules: tags: [mandatory] description: > Must fulfill all requirements of + - id: scs-0114-v1 + name: Volume Types + url: https://docs.scs.community/standards/scs-0114-v1-volume-type-standard + run: + - executable: ./iaas/volume-types/volume-types-check.py + args: -c {os_cloud} -d + testcases: + - id: volume-types-check + tags: [mandatory] + description: > + Must fulfill all requirements of + - id: scs-0115-v1 + name: Default rules for security groups + url: https://docs.scs.community/standards/scs-0115-v1-default-rules-for-security-groups + run: + - executable: ./iaas/security-groups/default-security-group-rules.py + args: --os-cloud {os_cloud} --debug + testcases: + - id: security-groups-default-rules-check + tags: [mandatory] + description: > + Must fulfill all requirements of + - id: scs-0116-v1 + name: Key manager + url: https://docs.scs.community/standards/scs-0116-v1-key-manager-standard + run: + - executable: ./iaas/key-manager/check-for-key-manager.py + args: --os-cloud {os_cloud} --debug + testcases: + - id: key-manager-check + tags: [mandatory] + description: > + Must fulfill all requirements of + - id: scs-0117-v1 + name: Volume backup + url: https://docs.scs.community/standards/scs-0117-v1-volume-backup-service + run: + - executable: ./iaas/volume-backup/volume-backup-tester.py + args: --os-cloud {os_cloud} --debug + testcases: + - id: volume-backup-check + tags: [mandatory] + description: > + Must fulfill all requirements of + - id: scs-0121-v1 + name: Availability Zones + url: https://docs.scs.community/standards/scs-0121-v1-Availability-Zones-Standard + testcases: + - id: availability-zones-check + tags: [availability-zones] + description: > + Note: manual check! Must fulfill all requirements of + - id: scs-0302-v1 + name: Domain Manager Role + url: https://docs.scs.community/standards/scs-0302-v1-domain-manager-role + # run: + # - executable: ./iam/domain-manager/domain-manager-check.py + # args: --os-cloud {os_cloud} --debug --domain-config ... + testcases: + - id: domain-manager-check + tags: [domain-manager] + description: > + Note: manual check! 
Must fulfill all requirements of timeline: + - date: 2024-11-08 + versions: + v5: draft + v4: effective + v3: deprecated - date: 2024-08-23 versions: v5: draft @@ -202,8 +270,15 @@ versions: - ref: scs-0104-v1 parameters: image_spec: https://raw.githubusercontent.com/SovereignCloudStack/standards/main/Tests/iaas/scs-0104-v1-images-v5.yaml + - scs-0114-v1 + - scs-0115-v1 + - scs-0116-v1 + - scs-0117-v1 + - scs-0121-v1 + - scs-0302-v1 targets: main: mandatory + preview: domain-manager/availability-zones - version: v4 stabilized_at: 2024-02-28 include: diff --git a/compliance-monitor/bootstrap.yaml b/compliance-monitor/bootstrap.yaml index 50b722703..8339c422d 100644 --- a/compliance-monitor/bootstrap.yaml +++ b/compliance-monitor/bootstrap.yaml @@ -50,6 +50,9 @@ accounts: - public_key: "AAAAC3NzaC1lZDI1NTE5AAAAILufk4C7e0eQQIkmUDK8GB2IoiDjYtv6mx2eE8wZ3VWT" public_key_type: "ssh-ed25519" public_key_name: "primary" + - subject: scaleup-occ2 + delegates: + - zuul_ci - subject: syseleven-dus2 delegates: - zuul_ci diff --git a/compliance-monitor/monitor.py b/compliance-monitor/monitor.py index aa02cbae1..c6dcb2a41 100755 --- a/compliance-monitor/monitor.py +++ b/compliance-monitor/monitor.py @@ -96,6 +96,11 @@ class ViewType(Enum): fragment = "fragment" +VIEW_REPORT = { + ViewType.markdown: 'report.md', + ViewType.fragment: 'report.md', + ViewType.page: 'overview.html', +} VIEW_DETAIL = { ViewType.markdown: 'details.md', ViewType.fragment: 'details.md', @@ -111,7 +116,7 @@ class ViewType(Enum): ViewType.fragment: 'scope.md', ViewType.page: 'overview.html', } -REQUIRED_TEMPLATES = tuple(set(fn for view in (VIEW_DETAIL, VIEW_TABLE, VIEW_SCOPE) for fn in view.values())) +REQUIRED_TEMPLATES = tuple(set(fn for view in (VIEW_REPORT, VIEW_DETAIL, VIEW_TABLE, VIEW_SCOPE) for fn in view.values())) # do I hate these globals, but I don't see another way with these frameworks @@ -276,18 +281,16 @@ def evaluate(self, scope_results): by_validity[self.versions[vname].validity].append(vname) # go through worsening validity values until a passing version is found relevant = [] + best_passed = None for validity in ('effective', 'warn', 'deprecated'): vnames = by_validity[validity] relevant.extend(vnames) if any(version_results[vname]['result'] == 1 for vname in vnames): + best_passed = validity break # always include draft (but only at the end) relevant.extend(by_validity['draft']) passed = [vname for vname in relevant if version_results[vname]['result'] == 1] - if passed: - summary = 1 if self.versions[passed[0]].validity in ('effective', 'warn') else -1 - else: - summary = 0 return { 'name': self.name, 'versions': version_results, @@ -297,7 +300,7 @@ def evaluate(self, scope_results): vname + ASTERISK_LOOKUP[self.versions[vname].validity] for vname in passed ]), - 'summary': summary, + 'best_passed': best_passed, } def update_lookup(self, target_dict): @@ -544,14 +547,23 @@ async def get_status( return convert_result_rows_to_dict2(rows2, get_scopes(), include_report=True) -def render_view(view, view_type, base_url='/', title=None, **kwargs): +def _build_report_url(base_url, report, *args, **kwargs): + if kwargs.get('download'): + return f"{base_url}reports/{report}" + url = f"{base_url}page/report/{report}" + if len(args) == 2: # version, testcase_id --> add corresponding fragment specifier + url += f"#{args[0]}_{args[1]}" + return url + + +def render_view(view, view_type, detail_page='detail', base_url='/', title=None, **kwargs): media_type = {ViewType.markdown: 'text/markdown'}.get(view_type, 'text/html') 
stage1 = stage2 = view[view_type] if view_type is ViewType.page: stage1 = view[ViewType.fragment] def scope_url(uuid): return f"{base_url}page/scope/{uuid}" # noqa: E306,E704 - def detail_url(subject, scope): return f"{base_url}page/detail/{subject}/{scope}" # noqa: E306,E704 - def report_url(report): return f"{base_url}reports/{report}" # noqa: E306,E704 + def detail_url(subject, scope): return f"{base_url}page/{detail_page}/{subject}/{scope}" # noqa: E306,E704 + def report_url(report, *args, **kwargs): return _build_report_url(base_url, report, *args, **kwargs) # noqa: E306,E704 fragment = templates_map[stage1].render(detail_url=detail_url, report_url=report_url, scope_url=scope_url, **kwargs) if view_type != ViewType.markdown and stage1.endswith('.md'): fragment = markdown(fragment, extensions=['extra']) @@ -560,6 +572,23 @@ def report_url(report): return f"{base_url}reports/{report}" # noqa: E306,E704 return Response(content=fragment, media_type=media_type) +@app.get("/{view_type}/report/{report_uuid}") +async def get_report_view( + request: Request, + account: Annotated[Optional[tuple[str, str]], Depends(auth)], + conn: Annotated[connection, Depends(get_conn)], + view_type: ViewType, + report_uuid: str, +): + with conn.cursor() as cur: + specs = db_get_report(cur, report_uuid) + if not specs: + raise HTTPException(status_code=404) + spec = specs[0] + check_role(account, spec['subject'], ROLES['read_any']) + return render_view(VIEW_REPORT, view_type, report=spec, base_url=settings.base_url, title=f'Report {report_uuid}') + + @app.get("/{view_type}/detail/{subject}/{scopeuuid}") async def get_detail( request: Request, @@ -618,7 +647,11 @@ async def get_table_full( with conn.cursor() as cur: rows2 = db_get_relevant_results2(cur, approved_only=False) results2 = convert_result_rows_to_dict2(rows2, get_scopes()) - return render_view(VIEW_TABLE, view_type, results=results2, base_url=settings.base_url, title="SCS compliance overview") + return render_view( + VIEW_TABLE, view_type, results=results2, + detail_page='detail_full', base_url=settings.base_url, + title="SCS compliance overview", + ) @app.get("/{view_type}/scope/{scopeuuid}") @@ -692,8 +725,13 @@ def pick_filter(results, subject, scope): def summary_filter(scope_results): """Jinja filter to construct summary from `scope_results`""" passed_str = scope_results.get('passed_str', '') or '–' - summary = scope_results.get('summary', 0) - color = {1: '✅'}.get(summary, '🛑') # instead of 🟢🔴 (hard to distinguish for color-blind folks) + best_passed = scope_results.get('best_passed') + # avoid simple 🟢🔴 (hard to distinguish for color-blind folks) + color = { + 'effective': '✅', + 'warn': '✅', # forgo differentiation here in favor of simplicity (will be apparent in version list) + 'deprecated': '🟧', + }.get(best_passed, '🛑') return f'{color} {passed_str}' diff --git a/compliance-monitor/templates/details.md.j2 b/compliance-monitor/templates/details.md.j2 index e812cd741..30136b149 100644 --- a/compliance-monitor/templates/details.md.j2 +++ b/compliance-monitor/templates/details.md.j2 @@ -24,7 +24,7 @@ No recent test results available. 
{% set res = version_result.results[testcase_id] if testcase_id in version_result.results else dict(result=0) -%} | {% if res.result != 1 %}⚠️ {% endif %}{{ testcase.id }} | {#- #} {% if res.report -%} -[{{ res.result | verdict_check }}]({{ report_url(res.report) }}) +[{{ res.result | verdict_check }}]({{ report_url(res.report, version, testcase_id) }}) {%- else -%} {{ res.result | verdict_check }} {%- endif -%} diff --git a/compliance-monitor/templates/overview.html.j2 b/compliance-monitor/templates/overview.html.j2 index 154bd0cb2..830b94121 100644 --- a/compliance-monitor/templates/overview.html.j2 +++ b/compliance-monitor/templates/overview.html.j2 @@ -1,16 +1,18 @@ + + +{{ title or 'SCS compliance overview' }} + + - - -{{ title or 'SCS compliance overview' }} - - {% if title %}

<h1>{{title}}</h1>
{% endif %}{{fragment}} diff --git a/compliance-monitor/templates/overview.md.j2 b/compliance-monitor/templates/overview.md.j2 index 36e3ced23..77ba6bcc9 100644 --- a/compliance-monitor/templates/overview.md.j2 +++ b/compliance-monitor/templates/overview.md.j2 @@ -2,6 +2,9 @@ we could of course iterate over results etc., but hardcode the table (except the actual results, of course) for the time being to have the highest degree of control -#} + +Version numbers are suffixed by a symbol depending on state: * for _draft_, † for _warn_ (soon to be deprecated), and †† for _deprecated_. + {% set iaas = '50393e6f-2ae1-4c5c-a62c-3b75f2abef3f' -%} | Name | Description | Operator | [SCS-compatible IaaS](https://docs.scs.community/standards/scs-compatible-iaas/) | HealthMon | |-------|--------------|-----------|----------------------|:----------:| @@ -32,6 +35,9 @@ for the time being to have the highest degree of control | [REGIO.cloud](https://regio.digital) | Public cloud for customers | OSISM GmbH | {#- #} [{{ results | pick('regio-a', iaas) | summary }}]({{ detail_url('regio-a', iaas) }}) {# -#} | [HM](https://apimon.services.regio.digital/public-dashboards/17cf094a47404398a5b8e35a4a3968d4?orgId=1&refresh=5m) | +| [ScaleUp Open Cloud](https://www.scaleuptech.com/cloud-hosting/) | Public cloud for customers | ScaleUp Technologies GmbH & Co. KG | +{#- #} [{{ results | pick('scaleup-occ2', iaas) | summary }}]({{ detail_url('scaleup-occ2', iaas) }}) {# -#} +| [HM](https://health.occ2.scaleup.sovereignit.cloud) | | [syseleven](https://www.syseleven.de/en/products-services/openstack-cloud/)
<br/>(2 SCS regions) | Public OpenStack Cloud | SysEleven GmbH | {# #} {#- #}dus2: [{{ results | pick('syseleven-dus2', iaas) | summary }}]({{ detail_url('syseleven-dus2', iaas) }}){# -#}
diff --git a/compliance-monitor/templates/report.md.j2 b/compliance-monitor/templates/report.md.j2 new file mode 100644 index 000000000..e46c2e086 --- /dev/null +++ b/compliance-monitor/templates/report.md.j2 @@ -0,0 +1,66 @@ +## General info + +- uuid: [{{ report.run.uuid }}]({{ report_url(report.run.uuid, download=True) }}) +- subject: {{ report.subject }} +- scope: [{{ report.spec.name }}]({{ scope_url(report.spec.uuid) }}) +- checked at: {{ report.checked_at }} + +## Results + +{% for version, version_results in report.versions.items() %}{% if version_results %} +### {{ version }} + +| test case | result | invocation | +|---|---|---| +{% for testcase_id, result_data in version_results.items() -%} +| {{ testcase_id }} {: #{{ version + '_' + testcase_id }} } | {{ result_data.result | verdict_check }} | [{{ result_data.invocation }}](#{{ result_data.invocation }}) | +{% endfor %} +{% endif %}{% endfor %} + +## Run + +### Variable assignment + +| key | value | +|---|---| +{% for key, value in report.run.assignment.items() -%} +| `{{ key }}` | `{{ value }}` | +{% endfor %} + +### Check tool invocations + +{% for invid, invdata in report.run.invocations.items() %} +#### Invocation {{invid}} {: #{{ invid }} } + +- cmd: `{{ invdata.cmd }}` +- rc: {{ invdata.rc }} +- channel summary +{%- for channel in ('critical', 'error', 'warning') %} +{%- if invdata[channel] %} + - **{{ channel }}: {{ invdata[channel] }}** +{%- else %} + - {{ channel }}: – +{%- endif %} +{%- endfor %} +- results +{%- for resultid, result in invdata.results.items() %} + - {{ resultid }}: {{ result | verdict_check }} +{%- endfor %} + +{% if invdata.stdout -%} +
<details><summary>Captured stdout</summary> +```text +{{ '\n'.join(invdata.stdout) }} +``` +</details>
+{%- endif %} + +{% if invdata.stderr -%} +
<details><summary>Captured stderr</summary> +{%- for line in invdata.stderr %} +
{% if line.split(':', 1)[0].lower() in ('warning', 'error', 'critical') %}{{ '<strong>' + line + '</strong>' }}{% else %}{{ line }}{% endif %}
+{%- endfor %} +</details>
+{%- endif %} + +{% endfor %} diff --git a/playbooks/clouds.yaml.j2 b/playbooks/clouds.yaml.j2 index da0d3602d..2df1cdbd8 100644 --- a/playbooks/clouds.yaml.j2 +++ b/playbooks/clouds.yaml.j2 @@ -83,6 +83,15 @@ clouds: application_credential_id: "{{ clouds_conf.regio_a_ac_id }}" application_credential_secret: "{{ clouds_conf.regio_a_ac_secret }}" auth_type: "v3applicationcredential" + scaleup-occ2: + auth_type: v3applicationcredential + auth: + auth_url: https://keystone.occ2.scaleup.cloud + application_credential_id: "{{ clouds_conf.scaleup_occ2_ac_id }}" + application_credential_secret: "{{ clouds_conf.scaleup_occ2_ac_secret }}" + region_name: "RegionOne" + interface: "public" + identity_api_version: 3 syseleven-dus2: interface: public identity_api_verion: 3
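A note on the verdict pattern this diff introduces across the IaaS checkers (`default-security-group-rules.py`, `volume-types-check.py`, and the key-manager check): log records are counted per level and then turned into both a human-readable `PASS`/`FAIL` line and a numeric exit code capped at 127. The standalone sketch below illustrates that pattern outside the patch; the `CountingHandler` class and the verdict/exit-code expressions are copied from the diff, while `run_checks` and the `example-check` label are illustrative placeholders, not part of the scripts above.

```python
#!/usr/bin/env python3
"""Minimal sketch of the counting-handler verdict pattern used by the updated checkers."""
import logging
import sys
from collections import Counter

logger = logging.getLogger(__name__)


class CountingHandler(logging.Handler):
    """Counts log records per level instead of emitting them (as in the diff above)."""
    def __init__(self, level=logging.NOTSET):
        super().__init__(level=level)
        self.bylevel = Counter()

    def handle(self, record):
        self.bylevel[record.levelno] += 1


def run_checks():
    # illustrative placeholder: a real checker would query the cloud here
    logger.warning("a warning is reported but does not flip the verdict")
    # logger.error("an error would turn the verdict into FAIL and raise the exit code")


def main():
    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
    counting_handler = CountingHandler(level=logging.INFO)
    logger.addHandler(counting_handler)

    run_checks()

    c = counting_handler.bylevel
    if not c[logging.CRITICAL]:
        # errors yield FAIL; warnings alone still count as PASS
        print("example-check: " + ('PASS', 'FAIL')[min(1, c[logging.ERROR])])
    # cap the return code at 127 due to OS restrictions
    return min(127, c[logging.CRITICAL] + c[logging.ERROR])


if __name__ == "__main__":
    sys.exit(main())
```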