diff --git a/.ansible-lint b/.ansible-lint
new file mode 100644
index 00000000..42081f53
--- /dev/null
+++ b/.ansible-lint
@@ -0,0 +1,10 @@
+---
+use_default_rules: true
+skip_list:
+ - yaml # disabled because we use yamllint
+# Roles and modules imported from https://opendev.org/zuul/zuul-jobs
+mock_roles:
+ - ensure-docker
+ - ensure-go
+mock_modules:
+ - zuul_return
diff --git a/.github/workflows/ansible-lint.yml b/.github/workflows/ansible-lint.yml
new file mode 100644
index 00000000..9b0c2bcd
--- /dev/null
+++ b/.github/workflows/ansible-lint.yml
@@ -0,0 +1,21 @@
+---
+name: Ansible lint
+
+"on":
+ push:
+ branches:
+ - main
+ paths:
+ - 'playbooks/**'
+ pull_request:
+ paths:
+ - 'playbooks/**'
+
+jobs:
+ build:
+ name: Ansible Lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Run ansible-lint
+ uses: ansible/ansible-lint@v24
diff --git a/.github/workflows/check-yaml-syntax.yml b/.github/workflows/check-yaml-syntax.yml
new file mode 100644
index 00000000..c61dd341
--- /dev/null
+++ b/.github/workflows/check-yaml-syntax.yml
@@ -0,0 +1,25 @@
+---
+name: Check YAML syntax
+
+"on":
+ push:
+ branches:
+ - main
+ paths:
+ - '**.yaml'
+ - '**.yml'
+ pull_request:
+ paths:
+ - '**.yaml'
+ - '**.yml'
+
+jobs:
+ check-yaml-syntax:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-python@v5
+ with:
+ python-version: '3.x'
+ - run: pip3 install yamllint
+ - run: yamllint .
diff --git a/.yamllint.yml b/.yamllint.yml
new file mode 100644
index 00000000..3853fdb7
--- /dev/null
+++ b/.yamllint.yml
@@ -0,0 +1,13 @@
+---
+extends: default
+
+rules:
+ comments: enable
+ line-length: disable
+ # accept both key:
+ # - item
+ # and key:
+ # - item
+ # (the latter is very common in k8s land)
+ indentation:
+ indent-sequences: whatever
diff --git a/.zuul.yaml b/.zuul.yaml
new file mode 100644
index 00000000..91be9129
--- /dev/null
+++ b/.zuul.yaml
@@ -0,0 +1,72 @@
+---
+- job:
+ name: openstack-e2e-abstract
+ abstract: true
+ parent: openstack-access-base
+ description: |
+ An abstract job for e2e testing of cluster stacks project.
+ This job is not intended to be run directly;
+ instead, other jobs must inherit from it.
+ pre-run: playbooks/dependencies.yaml
+ run: playbooks/openstack/e2e.yaml
+ cleanup-run: playbooks/openstack/cleanup.yaml # also executed when the job is canceled
+ vars:
+ wait_for_cluster_stack_resource: 120 # 2min
+ wait_for_clusteraddons: 120 # 2min
+ wait_for_cluster_stack: 1440 # 24min
+ wait_for_cluster: 600 # 10min
+ sonobouy:
+ enabled: false
+ scs_compliance:
+ enabled: false
+
+- job:
+ name: e2e-openstack-conformance
+ parent: openstack-e2e-abstract
+ description: |
+ Run e2e tests of the cluster-stacks project using
+ [sonobuoy](https://sonobuoy.io/) in conformance mode together with
+ SCS compliance checks, i.e. it tests whether the Kubernetes cluster
+ conforms to both the CNCF conformance suite and the SCS standards.
+ timeout: 10800 # 3h
+ vars:
+ wait_for_cluster: 1200 # 20min
+ sonobouy:
+ enabled: true
+ mode: conformance
+ scs_compliance:
+ enabled: true
+
+- job:
+ name: e2e-openstack-quick
+ parent: openstack-e2e-abstract
+ description: |
+ Run e2e tests of cluster-stacks project using
+ [sonobuoy](https://sonobuoy.io/) with mode quick and
+ SCS compliance checks.
+ timeout: 7200 # 2h
+ vars:
+ wait_for_cluster: 1200 # 20min
+ sonobouy:
+ enabled: true
+ mode: quick
+ scs_compliance:
+ enabled: true
+
+
+- project:
+ name: SovereignCloudStack/cluster-stacks
+ default-branch: main
+ merge-mode: "squash-merge"
+ e2e-test:
+ jobs:
+ - e2e-openstack-conformance
+ unlabel-on-update-e2e-test:
+ jobs:
+ - noop
+ e2e-quick-test:
+ jobs:
+ - e2e-openstack-quick
+ unlabel-on-update-e2e-quick-test:
+ jobs:
+ - noop
diff --git a/playbooks/dependencies.yaml b/playbooks/dependencies.yaml
new file mode 100644
index 00000000..a4c5dd8c
--- /dev/null
+++ b/playbooks/dependencies.yaml
@@ -0,0 +1,61 @@
+---
+- name: Ensure cluster stacks dependencies
+ hosts: all
+ vars:
+ kind_version: "0.22.0"
+ kubectl_version: "1.29.3"
+ clusterctl_version: "1.7.2"
+ helm_version: "3.14.4"
+ yq_version: "4.44.1"
+ envsubst_version: "1.4.2"
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ roles: # https://opendev.org/zuul/zuul-jobs
+ - role: ensure-docker
+ - role: ensure-go
+ vars:
+ go_version: 1.21.6
+ environment:
+ PATH: "{{ install_dir }}:{{ ansible_env.PATH }}"
+ tasks:
+ - name: Make sure installation directory exists
+ ansible.builtin.file:
+ path: "{{ install_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Install clusterctl
+ ansible.builtin.get_url:
+ url: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v{{ clusterctl_version }}/clusterctl-linux-amd64"
+ dest: "{{ install_dir }}/clusterctl"
+ mode: "+x"
+ - name: Install envsubst
+ ansible.builtin.get_url:
+ url: "https://github.com/a8m/envsubst/releases/download/v{{ envsubst_version }}/envsubst-Linux-x86_64"
+ dest: "{{ install_dir }}/envsubst"
+ mode: "+x"
+ - name: Install yq
+ ansible.builtin.get_url:
+ url: "https://github.com/mikefarah/yq/releases/download/v{{ yq_version }}/yq_linux_amd64"
+ dest: "{{ install_dir }}/yq"
+ mode: "+x"
+ - name: Install KinD
+ ansible.builtin.get_url:
+ url: "https://kind.sigs.k8s.io/dl/v{{ kind_version }}/kind-linux-amd64"
+ dest: "{{ install_dir }}/kind"
+ mode: "+x"
+ - name: Install kubectl
+ ansible.builtin.get_url:
+ url: "https://dl.k8s.io/release/v{{ kubectl_version }}/bin/linux/amd64/kubectl"
+ dest: "{{ install_dir }}/kubectl"
+ mode: "+x"
+ # TODO: Install csctl and csctl-openstack from the release once it is available
+ - name: Install csctl and csctl-openstack
+ ansible.builtin.import_tasks: tasks/csctl.yaml
+ - name: Install helm
+ ansible.builtin.unarchive:
+ src: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+ dest: "{{ install_dir }}"
+ extra_opts: "--strip-components=1"
+ mode: "+x"
+ remote_src: true
+ args:
+ creates: "{{ install_dir }}/helm"
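+ # Note: every tool above lands in {{ install_dir }}, which the play-level
+ # environment prepends to PATH; the e2e and cleanup playbooks prepend the
+ # same directory, so all of them can invoke the tools unqualified.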
diff --git a/playbooks/openstack/cleanup.yaml b/playbooks/openstack/cleanup.yaml
new file mode 100644
index 00000000..8f508ffe
--- /dev/null
+++ b/playbooks/openstack/cleanup.yaml
@@ -0,0 +1,48 @@
+---
+- name: Cleanup
+ hosts: all
+ vars:
+ cloud_name: "{{ cloud }}" # inherited from the parent job
+ environment:
+ PATH: "{{ ansible_user_dir }}/.local/bin:{{ ansible_env.PATH }}"
+ tasks:
+ - name: Delete server groups
+ when: scs_compliance.enabled
+ block:
+ - name: List existing server groups
+ ansible.builtin.command: "openstack server group list -f value -c Name -c ID"
+ register: server_groups
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ changed_when: false
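+ # Each line of the value-formatted output is expected to look like
+ # "<ID> <Name>", e.g. (hypothetical) "0a1b2c3d-... test-cluster-worker";
+ # the two parsing tasks below rely on that shape.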
+ - name: Parse the test-cluster-controller server group and record its ID in srvgrp_controller
+ ansible.builtin.set_fact:
+ srvgrp_controller: "{{ item.split(' ')[0] }}"
+ loop: "{{ server_groups.stdout_lines }}"
+ when: "server_groups is defined and server_groups.stdout_lines | length > 0 and 'test-cluster-controller' in item.split(' ')"
+ - name: Parse the test-cluster-worker server group and record its ID in srvgrp_worker
+ ansible.builtin.set_fact:
+ srvgrp_worker: "{{ item.split(' ')[0] }}"
+ loop: "{{ server_groups.stdout_lines }}"
+ when: "server_groups is defined and server_groups.stdout_lines | length > 0 and 'test-cluster-worker' in item.split(' ')"
+ - name: Delete Server Group for worker nodes
+ ansible.builtin.command: "openstack server group delete {{ srvgrp_worker }}"
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ when: srvgrp_worker is defined
+ changed_when: true
+ - name: Delete Server Group for control-plane nodes
+ ansible.builtin.command: "openstack server group delete {{ srvgrp_controller }}"
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ when: srvgrp_controller is defined
+ changed_when: true
+ - name: Check if test-cluster exists
+ ansible.builtin.command: "kubectl get cluster test-cluster"
+ register: cluster_check
+ ignore_errors: true
+ changed_when: false
+ - name: Cleanup workload cluster
+ ansible.builtin.command: "kubectl delete -f {{ ansible_user_dir }}/cluster.yaml"
+ when: cluster_check.rc == 0
+ changed_when: true
diff --git a/playbooks/openstack/e2e.yaml b/playbooks/openstack/e2e.yaml
new file mode 100644
index 00000000..e2931a0d
--- /dev/null
+++ b/playbooks/openstack/e2e.yaml
@@ -0,0 +1,211 @@
+---
+- name: Cluster stack OpenStack E2E test
+ hosts: all
+ vars:
+ cluster_stack: "providers/openstack/alpha/1-29"
+ cluster_stack_name: "openstack-alpha-1-29"
+ cluster_stack_version_name: alpha
+ project_dir: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}"
+ cluster_stack_release_dir: "{{ ansible_user_dir }}/.release"
+ cluster_manifest_dir: "{{ ansible_user_dir }}/cluster_manifest"
+ cluster_stack_release_container_dir: "/.release"
+ openstack_csp_helper_chart_version: v0.6.0
+ openstack_csp_helper_chart_url: "https://github.com/SovereignCloudStack/openstack-csp-helper/releases/download/{{ openstack_csp_helper_chart_version }}/openstack-csp-helper.tgz"
+ capo_version: "v0.10.3"
+ openstackclient_version: "6.6.0"
+
+ k8s_management_name: "management"
+ k8s_management_version: "v1.29.2@sha256:51a1434a5397193442f0be2a297b488b6c919ce8a3931be0ce822606ea5ca245"
+ k8s_management_cluster_wait_for: "180"
+
+ environment:
+ PATH: "{{ ansible_user_dir }}/.local/bin:{{ ansible_env.PATH }}"
+ tasks:
+ - name: Determine cluster stack directory
+ block:
+ - name: Make sure directory structure exists
+ ansible.builtin.file:
+ path: "{{ item }}"
+ state: directory
+ mode: "0755"
+ loop:
+ - "{{ cluster_manifest_dir }}"
+ - "{{ cluster_stack_release_dir }}"
+ - name: Extract Zuul config
+ ansible.builtin.set_fact:
+ zuul_config: "{{ zuul.change_message | regex_search('(?s)```ZUUL_CONFIG(.+?)```', '\\1', multiline=true) }}"
+ when: zuul.change_message is defined
+ - name: Trim Zuul config
+ ansible.builtin.set_fact:
+ zuul_config: "{{ zuul_config | first | split('\n') | map('trim') | join('\n') }}"
+ when: zuul_config is defined and zuul_config is not none and zuul_config != ''
+ - name: Extract cluster stack from Zuul config
+ ansible.builtin.set_fact:
+ cluster_stack_name: "{{ zuul_config | regex_search('cluster_stack\\s*=\\s*\"([^\"]+)\"', '\\1') | first }}"
+ when:
+ - zuul_config is defined and zuul_config is not none and zuul_config != ''
+ - zuul_config | regex_search('cluster_stack\\s*=\\s*\"([^\"]+)\"') is not none
+ - name: Override cluster stack if extracted
+ ansible.builtin.set_fact:
+ cluster_stack: "{{ cluster_stack_name | regex_replace('^openstack-([a-zA-Z0-9]+)-([0-9]+-[0-9]+)$', 'providers/openstack/\\1/\\2') }}"
+ cluster_stack_version_name: "{{ cluster_stack_name | regex_replace('^openstack-([a-zA-Z0-9]+)-([0-9]+-[0-9]+)$', '\\1') }}"
+ when: cluster_stack_name is defined
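+ # The tasks above let a pull request override the tested cluster stack via a
+ # fenced block in the change message; a minimal sketch (hypothetical value,
+ # only the cluster_stack key is read):
+ #
+ # ```ZUUL_CONFIG
+ # cluster_stack = "openstack-alpha-1-29"
+ # ```
+ #
+ # A name like openstack-alpha-1-29 is then mapped to the in-repo path
+ # providers/openstack/alpha/1-29 by the regex_replace above.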
+ - name: Create cluster stack
+ ansible.builtin.command: "csctl create {{ project_dir }}/{{ cluster_stack }} --output {{ cluster_stack_release_dir }} --mode hash"
+ args:
+ chdir: "{{ project_dir }}"
+ changed_when: true
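+ # csctl renders the cluster stack release assets (including the metadata.yaml
+ # parsed further below) into {{ cluster_stack_release_dir }}; that directory
+ # is mounted into the kind worker node via mgmt-cluster-config.yaml.j2 so the
+ # CSO/CSPO operators can consume it in local mode.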
+ - name: Ensure management cluster
+ block:
+ - name: Create management cluster config file
+ ansible.builtin.template:
+ src: "mgmt-cluster-config.yaml.j2"
+ dest: "{{ ansible_user_dir }}/mgmt-cluster-config.yaml"
+ mode: "0644"
+ - name: Create management cluster
+ ansible.builtin.command: "kind create cluster --config {{ ansible_user_dir }}/mgmt-cluster-config.yaml"
+ changed_when: true
+ - name: Wait for all system pods in the management cluster to become ready
+ ansible.builtin.command: "kubectl wait -n kube-system --for=condition=Ready --timeout={{ k8s_management_cluster_wait_for }}s pod --all"
+ changed_when: true
+ - name: Install CAPI and CAPO
+ ansible.builtin.command: "clusterctl init --infrastructure openstack:{{ capo_version }}"
+ changed_when: true
+ environment:
+ CLUSTER_TOPOLOGY: "true"
+ EXP_CLUSTER_RESOURCE_SET: "true"
+ - name: Install CSO and mount cluster stack release
+ ansible.builtin.import_tasks: ../tasks/cso.yaml
+ vars:
+ release_dir: "{{ cluster_stack_release_container_dir }}"
+ - name: Install CSPO and mount cluster stack release
+ ansible.builtin.import_tasks: ../tasks/cspo.yaml
+ vars:
+ release_dir: "{{ cluster_stack_release_container_dir }}"
+ - name: Read Zuul's clouds.yaml content, base64 encoded
+ ansible.builtin.slurp:
+ src: /etc/openstack/clouds.yaml
+ register: clouds_yaml_b64
+ - name: Read Zuul's secure.yaml content, base64 encoded
+ ansible.builtin.slurp:
+ src: /etc/openstack/secure.yaml
+ register: secure_yaml_b64
+ - name: Combine clouds_yaml_b64 and secure_yaml_b64 to produce full clouds.yaml
+ ansible.builtin.set_fact:
+ clouds_yaml_full: "{{ clouds_yaml_b64.content | b64decode | from_yaml | ansible.builtin.combine(secure_yaml_b64.content | b64decode | from_yaml, recursive=true) }}"
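+ # A minimal sketch of the merge above, assuming a single cloud entry named
+ # mycloud (hypothetical values):
+ #   clouds.yaml: {clouds: {mycloud: {auth: {auth_url: "https://..."}}}}
+ #   secure.yaml: {clouds: {mycloud: {auth: {password: "..."}}}}
+ #   result: a complete clouds.yaml whose auth section carries both keys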
+ - name: Write clouds.yaml file
+ ansible.builtin.copy:
+ content: "{{ clouds_yaml_full | to_yaml }}"
+ dest: "{{ ansible_user_dir }}/clouds.yaml"
+ mode: "0644"
+ - name: Create secrets and ClusterResourceSet for the clusterstacks approach
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ helm upgrade -i clusterstacks-credentials {{ openstack_csp_helper_chart_url }} -f {{ ansible_user_dir }}/clouds.yaml
+ executable: /bin/bash
+ changed_when: true
+ - name: Find metadata.yaml in the release directory
+ ansible.builtin.find:
+ paths: "{{ cluster_stack_release_dir }}"
+ patterns: "metadata.yaml"
+ recurse: true
+ register: found_files
+ - name: Read metadata.yaml
+ ansible.builtin.slurp:
+ src: "{{ found_files.files[0].path }}"
+ register: metadata_content
+ when: found_files.matched > 0
+ - name: Get cluster-stack and k8s version
+ ansible.builtin.set_fact:
+ cluster_stack_version: "{{ (metadata_content['content'] | b64decode | from_yaml)['versions']['clusterStack'] }}"
+ k8s_version: "{{ (metadata_content['content'] | b64decode | from_yaml)['versions']['kubernetes'] }}"
+ when: metadata_content is defined
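+ # The metadata.yaml emitted by csctl is expected to carry at least the two
+ # keys read above, roughly (values hypothetical):
+ #   versions:
+ #     clusterStack: "v0-sha.abcdef"
+ #     kubernetes: "v1.29.3"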
+ - name: Parse k8s version to major.minor
+ ansible.builtin.set_fact:
+ k8s_version_major_minor: "{{ k8s_version | regex_replace('^v?([0-9]+\\.[0-9]+)\\..*', '\\1') }}"
+ when: k8s_version is defined
+ - name: Extract cloud name from clouds_yaml_full
+ ansible.builtin.set_fact:
+ cloud_name: "{{ clouds_yaml_full.clouds.keys() | first }}"
+ when: clouds_yaml_full.clouds is defined and clouds_yaml_full.clouds | dict2items | length == 1
+ - name: Generate clusterstack YAML
+ ansible.builtin.template:
+ src: "cluster-stack-template.yaml.j2"
+ dest: "{{ ansible_user_dir }}/clusterstack.yaml"
+ mode: "0644"
+ - name: Apply cluster-stack template
+ ansible.builtin.command: "kubectl apply -f {{ ansible_user_dir }}/clusterstack.yaml"
+ changed_when: true
+ - name: Pause to give the clusterstack resource time to appear (default is 2 minutes)
+ ansible.builtin.pause:
+ seconds: "{{ wait_for_cluster_stack_resource }}"
+ - name: Wait for cluster-stack to be ready
+ ansible.builtin.command: "kubectl wait clusterstack/clusterstack --for=condition=Ready --timeout={{ wait_for_cluster_stack }}s"
+ changed_when: true
+ - name: Create k8s workload cluster and execute checks
+ block:
+ - name: Create Server Groups for nodes when scs_compliance tests are enabled
+ ansible.builtin.import_tasks: ../tasks/create_server_groups.yaml
+ when: scs_compliance.enabled
+ - name: Generate cluster YAML
+ ansible.builtin.template:
+ src: "cluster.yaml.j2"
+ dest: "{{ ansible_user_dir }}/cluster.yaml"
+ mode: "0644"
+ vars:
+ worker_server_group_id: "{{ srvgrp_worker.stdout | default('') }}"
+ worker_server_group_id_value: "{% if worker_server_group_id == '' %}\"\"{% else %}{{ worker_server_group_id }}{% endif %}"
+ controller_server_group_id: "{{ srvgrp_controller.stdout | default('') }}"
+ controller_server_group_id_value: "{% if controller_server_group_id == '' %}\"\"{% else %}{{ controller_server_group_id }}{% endif %}"
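+ # The *_value indirection above keeps the rendered manifest valid YAML: when
+ # no server group was created, the variable renders as an explicit empty
+ # string ("") instead of a bare blank scalar.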
+ - name: Apply cluster template
+ ansible.builtin.command: "kubectl apply -f {{ ansible_user_dir }}/cluster.yaml"
+ changed_when: true
+ - name: Get kubeadmcontrolplane name
+ ansible.builtin.command: "kubectl get kubeadmcontrolplane -o=jsonpath='{.items[0].metadata.name}'"
+ retries: 6
+ delay: 10
+ until: kcp_name.rc == 0
+ register: kcp_name
+ changed_when: false
+ - name: Wait for kubeadmcontrolplane to become available
+ ansible.builtin.command: "kubectl wait kubeadmcontrolplane/{{ kcp_name.stdout }} --for=condition=Available --timeout={{ wait_for_cluster }}s"
+ changed_when: true
+ - name: Wait for control-plane machines to be ready
+ ansible.builtin.command: "kubectl wait machines --for=condition=Ready -l cluster.x-k8s.io/control-plane,cluster.x-k8s.io/cluster-name=test-cluster --timeout={{ wait_for_cluster }}s"
+ changed_when: true
+ - name: Get kubeconfig of the workload k8s cluster
+ ansible.builtin.shell: "clusterctl get kubeconfig test-cluster > {{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ changed_when: true
+ - name: Wait for clusteraddons resource to become ready
+ ansible.builtin.command: "kubectl wait clusteraddons/cluster-addon-test-cluster --for=condition=Ready --timeout={{ wait_for_clusteraddons }}s"
+ changed_when: true
+ - name: Wait for all system pods in the workload k8s cluster to become ready
+ ansible.builtin.command: "kubectl wait -n kube-system --for=condition=Ready --timeout={{ wait_for_cluster }}s pod --all"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ changed_when: true
+ - name: Import Sonobuoy tasks
+ ansible.builtin.import_tasks: ../tasks/sonobouy.yaml
+ when: sonobouy.enabled
+ - name: Import scs_compliance pre-tasks
+ ansible.builtin.import_tasks: ../tasks/label_nodes.yaml
+ vars:
+ os_cloud: "{{ cloud_name }}"
+ kubeconfig_path: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ when: scs_compliance.enabled
+ - name: Import scs_compliance tasks
+ ansible.builtin.import_tasks: ../tasks/scs_compliance.yaml
+ vars:
+ kubeconfig_path: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ when: scs_compliance.enabled
+ always:
+ - name: Delete Server Groups
+ ansible.builtin.command: "openstack server group delete {{ srvgrp_worker.stdout }} {{ srvgrp_controller.stdout }}"
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ changed_when: true
+ when: scs_compliance.enabled
+ - name: Cleanup workload cluster
+ ansible.builtin.command: "kubectl delete -f {{ ansible_user_dir }}/cluster.yaml"
+ changed_when: true
diff --git a/playbooks/openstack/files/patch_csx_deployment.sh b/playbooks/openstack/files/patch_csx_deployment.sh
new file mode 100755
index 00000000..fbc31ddb
--- /dev/null
+++ b/playbooks/openstack/files/patch_csx_deployment.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+# ./patch_csx_deployment.sh csx_manifest.yaml HOST_PATH_DIR
+#
+# This script adjusts a CSO or CSPO manifest to use local mode.
+# It injects the cluster stack release assets into the CSO or CSPO containers
+# via a hostPath volume (HOST_PATH_DIR) and a matching volume mount, and
+# enables local mode for them.
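+#
+# Example invocation (mirroring how tasks/cso.yaml and tasks/cspo.yaml call it):
+#   ./patch_csx_deployment.sh cso/cso-infrastructure-components.yaml /.release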
+
+if test -z "$1"; then echo "ERROR: Need CSO or CSPO manifest file arg" 1>&2; exit 1; fi
+if test -z "$2"; then echo "ERROR: Need HOST_PATH_DIR arg" 1>&2; exit 1; fi
+
+# Test whether the argument is already present in CSX manager container args
+local_mode_exist=$(yq 'select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "manager").args[] | select(. == "--local=true")' "$1")
+
+if test -z "$local_mode_exist"; then
+ echo "Enabling local mode for the CSX manager container"
+ yq 'select(.kind == "Deployment").spec.template.spec.containers[] |= select(.name == "manager").args += ["--local=true"]' -i "$1"
+else
+ echo "Local mode is already enabled in the CSX manager container"
+fi
+
+export HOST_PATH_DIR=$2
+export VOLUME_SNIPPET=volume_snippet.yaml
+export VOLUME_MOUNT_SNIPPET=volume_mount_snippet.yaml
+
+yq --null-input '
+ {
+ "name": "cluster-stacks-volume",
+ "hostPath":
+ {
+ "path": env(HOST_PATH_DIR),
+ "type": "Directory"
+ }
+ }' > $VOLUME_SNIPPET
+
+yq --null-input '
+ {
+ "name": "cluster-stacks-volume",
+ "mountPath": "/tmp/downloads/cluster-stacks",
+ "readOnly": true
+ }' > $VOLUME_MOUNT_SNIPPET
+
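+# After both injections the manager container is expected to end up with a
+# volumeMounts entry like (sketch):
+#   - name: cluster-stacks-volume
+#     mountPath: /tmp/downloads/cluster-stacks
+#     readOnly: true
+# plus a matching hostPath volume named cluster-stacks-volume in the pod spec.
+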
+# Test whether the mountPath: /tmp/downloads/cluster-stacks is already present in CSX manager container mounts
+mount_exist=$(yq 'select(.kind == "Deployment").spec.template.spec.containers[] | select(.name == "manager").volumeMounts[] | select(.mountPath == "/tmp/downloads/cluster-stacks")' "$1")
+
+if test -z "$mount_exist"; then
+ echo "Injecting volume and volume mount to the CSX manager container"
+ yq 'select(.kind == "Deployment").spec.template.spec.containers[] |= select(.name == "manager").volumeMounts += [load(env(VOLUME_MOUNT_SNIPPET))]' -i "$1"
+ yq 'select(.kind == "Deployment").spec.template.spec.volumes += [load(env(VOLUME_SNIPPET))]' -i "$1"
+else
+ echo "Mount path /tmp/downloads/cluster-stacks is already present in the CSX manager container"
+fi
+
+rm $VOLUME_SNIPPET
+rm $VOLUME_MOUNT_SNIPPET
+
+exit 0
diff --git a/playbooks/openstack/templates/cluster-stack-template.yaml.j2 b/playbooks/openstack/templates/cluster-stack-template.yaml.j2
new file mode 100644
index 00000000..0c451d9b
--- /dev/null
+++ b/playbooks/openstack/templates/cluster-stack-template.yaml.j2
@@ -0,0 +1,27 @@
+apiVersion: clusterstack.x-k8s.io/v1alpha1
+kind: ClusterStack
+metadata:
+ name: clusterstack
+spec:
+ provider: openstack
+ name: {{ cluster_stack_version_name }}
+ kubernetesVersion: "{{ k8s_version_major_minor }}"
+ channel: custom
+ autoSubscribe: false
+ providerRef:
+ apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1
+ kind: OpenStackClusterStackReleaseTemplate
+ name: cspotemplate
+ versions:
+ - "{{ cluster_stack_version }}"
+---
+apiVersion: infrastructure.clusterstack.x-k8s.io/v1alpha1
+kind: OpenStackClusterStackReleaseTemplate
+metadata:
+ name: cspotemplate
+spec:
+ template:
+ spec:
+ identityRef:
+ kind: Secret
+ name: {{ cloud_name }}
diff --git a/playbooks/openstack/templates/cluster.yaml.j2 b/playbooks/openstack/templates/cluster.yaml.j2
new file mode 100644
index 00000000..e27ffbfa
--- /dev/null
+++ b/playbooks/openstack/templates/cluster.yaml.j2
@@ -0,0 +1,41 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+ name: test-cluster
+ labels:
+ managed-secret: cloud-config
+spec:
+ clusterNetwork:
+ pods:
+ cidrBlocks:
+ - 192.168.0.0/16
+ serviceDomain: cluster.local
+ services:
+ cidrBlocks:
+ - 10.96.0.0/12
+ topology:
+ variables:
+ - name: controller_flavor
+ value: "SCS-2V-4-50"
+ - name: worker_flavor
+ value: "SCS-2V-4-50"
+ - name: external_id
+ value: "ebfe5546-f09f-4f42-ab54-094e457d42ec" # gx-scs
+ - name: cloud_name
+ value: {{ cloud_name }}
+ - name: secret_name
+ value: {{ cloud_name }}
+ - name: controller_server_group_id
+ value: {{ controller_server_group_id_value }}
+ - name: worker_server_group_id
+ value: {{ worker_server_group_id_value }}
+ class: {{ cluster_stack_name }}-{{ cluster_stack_version }}
+ controlPlane:
+ replicas: 3
+ version: {{ k8s_version }}
+ workers:
+ machineDeployments:
+ - class: {{ cluster_stack_name }}-{{ cluster_stack_version }}
+ failureDomain: nova
+ name: {{ cluster_stack_name }}
+ replicas: 3
diff --git a/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2 b/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2
new file mode 100644
index 00000000..71d9f3dc
--- /dev/null
+++ b/playbooks/openstack/templates/mgmt-cluster-config.yaml.j2
@@ -0,0 +1,12 @@
+---
+kind: Cluster
+apiVersion: kind.x-k8s.io/v1alpha4
+name: "{{ k8s_management_name }}"
+nodes:
+- role: control-plane
+ image: "kindest/node:{{ k8s_management_version }}"
+- role: worker
+ image: "kindest/node:{{ k8s_management_version }}"
+ extraMounts:
+ - hostPath: "{{ cluster_stack_release_dir }}"
+ containerPath: "{{ cluster_stack_release_container_dir }}"
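+# The extraMounts entry above exposes the csctl release assets built on the
+# host to the kind worker node; the patched CSO/CSPO deployments read them
+# from there when running in local mode (see patch_csx_deployment.sh).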
diff --git a/playbooks/tasks/create_server_groups.yaml b/playbooks/tasks/create_server_groups.yaml
new file mode 100644
index 00000000..5621aabf
--- /dev/null
+++ b/playbooks/tasks/create_server_groups.yaml
@@ -0,0 +1,25 @@
+---
+- name: Create Server Groups for nodes
+ block:
+ - name: Ensure pip is installed
+ ansible.builtin.package:
+ name: python3-pip
+ state: present
+ become: true
+ - name: Install openstack cli
+ ansible.builtin.pip:
+ name:
+ - "python-openstackclient=={{ openstackclient_version }}"
+ extra_args: --user
+ - name: Create Server Group for control-plane nodes
+ ansible.builtin.command: "openstack server group create --policy anti-affinity -f value -c id test-cluster-controller"
+ register: srvgrp_controller
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ changed_when: true
+ - name: Create Server Group for worker nodes
+ ansible.builtin.command: "openstack server group create --policy soft-anti-affinity -f value -c id test-cluster-worker"
+ register: srvgrp_worker
+ environment:
+ OS_CLOUD: "{{ cloud_name }}"
+ changed_when: true
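+ # The registered srvgrp_controller/srvgrp_worker IDs are consumed by
+ # templates/cluster.yaml.j2 and deleted again in the always section of the
+ # e2e play and in cleanup.yaml.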
diff --git a/playbooks/tasks/csctl.yaml b/playbooks/tasks/csctl.yaml
new file mode 100644
index 00000000..41471b07
--- /dev/null
+++ b/playbooks/tasks/csctl.yaml
@@ -0,0 +1,41 @@
+---
+- name: Install csctl and csctl-openstack
+ vars:
+ csctl_dir: "{{ ansible_user_dir }}/csctl"
+ csctl_openstack_dir: "{{ ansible_user_dir }}/csctl-openstack"
+ csctl_version: "0.0.3"
+ csctl_openstack_version: "0.0.1"
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ block:
+ - name: Make sure csctl directory exists
+ ansible.builtin.file:
+ path: "{{ csctl_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Make sure csctl-openstack directory exists
+ ansible.builtin.file:
+ path: "{{ csctl_openstack_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Get csctl release
+ ansible.builtin.unarchive:
+ src: "https://github.com/SovereignCloudStack/csctl/releases/download/v{{ csctl_version }}/csctl_{{ csctl_version }}_linux_amd64.tar.gz"
+ dest: "{{ csctl_dir }}"
+ remote_src: true
+ - name: Get csctl-openstack release
+ ansible.builtin.unarchive:
+ src: "https://github.com/SovereignCloudStack/csctl-plugin-openstack/releases/download/v{{ csctl_openstack_version }}/csctl-plugin-openstack_{{ csctl_openstack_version }}_linux_amd64.tar.gz"
+ dest: "{{ csctl_openstack_dir }}"
+ remote_src: true
+ - name: Install csctl
+ ansible.builtin.copy:
+ src: "{{ csctl_dir }}/csctl"
+ dest: "{{ install_dir }}/csctl"
+ mode: "+x"
+ remote_src: true
+ - name: Install csctl-openstack
+ ansible.builtin.copy:
+ src: "{{ csctl_openstack_dir }}/csctl-openstack"
+ dest: "{{ install_dir }}/csctl-openstack"
+ mode: "+x"
+ remote_src: true
diff --git a/playbooks/tasks/cso.yaml b/playbooks/tasks/cso.yaml
new file mode 100644
index 00000000..8a93f430
--- /dev/null
+++ b/playbooks/tasks/cso.yaml
@@ -0,0 +1,39 @@
+---
+- name: Install CSO
+ vars:
+ cso_version: "0.1.0-alpha.5"
+ cso_dir: "{{ ansible_user_dir }}/cso"
+ cso_wait_for_pods: 240
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ block:
+ - name: Make sure CSO directory exists
+ ansible.builtin.file:
+ path: "{{ cso_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Get CSO manifest
+ ansible.builtin.get_url:
+ url: "https://github.com/sovereignCloudStack/cluster-stack-operator/releases/download/v{{ cso_version }}/cso-infrastructure-components.yaml"
+ dest: "{{ cso_dir }}/cso-infrastructure-components.yaml"
+ mode: "+w"
+ - name: Patch the CSO deployment - enable the local mode and mount the cluster stack release
+ ansible.builtin.script:
+ cmd: "../files/patch_csx_deployment.sh {{ cso_dir }}/cso-infrastructure-components.yaml {{ release_dir }}"
+ executable: /bin/bash
+ changed_when: true
+ - name: Apply CSO manifest
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ cat {{ cso_dir }}/cso-infrastructure-components.yaml | {{ install_dir }}/envsubst | kubectl apply -f -
+ executable: /bin/bash
+ changed_when: true
+ environment:
+ GIT_PROVIDER_B64: Z2l0aHVi # github
+ GIT_ORG_NAME_B64: U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack
+ GIT_REPOSITORY_NAME_B64: Y2x1c3Rlci1zdGFja3M= # cluster-stacks
+ # FIXME: It should be fetched from the zuul secret
+ # GIT_ACCESS_TOKEN_B64:
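+ # The upstream manifest is expected to contain ${GIT_*_B64} placeholders;
+ # envsubst above substitutes the base64-encoded values so the resulting
+ # Secret points CSO at the SovereignCloudStack/cluster-stacks repository.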
+ - name: Wait for all CSO pods to become ready
+ ansible.builtin.command: "kubectl wait -n cso-system --for=condition=Ready --timeout={{ cso_wait_for_pods }}s pod --all"
+ changed_when: true
diff --git a/playbooks/tasks/cspo.yaml b/playbooks/tasks/cspo.yaml
new file mode 100644
index 00000000..04811bd2
--- /dev/null
+++ b/playbooks/tasks/cspo.yaml
@@ -0,0 +1,39 @@
+---
+- name: Install CSPO
+ vars:
+ cspo_version: "0.1.0-alpha.3"
+ cspo_dir: "{{ ansible_user_dir }}/cspo"
+ cspo_wait_for_pods: 240
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ block:
+ - name: Make sure CSPO directory exists
+ ansible.builtin.file:
+ path: "{{ cspo_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Get CSPO manifest
+ ansible.builtin.get_url:
+ url: "https://github.com/sovereignCloudStack/cluster-stack-provider-openstack/releases/download/v{{ cspo_version }}/cspo-infrastructure-components.yaml"
+ dest: "{{ cspo_dir }}/cspo-infrastructure-components.yaml"
+ mode: "+w"
+ - name: Patch the CSPO deployment - enable the local mode and mount the cluster stack release
+ ansible.builtin.script:
+ cmd: "../files/patch_csx_deployment.sh {{ cspo_dir }}/cspo-infrastructure-components.yaml {{ release_dir }}"
+ executable: /bin/bash
+ changed_when: true
+ - name: Apply CSPO manifest
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ cat {{ cspo_dir }}/cspo-infrastructure-components.yaml | {{ install_dir }}/envsubst | kubectl apply -f -
+ executable: /bin/bash
+ changed_when: true
+ environment:
+ GIT_PROVIDER_B64: Z2l0aHVi # github
+ GIT_ORG_NAME_B64: U292ZXJlaWduQ2xvdWRTdGFjaw== # SovereignCloudStack
+ GIT_REPOSITORY_NAME_B64: Y2x1c3Rlci1zdGFja3M= # cluster-stacks
+ # FIXME: It should be fetched from the zuul secret
+ # GIT_ACCESS_TOKEN_B64:
+ - name: Wait for all CSPO pods to become ready
+ ansible.builtin.command: "kubectl wait -n cspo-system --for=condition=Ready --timeout={{ cspo_wait_for_pods }}s pod --all"
+ changed_when: true
diff --git a/playbooks/tasks/label_nodes.yaml b/playbooks/tasks/label_nodes.yaml
new file mode 100644
index 00000000..d67c71ab
--- /dev/null
+++ b/playbooks/tasks/label_nodes.yaml
@@ -0,0 +1,51 @@
+---
+- name: Label k8s nodes based on OpenStack host IDs
+ vars:
+ # Note (@mfeder): The following label key serves as a temporary label until upstream
+ # proposes and implements an alternative label key/solution for indicating a physical machine
+ # within the Kubernetes cluster.
+ # refer to: https://github.com/SovereignCloudStack/issues/issues/540
+ label_key: "topology.scs.community/host-id"
+ jq_version: "1.7.1"
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ block:
+ - name: Check if `os_cloud` variable is defined
+ ansible.builtin.fail:
+ msg: "os_cloud is not defined or empty"
+ when: os_cloud is not defined or os_cloud == ''
+ - name: Check if `kubeconfig_path` variable is defined
+ ansible.builtin.fail:
+ msg: "kubeconfig_path is not defined or empty"
+ when: kubeconfig_path is not defined or kubeconfig_path == ''
+ - name: Install jq
+ ansible.builtin.get_url:
+ url: "https://github.com/jqlang/jq/releases/download/jq-{{ jq_version }}/jq-linux64"
+ dest: "{{ install_dir }}/jq"
+ mode: "+x"
+ # TODO: use `checksum` attr here to verify the digest of the destination file, if available
+ - name: Get list of OpenStack server details
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ openstack server list -f json | jq -r '.[].ID' | while read -r id; do openstack server show "$id" -f json; done | jq -s '.'
+ executable: /bin/bash
+ register: openstack_server_list
+ changed_when: false
+ environment:
+ OS_CLOUD: "{{ os_cloud }}"
+ - name: Populate openstack_hosts dict with hostname=host_id pairs
+ ansible.builtin.set_fact:
+ openstack_hosts: "{{ openstack_hosts | default({}) | combine({item.name: item.hostId}) }}"
+ with_items: "{{ openstack_server_list.stdout | from_json }}"
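+ # Sketch of the mapping built above (hypothetical values), assuming node
+ # names match the OpenStack server names:
+ #   {"test-cluster-worker-abc12": "8f1c...", "test-cluster-control-xyz": "02bd..."}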
+ - name: Get a list of nodes
+ ansible.builtin.command: kubectl get nodes -o json
+ register: kubernetes_node_list
+ changed_when: false
+ environment:
+ KUBECONFIG: "{{ kubeconfig_path }}"
+ - name: Add node label
+ ansible.builtin.command: "kubectl label nodes {{ item.metadata.name }} {{ label_key }}={{ openstack_hosts[item.metadata.name] }}"
+ with_items: "{{ (kubernetes_node_list.stdout | from_json)['items'] }}"
+ changed_when: true
+ environment:
+ KUBECONFIG: "{{ kubeconfig_path }}"
diff --git a/playbooks/tasks/scs_compliance.yaml b/playbooks/tasks/scs_compliance.yaml
new file mode 100644
index 00000000..843efc7e
--- /dev/null
+++ b/playbooks/tasks/scs_compliance.yaml
@@ -0,0 +1,50 @@
+---
+- name: Download, install, configure, and execute SCS KaaS compliance check
+ vars:
+ check_dir: "{{ ansible_user_dir }}/scs-compliance"
+ python_venv_dir: "{{ ansible_user_dir }}/scs-compliance/venv"
+ block:
+ - name: Check if `kubeconfig_path` variable is defined
+ ansible.builtin.fail:
+ msg: "kubeconfig_path is not defined or empty"
+ when: kubeconfig_path is not defined or kubeconfig_path == ''
+ - name: Ensure check directory
+ ansible.builtin.file:
+ path: "{{ check_dir }}"
+ state: directory
+ mode: "0755"
+ - name: Get SCS KaaS compliance check assets
+ ansible.builtin.git:
+ repo: https://github.com/SovereignCloudStack/standards.git
+ dest: "{{ check_dir }}"
+ single_branch: true
+ version: main
+ - name: Install virtualenv
+ ansible.builtin.package:
+ name: virtualenv
+ become: true
+ - name: Install check requirements
+ ansible.builtin.pip:
+ requirements: "{{ check_dir }}/Tests/requirements.txt"
+ virtualenv: "{{ python_venv_dir }}"
+ - name: Execute SCS KaaS compliance check
+ ansible.builtin.shell:
+ cmd:
+ ". {{ python_venv_dir }}/bin/activate &&
+ python3 {{ check_dir }}/Tests/scs-compliance-check.py {{ check_dir }}/Tests/scs-compatible-kaas.yaml -v -s KaaS_V2 -a kubeconfig={{ kubeconfig_path }}"
+ changed_when: false
+ register: scs_compliance_results
+ always:
+ - name: Capture SCS KaaS compliance results # noqa: ignore-errors
+ ansible.builtin.set_fact:
+ scs_compliance_results_parsed: "{{ scs_compliance_results.stdout }}"
+ when: scs_compliance_results is defined
+ ignore_errors: true
+ - name: Insert SCS compliance results into the warning message that will be appended to the comment Zuul leaves on the PR # noqa: ignore-errors
+ zuul_return:
+ data:
+ zuul:
+ warnings:
+ - "\n SCS Compliance results
\n{{ scs_compliance_results_parsed }}\n "
+ when: scs_compliance_results_parsed is defined and scs_compliance_results_parsed | length > 0
+ ignore_errors: true
diff --git a/playbooks/tasks/sonobouy.yaml b/playbooks/tasks/sonobouy.yaml
new file mode 100644
index 00000000..8b166b96
--- /dev/null
+++ b/playbooks/tasks/sonobouy.yaml
@@ -0,0 +1,66 @@
+---
+- name: Execute Sonobuoy check in mode {{ sonobouy.mode }}
+ vars:
+ sonobuoy_version: "0.57.1"
+ install_dir: "{{ ansible_user_dir }}/.local/bin"
+ block:
+ - name: Install Sonobuoy
+ ansible.builtin.unarchive:
+ src: "https://github.com/vmware-tanzu/sonobuoy/releases/download/v{{ sonobuoy_version }}/sonobuoy_{{ sonobuoy_version }}_linux_amd64.tar.gz"
+ dest: "{{ install_dir }}"
+ mode: '+x'
+ remote_src: true
+ args:
+ creates: "{{ install_dir }}/sonobuoy"
+ - name: Run Sonobuoy tests
+ ansible.builtin.command: "sonobuoy run --plugin-env=e2e.E2E_PROVIDER=openstack --mode={{ sonobouy.mode }}"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ changed_when: true
+ - name: Wait for Sonobuoy tests to complete
+ ansible.builtin.shell:
+ cmd: |
+ set -o pipefail
+ until sonobuoy status | grep "has completed" >/dev/null 2>&1; do
+ sleep 10
+ sonobuoy status
+ done
+ executable: /bin/bash
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ changed_when: true
+ - name: Sonobuoy status
+ ansible.builtin.command: "sonobuoy status"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ register: sonobuoy_status
+ changed_when: false
+ - name: Retrieve Sonobuoy results
+ ansible.builtin.command: "sonobuoy retrieve"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ register: sonobuoy_retrieve_output
+ changed_when: true
+ - name: Get Sonobuoy results
+ ansible.builtin.command: "sonobuoy results {{ sonobuoy_retrieve_output.stdout }}"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ register: sonobouy_results
+ changed_when: false
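+ # `sonobuoy retrieve` prints the path of the downloaded results tarball on
+ # stdout; `sonobuoy results` above reads that tarball, and the cleanup tasks
+ # below delete the run's k8s resources and remove the tarball again.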
+ - name: Delete k8s resources that were generated by a Sonobuoy run
+ ansible.builtin.command: "sonobuoy delete --all"
+ environment:
+ KUBECONFIG: "{{ cluster_manifest_dir }}/kubeconfig-test-cluster"
+ changed_when: true
+ - name: Remove Sonobuoy retrieve file
+ ansible.builtin.file:
+ path: "{{ sonobuoy_retrieve_output.stdout }}"
+ state: absent
+ - name: Insert Sonobuoy results into the warning message that will be appended to the comment Zuul leaves on the PR # noqa: ignore-errors
+ zuul_return:
+ data:
+ zuul:
+ warnings:
+ - "\n Sonobouy results
\n{{ sonobouy_results.stdout }}\n "
+ when: sonobouy_results is defined and sonobouy_results | length > 0
+ ignore_errors: true