🐛 Fix events recording (#50)
* Fix events recording

CSPO controllers record multiple events on the resources they manage.
However, these events were not actually recorded on the CRs.

Fixes: #47

Signed-off-by: Matej Feder <matej.feder@dnation.cloud>

* Apply suggestions from code review

Co-authored-by: Roman Hros <roman.hros@dnation.cloud>
Signed-off-by: Matej Feder <matej.feder@dnation.cloud>

---------

Signed-off-by: Matej Feder <matej.feder@dnation.cloud>
Co-authored-by: Roman Hros <roman.hros@dnation.cloud>
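
Why the events were lost: the cluster-api util/record helpers used by these
controllers write through a package-level recorder that is a no-op until it
is initialized. Roughly, the package behaves like this sketch (a paraphrase,
not the package's exact source; details vary by version):

package record

import (
	"sync"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
)

var (
	initOnce sync.Once
	// The zero-value FakeRecorder has a nil Events channel, so anything
	// recorded before initialization is silently dropped.
	defaultRecorder record.EventRecorder = new(record.FakeRecorder)
)

// InitFromRecorder swaps in a real recorder; only the first call takes effect.
func InitFromRecorder(recorder record.EventRecorder) {
	initOnce.Do(func() { defaultRecorder = recorder })
}

// Eventf records a Normal event on the given object.
func Eventf(object runtime.Object, reason, message string, args ...interface{}) {
	defaultRecorder.Eventf(object, corev1.EventTypeNormal, reason, message, args...)
}

// Warnf records a Warning event on the given object.
func Warnf(object runtime.Object, reason, message string, args ...interface{}) {
	defaultRecorder.Eventf(object, corev1.EventTypeWarning, reason, message, args...)
}

Without the record.InitFromRecorder call added to cmd/main.go below, every
record.Eventf and record.Warnf in the controllers hit the no-op default,
which is why no events ever appeared on the CRs.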
matofeder and chess-knight authored Jan 17, 2024
1 parent 74318e1 commit c1900a7
Showing 4 changed files with 24 additions and 9 deletions.
4 changes: 4 additions & 0 deletions cmd/main.go
@@ -31,6 +31,7 @@ import (
 	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
 	// to ensure that exec-entrypoint and run can make use of them.
 	_ "k8s.io/client-go/plugin/pkg/client/auth"
+	"sigs.k8s.io/cluster-api/util/record"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/cache"
 	"sigs.k8s.io/controller-runtime/pkg/healthz"
@@ -102,6 +103,9 @@ func main() {
 		os.Exit(1)
 	}
 
+	// Initialize event recorder.
+	record.InitFromRecorder(mgr.GetEventRecorderFor("cspo-controller"))
+
 	gitFactory := githubclient.NewFactory()
 
 	if err = (&controller.OpenStackClusterStackReleaseReconciler{
7 changes: 7 additions & 0 deletions config/rbac/role.yaml
@@ -4,6 +4,13 @@ kind: ClusterRole
 metadata:
   name: manager-role
 rules:
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
 - apiGroups:
   - ""
   resources:
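
The event recorder creates Kubernetes Event objects in the core ("") API
group, so the manager's ServiceAccount needs create and patch on events
(patch is used when the recorder deduplicates a repeated event by updating
its count). The rule is generated from the kubebuilder marker added to both
controllers below; assuming the standard kubebuilder scaffolding, it is
regenerated with the make manifests target:

//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch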
14 changes: 7 additions & 7 deletions internal/controller/openstackclusterstackrelease_controller.go
@@ -68,6 +68,7 @@ const (
 //+kubebuilder:rbac:groups=infrastructure.clusterstack.x-k8s.io,resources=openstackclusterstackreleases,verbs=get;list;watch;create;update;patch;delete
 //+kubebuilder:rbac:groups=infrastructure.clusterstack.x-k8s.io,resources=openstackclusterstackreleases/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=infrastructure.clusterstack.x-k8s.io,resources=openstackclusterstackreleases/finalizers,verbs=update
+//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
 
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
@@ -148,6 +149,7 @@ func (r *OpenStackClusterStackReleaseReconciler) Reconcile(ctx context.Context,
 
 		r.openStackClusterStackRelDownloadDirectoryMutex.Unlock()
 
+		record.Eventf(openstackclusterstackrelease, "ClusterStackReleaseAssetsReady", "successfully downloaded ClusterStackReleaseAssets %q", releaseTag)
 		// requeue to make sure release assets can be accessed
 		return ctrl.Result{Requeue: true}, nil
 	}
@@ -174,7 +176,7 @@ func (r *OpenStackClusterStackReleaseReconciler) Reconcile(ctx context.Context,
 		osnirName := fmt.Sprintf("%s-%s-%s", nameWithoutVersion, openStackNodeImage.CreateOpts.Name, nodeImageVersion)
 
 		if err := r.createOrUpdateOpenStackNodeImageRelease(ctx, openstackclusterstackrelease, osnirName, openStackNodeImage, ownerRef); err != nil {
-			return ctrl.Result{}, fmt.Errorf("failed to get or create OpenStackNodeImageRelease %s/%s: %w", openstackclusterstackrelease.Namespace, osnirName, err)
+			return ctrl.Result{}, fmt.Errorf("failed to create or update OpenStackNodeImageRelease %s/%s: %w", openstackclusterstackrelease.Namespace, osnirName, err)
 		}
 	}
 
@@ -210,6 +212,7 @@ func (r *OpenStackClusterStackReleaseReconciler) Reconcile(ctx context.Context,
 
 	logger.Info("OpenStackClusterStackRelease **ready**")
 	conditions.MarkTrue(openstackclusterstackrelease, apiv1alpha1.OpenStackNodeImageReleasesReadyCondition)
+	record.Eventf(openstackclusterstackrelease, "OpenStackNodeImageReleasesReady", "OpenStackNodeImageRelease objects are ready")
 	openstackclusterstackrelease.Status.Ready = true
 
 	return ctrl.Result{}, nil
@@ -226,10 +229,7 @@ func (r *OpenStackClusterStackReleaseReconciler) createOrUpdateOpenStackNodeImageRelease(
 		openStackNodeImageRelease.SetOwnerReferences(util.EnsureOwnerRef(openStackNodeImageRelease.GetOwnerReferences(), *ownerRef))
 
 		if err := r.Update(ctx, openStackNodeImageRelease); err != nil {
-			record.Eventf(openStackNodeImageRelease,
-				"ErrorOpenStackNodeImageRelease",
-				"failed to update %s OpenStackNodeImageRelease: %s", osnirName, err.Error(),
-			)
+			record.Warnf(openStackNodeImageRelease, "FailedUpdateOpenStackNodeImageRelease", err.Error())
 			return fmt.Errorf("failed to update OpenStackNodeImageRelease: %w", err)
 		}
 
@@ -254,11 +254,11 @@ func (r *OpenStackClusterStackReleaseReconciler) createOrUpdateOpenStackNodeImageRelease(
 	openStackNodeImageRelease.Spec.IdentityRef = openstackclusterstackrelease.Spec.IdentityRef
 
 	if err := r.Create(ctx, openStackNodeImageRelease); err != nil {
-		record.Warnf(openStackNodeImageRelease, "ErrorOpenStackNodeImageRelease", err.Error())
+		record.Warnf(openStackNodeImageRelease, "FailedCreateOpenStackNodeImageRelease", err.Error())
 		return fmt.Errorf("failed to create OpenStackNodeImageRelease: %w", err)
 	}
 
-	record.Eventf(openStackNodeImageRelease, "OpenStackNodeImageReleaseCreated", "successfully created OpenStackNodeImageRelease object %q", osnirName)
+	record.Eventf(openstackclusterstackrelease, "OpenStackNodeImageReleaseCreated", "successfully created OpenStackNodeImageRelease object %q", osnirName)
 	return nil
 }
 
8 changes: 6 additions & 2 deletions internal/controller/openstacknodeimagerelease_controller.go
@@ -58,6 +58,7 @@ const (
 //+kubebuilder:rbac:groups=infrastructure.clusterstack.x-k8s.io,resources=openstacknodeimagereleases/status,verbs=get;update;patch
 //+kubebuilder:rbac:groups=infrastructure.clusterstack.x-k8s.io,resources=openstacknodeimagereleases/finalizers,verbs=update
 //+kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update
+//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
 
 // Reconcile is part of the main kubernetes reconciliation loop which aims to
 // move the current state of the cluster closer to the desired state.
@@ -160,6 +161,7 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 	if imageID == "" {
 		conditions.MarkFalse(openstacknodeimagerelease, apiv1alpha1.OpenStackImageReadyCondition, apiv1alpha1.OpenStackImageNotCreatedYetReason, clusterv1beta1.ConditionSeverityInfo, "image is not created yet")
 		conditions.MarkFalse(openstacknodeimagerelease, apiv1alpha1.OpenStackImageImportStartedCondition, apiv1alpha1.OpenStackImageImportNotStartReason, clusterv1beta1.ConditionSeverityInfo, "image import not start yet")
+		record.Eventf(openstacknodeimagerelease, "OpenStackImageImportStarted", "image is neither created nor imported yet %q", openstacknodeimagerelease.Spec.Image.CreateOpts.Name)
 		openstacknodeimagerelease.Status.Ready = false
 
 		imageCreateOpts := openstacknodeimagerelease.Spec.Image.CreateOpts
@@ -175,6 +177,7 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 			logger.Error(err, "failed to create an image")
 			return ctrl.Result{}, nil
 		}
+		record.Eventf(openstacknodeimagerelease, "OpenStackImageCreated", "successfully created an image %q, ID %q", openstacknodeimagerelease.Spec.Image.CreateOpts.Name, imageCreated.ID)
 
 		imageImportOpts := imageimport.CreateOpts{
 			Name: imageimport.WebDownloadMethod,
@@ -194,6 +197,8 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 		}
 
 		conditions.MarkTrue(openstacknodeimagerelease, apiv1alpha1.OpenStackImageImportStartedCondition)
+		record.Eventf(openstacknodeimagerelease, "OpenStackImageImportStarted", "successfully started an image import %q, ID %q", openstacknodeimagerelease.Spec.Image.CreateOpts.Name, imageCreated.ID)
+
 		// requeue to make sure that image ID can be found via image name
 		return ctrl.Result{Requeue: true}, nil
 	}
@@ -242,6 +247,7 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 		logger.Info("OpenStackNodeImageRelease **ready** - image is **ACTIVE**.", "name", openstacknodeimagerelease.Spec.Image.CreateOpts.Name, "ID", imageID)
 		conditions.MarkTrue(openstacknodeimagerelease, apiv1alpha1.OpenStackImageReadyCondition)
 		openstacknodeimagerelease.Status.Ready = true
+		record.Eventf(openstacknodeimagerelease, "OpenStackImageActive", "image status is ACTIVE %q, ID %q", openstacknodeimagerelease.Spec.Image.CreateOpts.Name, imageID)
 
 	case images.ImageStatusDeactivated, images.ImageStatusKilled:
 		// These statuses are unexpected. Hence we set a failure for them. See the explanation below:
@@ -257,7 +263,6 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 		openstacknodeimagerelease.Status.Ready = false
 		record.Warnf(openstacknodeimagerelease, "OpenStackImageStatusUnexpected", err.Error())
 		logger.Error(err, "unexpected image status")
-		return ctrl.Result{}, nil
 
 	case images.ImageStatusQueued, images.ImageStatusSaving, images.ImageStatusDeleted, images.ImageStatusPendingDelete, images.ImageStatusImporting:
 		// The other statuses are expected. See the explanation below:
@@ -281,7 +286,6 @@ func (r *OpenStackNodeImageReleaseReconciler) Reconcile(ctx context.Context, req
 		openstacknodeimagerelease.Status.Ready = false
 		record.Warnf(openstacknodeimagerelease, "OpenStackImageStatusUnknown", err.Error())
 		logger.Error(err, "unknown image status")
-		return ctrl.Result{}, nil
 	}
 
 	return ctrl.Result{}, nil
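
Taken together, the commit settles on one pattern: initialize the global
recorder once in main, then emit Normal events for progress and Warning
events with "Failed..." reasons for errors anywhere in the reconcilers. A
minimal, self-contained sketch of that call pattern (the helper and its
reason strings are hypothetical, not CSPO code):

package controller

import (
	"context"
	"fmt"

	"sigs.k8s.io/cluster-api/util/record"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createWithEvents mirrors the commit's convention: a Warning event with a
// "Failed..." reason on error, a Normal event on success.
func createWithEvents(ctx context.Context, c client.Client, obj client.Object) error {
	if err := c.Create(ctx, obj); err != nil {
		record.Warnf(obj, "FailedCreateObject", err.Error())
		return fmt.Errorf("failed to create %q: %w", obj.GetName(), err)
	}
	record.Eventf(obj, "ObjectCreated", "successfully created object %q", obj.GetName())
	return nil
}

The recorded events are attached to the referenced custom resource and show
up in the output of kubectl describe for it.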
