diff --git a/README.md b/README.md
index 41ab1473..696220d6 100644
--- a/README.md
+++ b/README.md
@@ -19,7 +19,8 @@ kind: Module
 metadata:
   name: hello
 spec:
-  repoName: terraform-applier
+  repoURL: git@github.com:utilitywarehouse/terraform-applier.git
+  repoRef: master
   path: dev/hello
   schedule: "00 */1 * * *"
   planOnly: false
@@ -143,26 +144,29 @@ it will not do `apply` even if drift is detected.
 Controller will force shutdown on current stage run if it takes more time then `TERMINATION_GRACE_PERIOD` set on controller.
 ### Git Sync
-
-Terraform-applier has built in git sync functionality, it will periodically pull files down from a repository and make it available for modules.
-it supports multiple repositories, use following config to add repositories. config is map of repository name and repo config.
-modules must use this repository name in CRD as `repoName` to reference a repository. git-sync only supports 1 branch and revision per repository.
-all repositories will be cloned to given `repos-root-path` path.
+Terraform-applier uses the [git-mirror](https://github.com/utilitywarehouse/git-mirror) package to sync git repositories.
+This package supports mirroring multiple repositories and all available references.
+Because of this, terraform-applier can also support different revisions of the same repo; the revision is set in the module CRD via the `repoRef` field.
+Use the following config to add repositories. Supported URL formats are
+'git@host.xz:org/repo.git', 'ssh://git@host.xz/org/repo.git' or 'https://host.xz/org/repo.git'
 ```yaml
-repositories:
-  terraform-applier:
-    remote: git@github.com:utilitywarehouse/terraform-applier.git
-    branch: master
-    revision: HEAD
-    depth: 0
-  another-repo:
-    remote: git@github.com/org/another-repo.git
+git_mirror:
+  defaults:
+    interval: 1m # defaults to 30s
+    git_gc: always # defaults to always
+    auth:
+      ssh_key_path: /etc/git-secret/ssh # defaults to --git-ssh-key-file flag
+      ssh_known_hosts_path: /etc/git-secret/known_hosts # defaults to --git-ssh-known-hosts
+  repositories:
+    - remote: git@github.com:utilitywarehouse/terraform-applier.git
+    - remote: git@github.com:utilitywarehouse/other-repo.git
 ```
 ### Controller config
-- `--repos-root-path (REPOS_ROOT_PATH)` - (default: `/src`) Absolute path to the directory containing all repositories of the modules. The immediate subdirectories of this directory should contain the module repo directories and directory name should match repoName referenced in module.
+- `--repos-root-path (REPOS_ROOT_PATH)` - (default: `/src`) Absolute path to the directory containing all repositories of the modules.
+  This dir will be cleared on start.
 - `--config (TF_APPLIER_CONFIG)` - (default: `/config/config.yaml`) Path to the tf applier config file containing repository config.
 - `--min-interval-between-runs (MIN_INTERVAL_BETWEEN_RUNS)` - (default: `60`) The minimum interval in seconds, user can set between 2 consecutive runs. This value defines the frequency of runs.
 - `--termination-grace-period (TERMINATION_GRACE_PERIOD)` - (default: `60`) Termination grace period is the ime given to
diff --git a/api/v1beta1/module_types.go b/api/v1beta1/module_types.go
index 3c85aa13..15a14bf5 100644
--- a/api/v1beta1/module_types.go
+++ b/api/v1beta1/module_types.go
@@ -78,8 +78,19 @@ type ModuleSpec struct {
 	// Important: Run "make" to regenerate code after modifying this file
 	// Name of the repository containing Terraform module directory.
+ // +optional RepoName string `json:"repoName"` + // URL to the repository containing Terraform module source code. + // +optional + RepoURL string `json:"repoURL"` + + // The RepoRef specifies the revision of the repository for the module source code. + // this can be tag or branch. If not specified, this defaults to "HEAD" (repo's default branch) + // +optional + // +kubebuilder:default=HEAD + RepoRef string `json:"repoRef,omitempty"` + // Path to the directory containing Terraform Root Module (.tf) files. Path string `json:"path"` diff --git a/config.go b/config.go index fbfbce56..ba257cff 100644 --- a/config.go +++ b/config.go @@ -3,12 +3,12 @@ package main import ( "os" - "github.com/utilitywarehouse/terraform-applier/git" + "github.com/utilitywarehouse/git-mirror/pkg/mirror" "gopkg.in/yaml.v2" ) type Config struct { - Repositories map[string]git.RepositoryConfig `yaml:"repositories"` + GitMirror mirror.RepoPoolConfig `yaml:"git_mirror"` } func parseConfigFile(path string) (*Config, error) { diff --git a/config.yaml b/config.yaml index 55c66cc0..f8f0a005 100644 --- a/config.yaml +++ b/config.yaml @@ -1,6 +1,10 @@ -repositories: - terraform-applier: - remote: git@github.com:utilitywarehouse/terraform-applier.git - branch: master - revision: HEAD - depth: 0 +git_mirror: + defaults: + root: /src + interval: 30s + git_gc: always + auth: + ssh_key_path: /etc/git-secret/ssh + ssh_known_hosts_path: /etc/git-secret/known_hosts + repositories: + - remote: git@github.com:utilitywarehouse/terraform-applier.git \ No newline at end of file diff --git a/config/crd/bases/terraform-applier.uw.systems_modules.yaml b/config/crd/bases/terraform-applier.uw.systems_modules.yaml index b788fa64..9c54544e 100644 --- a/config/crd/bases/terraform-applier.uw.systems_modules.yaml +++ b/config/crd/bases/terraform-applier.uw.systems_modules.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.13.0 + controller-gen.kubebuilder.io/version: v0.14.0 name: modules.terraform-applier.uw.systems spec: group: terraform-applier.uw.systems @@ -54,14 +54,19 @@ spec: description: Module is the Schema for the modules API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -69,8 +74,9 @@ spec: description: ModuleSpec defines the desired state of Module properties: backend: - description: List of backend config attributes passed to the Terraform - init for terraform backend configuration + description: |- + List of backend config attributes passed to the Terraform init + for terraform backend configuration items: description: EnvVar represents an environment variable present in a Container. @@ -79,15 +85,16 @@ spec: description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. Cannot @@ -100,8 +107,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its key @@ -112,10 +121,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath is @@ -130,10 +138,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' 
+ description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -162,8 +169,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -180,11 +189,11 @@ spec: type: array delegateServiceAccountSecretRef: default: terraform-applier-delegate-token - description: DelegateServiceAccountSecretRef references a Secret of - type kubernetes.io/service-account-token in the same namespace as - the Module that will be used to fetch secrets, configmaps from modules' - namespace. if vaultRequests are specified, the service account's - jwt will be used for vault authentication. + description: |- + DelegateServiceAccountSecretRef references a Secret of type + kubernetes.io/service-account-token in the same namespace as the Module + that will be used to fetch secrets, configmaps from modules' namespace. + if vaultRequests are specified, the service account's jwt will be used for vault authentication. minLength: 1 type: string env: @@ -198,15 +207,16 @@ spec: description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. Cannot @@ -219,8 +229,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the ConfigMap or its key @@ -231,10 +243,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath is @@ -249,10 +260,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -281,8 +291,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must @@ -343,6 +355,16 @@ spec: repoName: description: Name of the repository containing Terraform module directory. type: string + repoRef: + default: HEAD + description: |- + The RepoRef specifies the revision of the repository for the module source code. + this can be tag or branch. If not specified, this defaults to "HEAD" (repo's default branch) + type: string + repoURL: + description: URL to the repository containing Terraform module source + code. + type: string runTimeout: default: 900 description: RunTimeout specifies the timeout in sec for performing @@ -350,9 +372,9 @@ spec: maximum: 1800 type: integer schedule: - description: The schedule in Cron format. Module will do periodic - run for a given schedule if no schedule provided then module will - only run if new PRs are added to given module path + description: |- + The schedule in Cron format. Module will do periodic run for a given schedule + if no schedule provided then module will only run if new PRs are added to given module path type: string var: description: List of input variables passed to the Terraform execution. @@ -364,15 +386,16 @@ spec: description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: 'Variable references $(VAR_NAME) are expanded using - the previously defined environment variables in the container - and any service environment variables. If a variable cannot - be resolved, the reference in the input string will be unchanged. - Double $$ are reduced to a single $, which allows for escaping - the $(VAR_NAME) syntax: i.e. "$$(VAR_NAME)" will produce the - string literal "$(VAR_NAME)". 
Escaped references will never - be expanded, regardless of whether the variable exists or - not. Defaults to "".' + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any service environment variables. If a variable cannot be resolved, + the reference in the input string will be unchanged. Double $$ are reduced + to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. + "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". + Escaped references will never be expanded, regardless of whether the variable + exists or not. + Defaults to "". type: string valueFrom: description: Source for the environment variable's value. Cannot @@ -385,8 +408,10 @@ spec: description: The key to select. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its key @@ -397,10 +422,9 @@ spec: type: object x-kubernetes-map-type: atomic fieldRef: - description: 'Selects a field of the pod: supports metadata.name, - metadata.namespace, `metadata.labels['''']`, `metadata.annotations['''']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, - status.podIP, status.podIPs.' + description: |- + Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, + spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: description: Version of the schema the FieldPath is @@ -415,10 +439,9 @@ spec: type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: 'Selects a resource of the container: only - resources limits and requests (limits.cpu, limits.memory, - limits.ephemeral-storage, requests.cpu, requests.memory - and requests.ephemeral-storage) are currently supported.' + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: description: 'Container name: required for volumes, @@ -447,8 +470,10 @@ spec: be a valid secret key. type: string name: - description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid?' + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid? 
type: string optional: description: Specify whether the Secret or its key must @@ -464,30 +489,32 @@ spec: type: object type: array vaultRequests: - description: VaultRequests specifies credential generate requests - from the vault configured on the controller + description: |- + VaultRequests specifies credential generate requests from the vault + configured on the controller properties: aws: - description: aws specifies vault credential generation request - for AWS secrets engine If specified, controller will request - AWS creds from vault and set AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY - and AWS_SESSION_TOKEN envs during terraform run. 'VAULT_AWS_ENG_PATH' - env set on controller will be used as credential path + description: |- + aws specifies vault credential generation request for AWS secrets engine + If specified, controller will request AWS creds from vault and set + AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN envs during + terraform run. + 'VAULT_AWS_ENG_PATH' env set on controller will be used as credential path properties: credentialType: default: assumed_role - description: CredentialType specifies the type of credential - to be used when retrieving credentials from the role. Must - be one of iam_user, assumed_role, or federation_token. + description: |- + CredentialType specifies the type of credential to be used when retrieving credentials from the role. + Must be one of iam_user, assumed_role, or federation_token. enum: - iam_user - assumed_role - federation_token type: string roleARN: - description: The ARN of the role to assume if credential_type - on the Vault role is assumed_role. Optional if the Vault - role only allows a single AWS role ARN. + description: |- + The ARN of the role to assume if credential_type on the Vault role is assumed_role. + Optional if the Vault role only allows a single AWS role ARN. type: string vaultRole: description: VaultRole Specifies the name of the vault role @@ -497,17 +524,17 @@ spec: type: object required: - path - - repoName type: object status: description: ModuleStatus defines the observed state of Module properties: currentState: - description: CurrentState denotes current overall status of module - run it will be either 'Running' -> Module is in running state 'Ready' - -> last run finished successfully and its waiting for next run/event - 'Errored' -> last run finished with Error and its waiting for next - run/event + description: |- + CurrentState denotes current overall status of module run + it will be either + 'Running' -> Module is in running state + 'Ready' -> last run finished successfully and its waiting for next run/event + 'Errored' -> last run finished with Error and its waiting for next run/event type: string lastApplyInfo: description: LastApplyInfo is the stdout of apply command. 
it may diff --git a/controllers/module_controller.go b/controllers/module_controller.go index 8eab710b..69d14b47 100644 --- a/controllers/module_controller.go +++ b/controllers/module_controller.go @@ -19,6 +19,7 @@ package controllers import ( "context" "fmt" + "log/slog" "sync" "time" @@ -29,7 +30,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "github.com/hashicorp/go-hclog" "github.com/robfig/cron/v3" tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1" "github.com/utilitywarehouse/terraform-applier/git" @@ -38,15 +38,17 @@ import ( corev1 "k8s.io/api/core/v1" ) +const trace = slog.Level(-8) + // ModuleReconciler reconciles a Module object type ModuleReconciler struct { client.Client Scheme *runtime.Scheme Recorder record.EventRecorder Clock sysutil.ClockInterface - GitSyncPool git.SyncInterface + Repos git.Repositories Queue chan<- runner.Request - Log hclog.Logger + Log *slog.Logger MinIntervalBetweenRuns time.Duration RunStatus *sync.Map } @@ -70,7 +72,7 @@ type ModuleReconciler struct { func (r *ModuleReconciler) Reconcile(ctx context.Context, req reconcile.Request) (ctrl.Result, error) { log := r.Log.With("module", req.NamespacedName) - log.Trace("reconciling...") + log.Log(ctx, trace, "reconciling...") var module tfaplv1beta1.Module if err := r.Get(ctx, req.NamespacedName, &module); err != nil { @@ -86,6 +88,17 @@ func (r *ModuleReconciler) Reconcile(ctx context.Context, req reconcile.Request) return ctrl.Result{}, nil } + // verify repoURL exists + // this step is required as for migration we have kept repoURL as optional + if module.Spec.RepoURL == "" { + msg := fmt.Sprintf("repoURL is required, please add repoURL instead of repoName:%s", module.Spec.RepoName) + log.Error(msg) + r.setFailedStatus(req, &module, tfaplv1beta1.ReasonSpecsParsingFailure, msg, r.Clock.Now()) + // we don't really care about requeuing until we get an update that + // fixes the repoURL, so don't return an error + return ctrl.Result{}, nil + } + // pollIntervalDuration is used as minimum duration for the next run pollIntervalDuration := time.Duration(module.Spec.PollInterval) * time.Second @@ -111,24 +124,24 @@ func (r *ModuleReconciler) Reconcile(ctx context.Context, req reconcile.Request) } if module.Status.RunCommitHash == "" { - log.Info("starting initial run") + log.Debug("starting initial run") r.Queue <- runner.Request{NamespacedName: req.NamespacedName, Type: tfaplv1beta1.PollingRun, PlanOnly: isPlanOnly} // use next poll internal as minimum queue duration as status change will not trigger Reconcile return ctrl.Result{RequeueAfter: pollIntervalDuration}, nil } // check for new changes on modules path - changed, err := r.GitSyncPool.HasChangesForPath(ctx, module.Spec.RepoName, module.Spec.Path, module.Status.RunCommitHash) + hash, err := r.Repos.Hash(ctx, module.Spec.RepoURL, module.Spec.RepoRef, module.Spec.Path) if err != nil { - msg := fmt.Sprintf("unable to determine changes in repo based on hash:%s err:%s", module.Status.RunCommitHash, err) + msg := fmt.Sprintf("unable to get current hash of the repo err:%s", err) log.Error(msg) r.setFailedStatus(req, &module, tfaplv1beta1.ReasonGitFailure, msg, r.Clock.Now()) // since issue is not related to module specs, requeue again in case its fixed return ctrl.Result{RequeueAfter: pollIntervalDuration}, nil } - if changed { - log.Info("new changes are available starting run") + if hash != module.Status.RunCommitHash { + log.Debug("revision is changed on module path triggering 
run", "lastRun", module.Status.RunCommitHash, "current", hash) r.Queue <- runner.Request{NamespacedName: req.NamespacedName, Type: tfaplv1beta1.PollingRun, PlanOnly: isPlanOnly} // use next poll internal as minimum queue duration as status change will not trigger Reconcile return ctrl.Result{RequeueAfter: pollIntervalDuration}, nil @@ -151,7 +164,7 @@ func (r *ModuleReconciler) Reconcile(ctx context.Context, req reconcile.Request) } if numOfMissedRuns > 0 { - log.Info("starting scheduled run", "missed-runs", numOfMissedRuns) + log.Debug("starting scheduled run", "missed-runs", numOfMissedRuns) r.Queue <- runner.Request{NamespacedName: req.NamespacedName, Type: tfaplv1beta1.ScheduledRun} } diff --git a/controllers/predicate.go b/controllers/predicate.go index a0135143..c68da783 100644 --- a/controllers/predicate.go +++ b/controllers/predicate.go @@ -1,9 +1,10 @@ package controllers import ( + "context" "fmt" + "log/slog" - "github.com/hashicorp/go-hclog" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -13,7 +14,7 @@ type Filter struct { predicate.Funcs LabelSelectorKey string LabelSelectorValue string - Log hclog.Logger + Log *slog.Logger } func (f Filter) Create(e event.CreateEvent) bool { @@ -52,7 +53,7 @@ func (f Filter) Update(e event.UpdateEvent) bool { return true } - f.Log.Trace("skipping module update event", "module", fmt.Sprintf("%s/%s", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) + f.Log.Log(context.TODO(), trace, "skipping module update event", "module", fmt.Sprintf("%s/%s", e.ObjectNew.GetNamespace(), e.ObjectNew.GetName())) return false } diff --git a/git/gitutil.go b/git/gitutil.go index 05c53565..50b760d0 100644 --- a/git/gitutil.go +++ b/git/gitutil.go @@ -19,5 +19,5 @@ func GitSSHCommand(sshKeyPath, knownHostsFilePath string, verifyKnownHosts bool) knownHostsFragment = fmt.Sprintf("-o StrictHostKeyChecking=yes -o UserKnownHostsFile=%s", knownHostsFilePath) } - return fmt.Sprintf(`ssh -i %s %s`, sshKeyPath, knownHostsFragment), nil + return fmt.Sprintf(`ssh -q -F none -o IdentitiesOnly=yes -i %s %s`, sshKeyPath, knownHostsFragment), nil } diff --git a/git/metrics.go b/git/metrics.go deleted file mode 100644 index 8b4705bc..00000000 --- a/git/metrics.go +++ /dev/null @@ -1,89 +0,0 @@ -package git - -import ( - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" -) - -var ( - // lastSyncTimestamp is a Gauge that captures the timestamp of the last - // successful git sync - lastSyncTimestamp *prometheus.GaugeVec - // syncCount is a Counter vector of git sync operations - syncCount *prometheus.CounterVec - // syncLatency is a Histogram vector that keeps track of git repo sync durations - syncLatency *prometheus.HistogramVec -) - -func EnableMetrics(metricsNamespace string, registerer prometheus.Registerer) { - lastSyncTimestamp = promauto.NewGaugeVec(prometheus.GaugeOpts{ - Namespace: metricsNamespace, - Name: "git_last_sync_timestamp", - Help: "Timestamp of the last successful git sync", - }, - []string{ - // name of the repository - "repo", - }, - ) - - syncCount = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: metricsNamespace, - Name: "git_sync_count", - Help: "Count of git sync operations", - }, - []string{ - // name of the repository - "repo", - // Whether the apply was successful or not - "success", - }, - ) - - syncLatency = promauto.NewHistogramVec(prometheus.HistogramOpts{ - 
Namespace: metricsNamespace, - Name: "git_sync_latency_seconds", - Help: "Latency for git repo sync", - Buckets: []float64{1, 5, 10, 20, 30, 60, 90, 120, 150, 300}, - }, - []string{ - // name of the repository - "repo", - }, - ) - - registerer.MustRegister( - lastSyncTimestamp, - syncCount, - syncLatency, - ) -} - -// recordGitSync records a git repository sync attempt by updating all the -// relevant metrics -func recordGitSync(repo string, success bool) { - // if metrics not enabled return - if lastSyncTimestamp == nil || syncCount == nil { - return - } - if success { - lastSyncTimestamp.With(prometheus.Labels{ - "repo": repo, - }).Set(float64(time.Now().Unix())) - } - syncCount.With(prometheus.Labels{ - "repo": repo, - "success": strconv.FormatBool(success), - }).Inc() -} - -func updateSyncLatency(repo string, start time.Time) { - // if metrics not enabled return - if syncLatency == nil { - return - } - syncLatency.WithLabelValues(repo).Observe(time.Since(start).Seconds()) -} diff --git a/git/repos.go b/git/repos.go new file mode 100644 index 00000000..e93f0741 --- /dev/null +++ b/git/repos.go @@ -0,0 +1,16 @@ +package git + +import ( + "context" +) + +//go:generate go run github.com/golang/mock/mockgen -package git -destination repos_mock.go github.com/utilitywarehouse/terraform-applier/git Repositories + +// Repositories allows for mocking out the functionality of git-mirror when +// testing the full process of an apply run +// mirror.RepoPool satisfies this interface and drop in replacement +type Repositories interface { + Hash(ctx context.Context, remote, ref, path string) (string, error) + LogMsg(ctx context.Context, remote, ref, path string) (string, error) + Clone(ctx context.Context, remote, dst, branch, pathspec string, rmGitDir bool) (string, error) +} diff --git a/git/repos_mock.go b/git/repos_mock.go new file mode 100644 index 00000000..fc11f18c --- /dev/null +++ b/git/repos_mock.go @@ -0,0 +1,80 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/utilitywarehouse/terraform-applier/git (interfaces: Repositories) + +// Package git is a generated GoMock package. +package git + +import ( + context "context" + reflect "reflect" + + gomock "github.com/golang/mock/gomock" +) + +// MockRepositories is a mock of Repositories interface. +type MockRepositories struct { + ctrl *gomock.Controller + recorder *MockRepositoriesMockRecorder +} + +// MockRepositoriesMockRecorder is the mock recorder for MockRepositories. +type MockRepositoriesMockRecorder struct { + mock *MockRepositories +} + +// NewMockRepositories creates a new mock instance. +func NewMockRepositories(ctrl *gomock.Controller) *MockRepositories { + mock := &MockRepositories{ctrl: ctrl} + mock.recorder = &MockRepositoriesMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockRepositories) EXPECT() *MockRepositoriesMockRecorder { + return m.recorder +} + +// Clone mocks base method. +func (m *MockRepositories) Clone(arg0 context.Context, arg1, arg2, arg3, arg4 string, arg5 bool) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Clone", arg0, arg1, arg2, arg3, arg4, arg5) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Clone indicates an expected call of Clone. 
+func (mr *MockRepositoriesMockRecorder) Clone(arg0, arg1, arg2, arg3, arg4, arg5 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Clone", reflect.TypeOf((*MockRepositories)(nil).Clone), arg0, arg1, arg2, arg3, arg4, arg5) +} + +// Hash mocks base method. +func (m *MockRepositories) Hash(arg0 context.Context, arg1, arg2, arg3 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Hash", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Hash indicates an expected call of Hash. +func (mr *MockRepositoriesMockRecorder) Hash(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Hash", reflect.TypeOf((*MockRepositories)(nil).Hash), arg0, arg1, arg2, arg3) +} + +// LogMsg mocks base method. +func (m *MockRepositories) LogMsg(arg0 context.Context, arg1, arg2, arg3 string) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "LogMsg", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// LogMsg indicates an expected call of LogMsg. +func (mr *MockRepositoriesMockRecorder) LogMsg(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogMsg", reflect.TypeOf((*MockRepositories)(nil).LogMsg), arg0, arg1, arg2, arg3) +} diff --git a/git/repository.go b/git/repository.go deleted file mode 100644 index c3f6600f..00000000 --- a/git/repository.go +++ /dev/null @@ -1,444 +0,0 @@ -// modified version of https://github.com/utilitywarehouse/kube-applier/blob/master/git/repository.go -// Package git provides methods for manipulating and querying git repositories -// on disk. -package git - -import ( - "bytes" - "context" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/utilitywarehouse/terraform-applier/sysutil" -) - -var ( - gitExecutablePath string -) - -func init() { - gitExecutablePath = exec.Command("git").String() -} - -// RepositoryConfig defines a remote git repository. -type RepositoryConfig struct { - Remote string `yaml:"remote"` - Branch string `yaml:"branch"` - Revision string `yaml:"revision"` - Depth int `yaml:"depth"` -} - -// SyncOptions encapsulates options about how a Repository should be fetched -// from the remote. -type SyncOptions struct { - GitSSHKeyPath string - GitSSHKnownHostsPath string - WithCheckout bool - CloneTimeout time.Duration - Interval time.Duration -} - -// gitSSHCommand returns the environment variable to be used for configuring -// git over ssh. -func (so SyncOptions) gitSSHCommand() string { - sshKeyPath := so.GitSSHKeyPath - if sshKeyPath == "" { - sshKeyPath = "/dev/null" - } - knownHostsOptions := "-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" - if so.GitSSHKeyPath != "" && so.GitSSHKnownHostsPath != "" { - knownHostsOptions = fmt.Sprintf("-o UserKnownHostsFile=%s", so.GitSSHKnownHostsPath) - } - return fmt.Sprintf(`GIT_SSH_COMMAND=ssh -q -F none -o IdentitiesOnly=yes -o IdentityFile=%s %s`, sshKeyPath, knownHostsOptions) -} - -// Repository defines a remote git repository that should be synced regularly -// and is the source of truth for a cluster. Changes in this repository trigger -// GitPolling type runs for namespaces. The implementation borrows heavily from -// git-sync. 
-type Repository struct { - lock sync.RWMutex - name string - path string - repositoryConfig RepositoryConfig - running bool - stop, stopped chan bool - syncOptions *SyncOptions - log hclog.Logger -} - -// NewRepository initialises a Repository struct. -func NewRepository(name, path string, repositoryConfig RepositoryConfig, syncOptions *SyncOptions, log hclog.Logger) (*Repository, error) { - if name == "" { - return nil, fmt.Errorf("cannot create Repository without name") - } - if path == "" { - return nil, fmt.Errorf("cannot create Repository with empty local path") - } - if !filepath.IsAbs(path) { - return nil, fmt.Errorf("Repository path must be absolute") - } - if repositoryConfig.Remote == "" { - return nil, fmt.Errorf("cannot create Repository with empty remote") - } - if repositoryConfig.Depth < 0 { - return nil, fmt.Errorf("Repository depth cannot be negative") - } - if repositoryConfig.Branch == "" { - log.Info("Defaulting repository branch to 'master'") - repositoryConfig.Branch = "master" - } - if repositoryConfig.Revision == "" { - log.Info("Defaulting repository revision to 'HEAD'") - repositoryConfig.Revision = "HEAD" - } - if syncOptions.CloneTimeout == 0 { - log.Info("Defaulting clone timeout to 5 minute") - syncOptions.CloneTimeout = time.Minute * 5 - } - if syncOptions.Interval == 0 { - log.Info("Defaulting Interval to 30 seconds") - syncOptions.Interval = time.Second * 30 - } - return &Repository{ - name: name, - path: path, - repositoryConfig: repositoryConfig, - syncOptions: syncOptions, - lock: sync.RWMutex{}, - log: log, - stop: make(chan bool), - stopped: make(chan bool), - }, nil -} - -// StartSync begins syncing from the remote git repository. first sync/clone is done with -// cloneTimeout value as it might take longer than usual depending on the size of the repository. -func (r *Repository) StartSync(ctx context.Context) error { - if r.running { - return fmt.Errorf("sync has already been started") - } - r.running = true - r.log.Info("waiting for the repository to complete initial sync") - - // The first sync is done outside of the syncLoop (and a separate timeout). - // The first clone might take longer than usual depending on the size of the repository. - // Additionally it runs in the foreground which simplifies startup since caller might - // require a repository clone to exist before starting up properly. - cloneCtx, cancel := context.WithTimeout(ctx, r.syncOptions.CloneTimeout) - defer cancel() - if err := r.sync(cloneCtx); err != nil { - return err - } - go r.syncLoop(ctx) - return nil -} - -func (r *Repository) syncLoop(ctx context.Context) { - defer func() { - r.running = false - close(r.stopped) - }() - - r.running = true - ticker := time.NewTicker(r.syncOptions.Interval) - defer ticker.Stop() - r.log.Info("started repository sync loop", "interval", r.syncOptions.Interval) - for { - select { - case <-ticker.C: - syncCtx, cancel := context.WithTimeout(ctx, r.syncOptions.Interval-time.Second) - err := r.sync(syncCtx) - if err != nil { - r.log.Error("could not sync git repository", "error", err) - } - recordGitSync(r.name, err == nil) - cancel() - case <-ctx.Done(): - return - case <-r.stop: - return - } - } -} - -// StopSync stops the syncing process. 
-func (r *Repository) StopSync() { - if !r.running { - r.log.Info("Sync has not been started, will not do anything") - return - } - close(r.stop) - <-r.stopped -} - -func (r *Repository) runGitCommand(ctx context.Context, environment []string, cwd string, args ...string) (string, error) { - cmdStr := gitExecutablePath + " " + strings.Join(args, " ") - r.log.Trace("running command", "cwd", cwd, "cmd", cmdStr) - - cmd := exec.CommandContext(ctx, gitExecutablePath, args...) - if cwd != "" { - cmd.Dir = cwd - } - outbuf := bytes.NewBuffer(nil) - errbuf := bytes.NewBuffer(nil) - cmd.Stdout = outbuf - cmd.Stderr = errbuf - - env := []string{ - fmt.Sprintf("PATH=%s", os.Getenv("PATH")), - r.syncOptions.gitSSHCommand(), - } - - cmd.Env = env - if len(environment) > 0 { - cmd.Env = append(cmd.Env, environment...) - } - - err := cmd.Run() - stdout := outbuf.String() - stderr := errbuf.String() - if ctx.Err() == context.DeadlineExceeded { - return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, ctx.Err(), stdout, stderr) - } - if err != nil { - return "", fmt.Errorf("Run(%s): %w: { stdout: %q, stderr: %q }", cmdStr, err, stdout, stderr) - } - r.log.Trace("command result", "stdout", stdout, "stderr", stderr) - - return stdout, nil -} - -// localHash returns the locally known hash for the configured Revision. -func (r *Repository) localHash(ctx context.Context) (string, error) { - output, err := r.runGitCommand(ctx, nil, r.path, "rev-parse", r.repositoryConfig.Revision) - if err != nil { - return "", err - } - return strings.Trim(string(output), "\n"), nil -} - -// localHashForPath returns the hash of the configured revision for the -// specified path. -func (r *Repository) localHashForPath(ctx context.Context, path string) (string, error) { - output, err := r.runGitCommand(ctx, nil, r.path, "log", "--pretty=format:%h", "-n", "1", "--", path) - if err != nil { - return "", err - } - return strings.Trim(string(output), "\n"), nil -} - -// remoteHash returns the upstream hash for the ref that corresponds to the -// configured Revision. -func (r *Repository) remoteHash(ctx context.Context) (string, error) { - // Build a ref string, depending on whether the user asked to track HEAD or - // a tag. - ref := "" - if r.repositoryConfig.Revision == "HEAD" { - ref = "refs/heads/" + r.repositoryConfig.Branch - } else { - ref = "refs/tags/" + r.repositoryConfig.Revision - } - // git ls-remote -q origin refs/XXX/XXX - output, err := r.runGitCommand(ctx, nil, r.path, "ls-remote", "-q", "origin", ref) - if err != nil { - return "", err - } - parts := strings.Split(string(output), "\t") - return parts[0], nil -} - -func (r *Repository) sync(ctx context.Context) error { - r.lock.Lock() - defer r.lock.Unlock() - - defer updateSyncLatency(r.name, time.Now()) - - gitRepoPath := filepath.Join(r.path, ".git") - _, err := os.Stat(gitRepoPath) - switch { - case os.IsNotExist(err): - // First time. Just clone it and get the hash. - return r.cloneRemote(ctx) - case err != nil: - return fmt.Errorf("error checking if repo exists %q: %v", gitRepoPath, err) - default: - // Not the first time. Figure out if the ref has changed. 
- local, err := r.localHash(ctx) - if err != nil { - return err - } - remote, err := r.remoteHash(ctx) - if err != nil { - return err - } - if local == remote { - r.log.Trace("no update required", "rev", r.repositoryConfig.Revision, "local", local, "remote", remote) - return nil - } - r.log.Info("update required", "rev", r.repositoryConfig.Revision, "local", local, "remote", remote) - } - - r.log.Info("syncing git", "branch", r.repositoryConfig.Branch, "rev", r.repositoryConfig.Revision) - args := []string{"fetch", "-f", "--tags"} - if r.repositoryConfig.Depth != 0 { - args = append(args, "--depth", strconv.Itoa(r.repositoryConfig.Depth)) - } - args = append(args, "origin", r.repositoryConfig.Branch) - // Update from the remote. - // git fetch -f --tags --depth x origin - if _, err := r.runGitCommand(ctx, nil, r.path, args...); err != nil { - return err - } - // GC clone - // git gc --prune=all - if _, err := r.runGitCommand(ctx, nil, r.path, "gc", "--prune=all"); err != nil { - commitGraphLock := filepath.Join(gitRepoPath, "objects/info/commit-graph.lock") - if strings.Contains(err.Error(), fmt.Sprintf("Unable to create '%s': File exists.", commitGraphLock)) { - if e := os.Remove(commitGraphLock); e != nil { - r.log.Error("possible git crash detected but could not remove commit graph lock", "path", commitGraphLock, "error", e) - } else { - r.log.Error("possible git crash detected, commit graph lock removed and next attempt should succeed", "path", commitGraphLock) - } - } - return err - } - // Reset HEAD - args = []string{"reset", fmt.Sprintf("origin/%s", r.repositoryConfig.Branch)} - if r.syncOptions.WithCheckout { - args = append(args, "--hard") - } else { - args = append(args, "--soft") - } - // git reset --soft origin/ - if _, err = r.runGitCommand(ctx, nil, r.path, args...); err != nil { - return err - } - return nil -} - -func (r *Repository) cloneRemote(ctx context.Context) error { - args := []string{"clone", "-b", r.repositoryConfig.Branch} - if r.repositoryConfig.Depth != 0 { - args = append(args, "--depth", strconv.Itoa(r.repositoryConfig.Depth)) - } - if !r.syncOptions.WithCheckout { - args = append(args, "--no-checkout") - } - args = append(args, r.repositoryConfig.Remote, r.path) - r.log.Info("cloning repo", "origin", r.repositoryConfig.Remote, "path", r.path) - - _, err := r.runGitCommand(ctx, nil, "", args...) - if err != nil { - if strings.Contains(err.Error(), "already exists and is not an empty directory") { - // Maybe a previous run crashed? Git won't use this dir. - r.log.Info("git root exists and is not empty (previous crash?), cleaning up", "path", r.path) - err := os.RemoveAll(r.path) - if err != nil { - return err - } - _, err = r.runGitCommand(ctx, nil, "", args...) - if err != nil { - return err - } - } else { - return err - } - } - return nil -} - -// CloneLocal creates a clone of the existing repository to a new location on -// disk and only checkouts the specified subpath. On success, it returns the -// hash of the new repository clone's HEAD. 
-func (r *Repository) CloneLocal(ctx context.Context, subpath, dst string, envs []string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - hash, err := r.localHashForPath(ctx, subpath) - if err != nil { - return "", err - } - - // git clone --no-checkout src dst - if _, err := r.runGitCommand(ctx, nil, "", "clone", "--no-checkout", r.path, dst); err != nil { - return "", err - } - - // git checkout HEAD -- ./path - if _, err := r.runGitCommand(ctx, envs, dst, "checkout", r.repositoryConfig.Revision, "--", subpath); err != nil { - return "", err - } - return hash, nil -} - -// CopyPath get read lock and then copies given subpath to new location. -// WithCheckout must be set to use this function -func (r *Repository) CopyPath(ctx context.Context, subpath, dst string) error { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.syncOptions.WithCheckout { - return fmt.Errorf("'WithCheckout' option is disabled there are no sub paths on the repo to copy. use 'CloneLocal()'") - } - - return sysutil.CopyDir(filepath.Join(r.path, subpath), dst) -} - -// CopyRepo get read lock and then copies full repository to new location. -// unlike CloneLocal CopyRepo checks out all the dir in repo hence 'WithCheckout' must be set to use this function -func (r *Repository) CopyRepo(ctx context.Context, dst string) error { - r.lock.RLock() - defer r.lock.RUnlock() - - if !r.syncOptions.WithCheckout { - return fmt.Errorf("'WithCheckout' option is disabled there are no sub paths on the repo to copy. use 'CloneLocal()'") - } - - return sysutil.CopyDir(r.path, dst) -} - -// HashForPath returns the hash of the configured revision for the specified path. -func (r *Repository) HashForPath(ctx context.Context, path string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - return r.localHashForPath(ctx, path) -} - -// LogMsgForPath returns the formatted log subject with author info of the configured revision for the specified path. -func (r *Repository) LogMsgForPath(ctx context.Context, path string) (string, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - output, err := r.runGitCommand(ctx, nil, r.path, "log", "--pretty=format:'%s (%an)'", "-n", "1", "--", path) - if err != nil { - return "", err - } - return strings.Trim(string(output), "'\n"), nil -} - -// HasChangesForPath returns true if there are changes that have been committed -// since the commit hash provided, under the specified path. -func (r *Repository) HasChangesForPath(ctx context.Context, path, sinceHash string) (bool, error) { - r.lock.RLock() - defer r.lock.RUnlock() - - cmd := []string{"diff", "--quiet", sinceHash, r.repositoryConfig.Revision, "--", path} - _, err := r.runGitCommand(ctx, nil, r.path, cmd...) - if err == nil { - return false, nil - } - var e *exec.ExitError - if errors.As(err, &e) && e.ExitCode() == 1 { - return true, nil - } - return false, err -} diff --git a/git/syncpool.go b/git/syncpool.go deleted file mode 100644 index d23f29b5..00000000 --- a/git/syncpool.go +++ /dev/null @@ -1,167 +0,0 @@ -package git - -import ( - "context" - "fmt" - "path/filepath" - "time" - - "github.com/hashicorp/go-hclog" -) - -//go:generate go run github.com/golang/mock/mockgen -package git -destination syncpool_mock.go github.com/utilitywarehouse/terraform-applier/git SyncInterface - -// SyncInterface allows for mocking out the functionality of SyncPool when -// testing the full process of an apply run. 
-type SyncInterface interface { - AddRepository(repoName string, repositoryConfig RepositoryConfig, syncOptions *SyncOptions) error - Repository(repoName string) (*Repository, error) - RepositoryConfig(repoName string) (RepositoryConfig, error) - - CloneLocal(ctx context.Context, repoName string, subpath string, dst string, envs []string) (string, error) - CopyPath(ctx context.Context, repoName string, subpath string, dst string) error - CopyRepo(ctx context.Context, repoName string, dst string) error - - HasChangesForPath(ctx context.Context, repoName string, path, sinceHash string) (bool, error) - HashForPath(ctx context.Context, repoName string, path string) (string, error) - LogMsgForPath(ctx context.Context, repoName string, path string) (string, error) -} - -type SyncPool struct { - ctx context.Context - log hclog.Logger - defaultSyncOptions *SyncOptions - root string - // using normal map assuming that all repositories will be added - // initially and then only accessed without deleting - repos map[string]*Repository -} - -// NewSync returns syncPool which is a collection of repository sync -// SyncPool object provides helper wrapper functions over Repository objects specially for multiple repo sync -func NewSyncPool(ctx context.Context, rootPath string, syncOptions SyncOptions, log hclog.Logger) (*SyncPool, error) { - if !filepath.IsAbs(rootPath) { - return nil, fmt.Errorf("Repository root path must be absolute") - } - - if syncOptions.CloneTimeout == 0 { - log.Info("Defaulting clone timeout to 5 minute") - syncOptions.CloneTimeout = time.Minute * 5 - } - if syncOptions.Interval == 0 { - log.Info("Defaulting Interval to 30 seconds") - syncOptions.Interval = time.Second * 30 - } - - return &SyncPool{ - ctx: ctx, - log: log, - defaultSyncOptions: &syncOptions, - root: rootPath, - repos: make(map[string]*Repository), - }, nil - -} - -// AddRepository creates new repository object adds to sync pool after starting its sync loop. -// repo dir will be created under syncPool 'root' path with same name as repoName. -// repoName will be used on all other helper call to identify/select particular repo from sync pool. 
-// if syncOptions is nil then it will use default sync objects syncOptions -// since AddRepository calls StartSync it will block until initial clone is done -func (s *SyncPool) AddRepository(repoName string, repositoryConfig RepositoryConfig, syncOptions *SyncOptions) error { - - if syncOptions == nil { - syncOptions = s.defaultSyncOptions - } - - if _, ok := s.repos[repoName]; ok { - return fmt.Errorf("repository with repoName '%s' already exists", repoName) - } - - repo, err := NewRepository(repoName, filepath.Join(s.root, repoName), repositoryConfig, syncOptions, s.log.Named(repoName)) - if err != nil { - return err - } - - s.repos[repoName] = repo - - return repo.StartSync(s.ctx) -} - -// Repository returns Repository object if its added to sync pool -func (s *SyncPool) Repository(repoName string) (*Repository, error) { - repo, ok := s.repos[repoName] - if !ok { - return nil, fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo, nil -} - -// Repository returns Repository config if its added to sync pool -func (s *SyncPool) RepositoryConfig(repoName string) (RepositoryConfig, error) { - repo, ok := s.repos[repoName] - if !ok { - return RepositoryConfig{}, fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.repositoryConfig, nil -} - -// CloneLocal creates a clone of the existing repository to a new location on -// disk and only checkouts the specified subpath. On success, it returns the -// hash of the new repository clone's HEAD. -func (s *SyncPool) CloneLocal(ctx context.Context, repoName string, subpath string, dst string, envs []string) (string, error) { - repo, ok := s.repos[repoName] - if !ok { - return "", fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.CloneLocal(ctx, subpath, dst, envs) -} - -// CopyPath get read lock and then copies given subpath to new location. -// WithCheckout must be set to use this function -func (s *SyncPool) CopyPath(ctx context.Context, repoName string, subpath string, dst string) error { - repo, ok := s.repos[repoName] - if !ok { - return fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.CopyPath(ctx, subpath, dst) -} - -// CopyRepo get read lock and then copies given repository to new location. -// unlike CloneLocal CopyRepo checks out all the dir in repo hence 'WithCheckout' must be set to use this function -func (s *SyncPool) CopyRepo(ctx context.Context, repoName string, dst string) error { - repo, ok := s.repos[repoName] - if !ok { - return fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.CopyRepo(ctx, dst) -} - -// HasChangesForPath returns true if there are changes that have been committed -// since the commit hash provided, under the specified path. -func (s *SyncPool) HasChangesForPath(ctx context.Context, repoName string, path, sinceHash string) (bool, error) { - repo, ok := s.repos[repoName] - if !ok { - return false, fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.HasChangesForPath(ctx, path, sinceHash) -} - -// HashForPath returns the hash of the configured revision for the specified -// path. 
-func (s *SyncPool) HashForPath(ctx context.Context, repoName string, path string) (string, error) { - repo, ok := s.repos[repoName] - if !ok { - return "", fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.HashForPath(ctx, path) -} - -// LogMsgForPath returns the formatted log subject with author info of the configured revision for the specified path. -func (s *SyncPool) LogMsgForPath(ctx context.Context, repoName string, path string) (string, error) { - repo, ok := s.repos[repoName] - if !ok { - return "", fmt.Errorf("repository with repoName '%s' is not yet added by admin", repoName) - } - return repo.LogMsgForPath(ctx, path) -} diff --git a/git/syncpool_mock.go b/git/syncpool_mock.go deleted file mode 100644 index 20fcda50..00000000 --- a/git/syncpool_mock.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/utilitywarehouse/terraform-applier/git (interfaces: SyncInterface) - -// Package git is a generated GoMock package. -package git - -import ( - context "context" - reflect "reflect" - - gomock "github.com/golang/mock/gomock" -) - -// MockSyncInterface is a mock of SyncInterface interface. -type MockSyncInterface struct { - ctrl *gomock.Controller - recorder *MockSyncInterfaceMockRecorder -} - -// MockSyncInterfaceMockRecorder is the mock recorder for MockSyncInterface. -type MockSyncInterfaceMockRecorder struct { - mock *MockSyncInterface -} - -// NewMockSyncInterface creates a new mock instance. -func NewMockSyncInterface(ctrl *gomock.Controller) *MockSyncInterface { - mock := &MockSyncInterface{ctrl: ctrl} - mock.recorder = &MockSyncInterfaceMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockSyncInterface) EXPECT() *MockSyncInterfaceMockRecorder { - return m.recorder -} - -// AddRepository mocks base method. -func (m *MockSyncInterface) AddRepository(arg0 string, arg1 RepositoryConfig, arg2 *SyncOptions) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRepository", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// AddRepository indicates an expected call of AddRepository. -func (mr *MockSyncInterfaceMockRecorder) AddRepository(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRepository", reflect.TypeOf((*MockSyncInterface)(nil).AddRepository), arg0, arg1, arg2) -} - -// CloneLocal mocks base method. -func (m *MockSyncInterface) CloneLocal(arg0 context.Context, arg1, arg2, arg3 string, arg4 []string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CloneLocal", arg0, arg1, arg2, arg3, arg4) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// CloneLocal indicates an expected call of CloneLocal. -func (mr *MockSyncInterfaceMockRecorder) CloneLocal(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CloneLocal", reflect.TypeOf((*MockSyncInterface)(nil).CloneLocal), arg0, arg1, arg2, arg3, arg4) -} - -// CopyPath mocks base method. -func (m *MockSyncInterface) CopyPath(arg0 context.Context, arg1, arg2, arg3 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyPath", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(error) - return ret0 -} - -// CopyPath indicates an expected call of CopyPath. 
-func (mr *MockSyncInterfaceMockRecorder) CopyPath(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyPath", reflect.TypeOf((*MockSyncInterface)(nil).CopyPath), arg0, arg1, arg2, arg3) -} - -// CopyRepo mocks base method. -func (m *MockSyncInterface) CopyRepo(arg0 context.Context, arg1, arg2 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "CopyRepo", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// CopyRepo indicates an expected call of CopyRepo. -func (mr *MockSyncInterfaceMockRecorder) CopyRepo(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CopyRepo", reflect.TypeOf((*MockSyncInterface)(nil).CopyRepo), arg0, arg1, arg2) -} - -// HasChangesForPath mocks base method. -func (m *MockSyncInterface) HasChangesForPath(arg0 context.Context, arg1, arg2, arg3 string) (bool, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HasChangesForPath", arg0, arg1, arg2, arg3) - ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HasChangesForPath indicates an expected call of HasChangesForPath. -func (mr *MockSyncInterfaceMockRecorder) HasChangesForPath(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HasChangesForPath", reflect.TypeOf((*MockSyncInterface)(nil).HasChangesForPath), arg0, arg1, arg2, arg3) -} - -// HashForPath mocks base method. -func (m *MockSyncInterface) HashForPath(arg0 context.Context, arg1, arg2 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "HashForPath", arg0, arg1, arg2) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// HashForPath indicates an expected call of HashForPath. -func (mr *MockSyncInterfaceMockRecorder) HashForPath(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "HashForPath", reflect.TypeOf((*MockSyncInterface)(nil).HashForPath), arg0, arg1, arg2) -} - -// LogMsgForPath mocks base method. -func (m *MockSyncInterface) LogMsgForPath(arg0 context.Context, arg1, arg2 string) (string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "LogMsgForPath", arg0, arg1, arg2) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// LogMsgForPath indicates an expected call of LogMsgForPath. -func (mr *MockSyncInterfaceMockRecorder) LogMsgForPath(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "LogMsgForPath", reflect.TypeOf((*MockSyncInterface)(nil).LogMsgForPath), arg0, arg1, arg2) -} - -// Repository mocks base method. -func (m *MockSyncInterface) Repository(arg0 string) (*Repository, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Repository", arg0) - ret0, _ := ret[0].(*Repository) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Repository indicates an expected call of Repository. -func (mr *MockSyncInterfaceMockRecorder) Repository(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Repository", reflect.TypeOf((*MockSyncInterface)(nil).Repository), arg0) -} - -// RepositoryConfig mocks base method. 
-func (m *MockSyncInterface) RepositoryConfig(arg0 string) (RepositoryConfig, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "RepositoryConfig", arg0) - ret0, _ := ret[0].(RepositoryConfig) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// RepositoryConfig indicates an expected call of RepositoryConfig. -func (mr *MockSyncInterfaceMockRecorder) RepositoryConfig(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RepositoryConfig", reflect.TypeOf((*MockSyncInterface)(nil).RepositoryConfig), arg0) -} diff --git a/go.mod b/go.mod index 2184b12d..1e4b5b6c 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,12 @@ module github.com/utilitywarehouse/terraform-applier -go 1.22 +go 1.22.0 require ( + github.com/go-logr/logr v1.4.1 github.com/golang/mock v1.6.0 github.com/gorilla/mux v1.8.1 github.com/gorilla/securecookie v1.1.2 - github.com/hashicorp/go-hclog v1.6.2 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/hc-install v0.6.3 github.com/hashicorp/terraform-exec v0.20.0 @@ -16,8 +16,8 @@ require ( github.com/prometheus/client_golang v1.19.0 github.com/robfig/cron/v3 v3.0.1 github.com/urfave/cli/v2 v2.27.1 + github.com/utilitywarehouse/git-mirror v0.0.0-20240320170707-c5188344fc50 github.com/utilitywarehouse/go-operational v0.0.0-20220413104526-79ce40a50281 - go.uber.org/zap v1.27.0 golang.org/x/oauth2 v0.17.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.2 @@ -43,7 +43,6 @@ require ( github.com/fatih/color v1.16.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-jose/go-jose/v3 v3.0.3 // indirect - github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect @@ -60,6 +59,7 @@ require ( github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.6.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-retryablehttp v0.7.5 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect @@ -92,6 +92,7 @@ require ( github.com/xrash/smetrics v0.0.0-20231213231151-1d8dd44e695e // indirect github.com/zclconf/go-cty v1.14.2 // indirect go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.20.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect golang.org/x/mod v0.15.0 // indirect diff --git a/go.sum b/go.sum index a82f600e..e77e4562 100644 --- a/go.sum +++ b/go.sum @@ -212,6 +212,10 @@ github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcU github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/urfave/cli/v2 v2.27.1 h1:8xSQ6szndafKVRmfyeUMxkNUJQMjL1F2zmsZ+qHpfho= github.com/urfave/cli/v2 v2.27.1/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ= +github.com/utilitywarehouse/git-mirror v0.0.0-20240319143905-78ec52d5d091 h1:78YV5e+wxcl7ViJ9Bb2x0A37zxkippIspLUTj9BUHew= +github.com/utilitywarehouse/git-mirror v0.0.0-20240319143905-78ec52d5d091/go.mod h1:iGusuCneGiwwKaPsxgBaR974OwLd/FKhUGDsSbC8zWM= +github.com/utilitywarehouse/git-mirror v0.0.0-20240320170707-c5188344fc50 h1:8A5AQpIx74Vx0Qncd7avauMnN66bvsjfT3r02sE5Noc= +github.com/utilitywarehouse/git-mirror v0.0.0-20240320170707-c5188344fc50/go.mod h1:iGusuCneGiwwKaPsxgBaR974OwLd/FKhUGDsSbC8zWM= 
github.com/utilitywarehouse/go-operational v0.0.0-20220413104526-79ce40a50281 h1:o2reBE9vn4ZXmq73rNDmipyJsH0GPqoS8A6CVeWZsGU= github.com/utilitywarehouse/go-operational v0.0.0-20220413104526-79ce40a50281/go.mod h1:NVEoiRSDBsLOEk9X+pwskLIPWL5YGmZMaGP0kXnpvhM= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= diff --git a/integration_test/module_controller_filter_test.go b/integration_test/module_controller_filter_test.go index 66371a55..c50351f1 100644 --- a/integration_test/module_controller_filter_test.go +++ b/integration_test/module_controller_filter_test.go @@ -8,7 +8,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1" - "github.com/utilitywarehouse/terraform-applier/git" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -36,17 +35,16 @@ var _ = Describe("Module controller without runner with label selector", func() testFilter.LabelSelectorValue = "true" // Trigger Job run as soon as module is created - testGitSyncPool.EXPECT().HashForPath(gomock.Any(), "modules", "hello-filter-test"). + testRepos.EXPECT().Hash(gomock.Any(), "https://host.xy/dummy/repo.git", "HEAD", "hello-filter-test"). Return(commitHash, nil).AnyTimes() - testGitSyncPool.EXPECT().LogMsgForPath(gomock.Any(), "modules", "hello-filter-test"). + testRepos.EXPECT().LogMsg(gomock.Any(), "https://host.xy/dummy/repo.git", "HEAD", "hello-filter-test"). Return(commitMsg, nil).AnyTimes() - testGitSyncPool.EXPECT().RepositoryConfig(gomock.Any()).Return(git.RepositoryConfig{Remote: "github.com/org/repo"}, nil).AnyTimes() }) It("Should send module with valid selector label selector to job queue", func() { const ( moduleName = "filter-test-module1" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello-filter-test" ) @@ -59,7 +57,7 @@ var _ = Describe("Module controller without runner with label selector", func() Namespace: moduleNamespace, Labels: map[string]string{labelSelectorKey: "true"}, }, - Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoName: repo, Path: path}, + Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoURL: repoURL, Path: path}, } Expect(k8sClient.Create(ctx, module)).Should(Succeed()) @@ -84,7 +82,7 @@ var _ = Describe("Module controller without runner with label selector", func() It("Should not send module with valid selector label key but invalid value to job queue", func() { const ( moduleName = "filter-test-module2" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello-filter-test" ) @@ -97,7 +95,7 @@ var _ = Describe("Module controller without runner with label selector", func() Namespace: moduleNamespace, Labels: map[string]string{labelSelectorKey: "false"}, }, - Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoName: repo, Path: path}, + Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoURL: repoURL, Path: path}, } Expect(k8sClient.Create(ctx, module)).Should(Succeed()) @@ -122,7 +120,7 @@ var _ = Describe("Module controller without runner with label selector", func() It("Should not send module with missing selector label selector to job queue", func() { const ( moduleName = "filter-test-module3" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello-filter-test" ) @@ -135,7 +133,7 @@ var _ = Describe("Module controller without runner with label selector", func() Namespace: moduleNamespace, Labels: map[string]string{labelSelectorKeyInvalid: "true"}, }, - 
Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoName: repo, Path: path}, + Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoURL: repoURL, Path: path}, } Expect(k8sClient.Create(ctx, module)).Should(Succeed()) @@ -160,7 +158,7 @@ var _ = Describe("Module controller without runner with label selector", func() It("Should not send module with mo labels to job queue", func() { const ( moduleName = "filter-test-module4" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello-filter-test" ) @@ -172,7 +170,7 @@ var _ = Describe("Module controller without runner with label selector", func() Name: moduleName, Namespace: moduleNamespace, }, - Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoName: repo, Path: path}, + Spec: tfaplv1beta1.ModuleSpec{Schedule: "1 * * * *", RepoURL: repoURL, Path: path}, } Expect(k8sClient.Create(ctx, module)).Should(Succeed()) diff --git a/integration_test/module_controller_no_runner_test.go b/integration_test/module_controller_no_runner_test.go index b57fccb1..e17a6b21 100644 --- a/integration_test/module_controller_no_runner_test.go +++ b/integration_test/module_controller_no_runner_test.go @@ -35,7 +35,7 @@ var _ = Describe("Module controller without runner", func() { It("Should send module to job queue on schedule", func() { const ( moduleName = "test-module" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "dev/" + moduleName ) @@ -52,7 +52,7 @@ var _ = Describe("Module controller without runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "1 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, }, } @@ -93,7 +93,7 @@ var _ = Describe("Module controller without runner", func() { module.Status.RunStartedAt = &metav1.Time{Time: time.Date(2022, 02, 01, 01, 00, 30, 0000, time.UTC)} Expect(k8sClient.Status().Update(ctx, module)).Should(Succeed()) - testGitSyncPool.EXPECT().HasChangesForPath(gomock.Any(), repo, path, "CommitAbc123").Return(false, nil) + testRepos.EXPECT().Hash(gomock.Any(), repoURL, "HEAD", path).Return("CommitAbc123", nil) By("By making sure job is not sent to job queue before schedule") fakeClock.T = time.Date(2022, 02, 01, 01, 00, 40, 0000, time.UTC) @@ -130,7 +130,7 @@ var _ = Describe("Module controller without runner", func() { It("Should send module to job queue on commit change", func() { const ( moduleName = "test-module2" - repo = "modules2" + repoURL = "https://host.xy/dummy/repo2.git" path = "dev/" + moduleName ) @@ -147,7 +147,7 @@ var _ = Describe("Module controller without runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "1 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, }, } @@ -174,7 +174,7 @@ var _ = Describe("Module controller without runner", func() { Expect(k8sClient.Status().Update(ctx, module)).Should(Succeed()) By("By making sure job was sent to jobQueue when commit hash is changed") - testGitSyncPool.EXPECT().HasChangesForPath(gomock.Any(), repo, path, "CommitAbc123").Return(true, nil) + testRepos.EXPECT().Hash(gomock.Any(), repoURL, "HEAD", path).Return("CommitAbc456", nil) // wait for just about 60 sec default poll interval Eventually(func() types.NamespacedName { @@ -195,7 +195,7 @@ var _ = Describe("Module controller without runner", func() { It("Should not trigger run for module with invalid schedule", func() { const ( moduleName = "test-module3" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "dev/" + moduleName ) @@ -212,7 +212,7 @@ var _ = Describe("Module controller without runner", func() { 
}, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "1 * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, }, } @@ -236,7 +236,7 @@ var _ = Describe("Module controller without runner", func() { module.Status.RunCommitHash = "CommitAbc123" Expect(k8sClient.Status().Update(ctx, module)).Should(Succeed()) - testGitSyncPool.EXPECT().HasChangesForPath(gomock.Any(), repo, path, "CommitAbc123").Return(false, nil) + testRepos.EXPECT().Hash(gomock.Any(), repoURL, "HEAD", path).Return("CommitAbc123", nil) // wait for next reconcile loop time.Sleep(15 * time.Second) @@ -258,7 +258,7 @@ var _ = Describe("Module controller without runner", func() { It("Should not trigger run for module with git error", func() { const ( moduleName = "test-module4" - repo = "modules1" + repoURL = "https://host.xy/dummy/repo2.git" path = "dev/" + moduleName ) @@ -275,7 +275,7 @@ var _ = Describe("Module controller without runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "1 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, }, } @@ -299,7 +299,7 @@ var _ = Describe("Module controller without runner", func() { module.Status.RunCommitHash = "CommitAbc123" Expect(k8sClient.Status().Update(ctx, module)).Should(Succeed()) - testGitSyncPool.EXPECT().HasChangesForPath(gomock.Any(), repo, path, "CommitAbc123").Return(false, fmt.Errorf("some git error")) + testRepos.EXPECT().Hash(gomock.Any(), repoURL, "HEAD", path).Return("", fmt.Errorf("some git error")) // wait for next reconcile loop time.Sleep(15 * time.Second) diff --git a/integration_test/module_controller_with_runner_test.go b/integration_test/module_controller_with_runner_test.go index 10da027b..47ecaf64 100644 --- a/integration_test/module_controller_with_runner_test.go +++ b/integration_test/module_controller_with_runner_test.go @@ -11,7 +11,6 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1" - "github.com/utilitywarehouse/terraform-applier/git" "github.com/utilitywarehouse/terraform-applier/sysutil" "github.com/utilitywarehouse/terraform-applier/vault" corev1 "k8s.io/api/core/v1" @@ -50,16 +49,15 @@ var _ = Describe("Module controller with Runner", func() { testFilter.LabelSelectorValue = "" // all jobs will be triggered automatically as they do not have initial commit hash - // testGitSyncPool.EXPECT().HasChangesForPath(gomock.Any(), "modules", "hello", gomock.Any()).Return(true, nil).AnyTimes() - testGitSyncPool.EXPECT().HashForPath(gomock.Any(), "modules", "hello"). + testRepos.EXPECT().Hash(gomock.Any(), "https://host.xy/dummy/repo.git", "HEAD", "hello"). Return(commitHash, nil).AnyTimes() - testGitSyncPool.EXPECT().LogMsgForPath(gomock.Any(), "modules", "hello"). + testRepos.EXPECT().LogMsg(gomock.Any(), "https://host.xy/dummy/repo.git", "HEAD", "hello"). Return(commitMsg, nil).AnyTimes() - testGitSyncPool.EXPECT().RepositoryConfig(gomock.Any()).Return(git.RepositoryConfig{Remote: "github.com/org/repo"}, nil).AnyTimes() + var dst string - testGitSyncPool.EXPECT().CopyRepo(gomock.Any(), "modules", gomock.AssignableToTypeOf(dst)). - DoAndReturn(func(ctx context.Context, name string, dst string) error { - return sysutil.CopyDir(filepath.Join("src", name), dst) + testRepos.EXPECT().Clone(gomock.Any(), "https://host.xy/dummy/repo.git", gomock.AssignableToTypeOf(dst), "HEAD", "", true). 
+ DoAndReturn(func(ctx context.Context, remote, dst, branch, pathspec string, rmGitDir bool) (string, error) { + return "commit124", sysutil.CopyDir(filepath.Join("src", "modules"), dst) }).AnyTimes() testMetrics.EXPECT().UpdateModuleRunDuration(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() @@ -73,7 +71,7 @@ var _ = Describe("Module controller with Runner", func() { It("Should send module to job queue on commit change and runner should do plan & apply", func() { const ( moduleName = "hello" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello" ) @@ -90,7 +88,7 @@ var _ = Describe("Module controller with Runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "50 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, Env: []corev1.EnvVar{ {Name: "TF_APPLIER_STRONGBOX_KEYRING", Value: string(sbKeyringData)}, @@ -156,7 +154,7 @@ var _ = Describe("Module controller with Runner", func() { It("Should send module to job queue on commit change and runner should only do plan", func() { const ( moduleName = "hello-plan-only" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello" ) @@ -173,7 +171,7 @@ var _ = Describe("Module controller with Runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "50 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, PlanOnly: &boolTrue, Env: []corev1.EnvVar{ @@ -236,7 +234,7 @@ var _ = Describe("Module controller with Runner", func() { It("Should send module to job queue on commit change and runner should read configmaps and secrets before apply and setup local backend", func() { const ( moduleName = "hello-with-var-env" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello" ) @@ -253,7 +251,7 @@ var _ = Describe("Module controller with Runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "50 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, Backend: []corev1.EnvVar{ {Name: "path", Value: testStateFilePath}, @@ -370,7 +368,7 @@ var _ = Describe("Module controller with Runner", func() { It("Should send module to job queue on commit change and runner should generate aws vault creds", func() { const ( moduleName = "hello-with-aws-creds" - repo = "modules" + repoURL = "https://host.xy/dummy/repo.git" path = "hello" ) @@ -392,7 +390,7 @@ var _ = Describe("Module controller with Runner", func() { }, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "50 * * * *", - RepoName: repo, + RepoURL: repoURL, Path: path, VaultRequests: &vaultReq, Env: []corev1.EnvVar{ diff --git a/integration_test/suite_test.go b/integration_test/suite_test.go index a5bdcc16..a599f98e 100644 --- a/integration_test/suite_test.go +++ b/integration_test/suite_test.go @@ -18,6 +18,7 @@ package integration_test import ( "context" + "log/slog" "os" "os/exec" "path/filepath" @@ -25,8 +26,8 @@ import ( "testing" "time" + "github.com/go-logr/logr" "github.com/golang/mock/gomock" - "github.com/hashicorp/go-hclog" "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/hc-install/src" @@ -44,7 +45,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1" "github.com/utilitywarehouse/terraform-applier/controllers" @@ -72,7 +72,7 @@ var ( fakeClock *sysutil.FakeClock goMockCtrl *gomock.Controller - testLogger hclog.Logger + 
testLogger *slog.Logger // testControllerQueue only used for controller behaviour testing testControllerQueue chan runner.Request testFilterControllerQueue chan runner.Request @@ -82,7 +82,7 @@ var ( // testRunnerQueue only used for send job to runner of runner testing testRunnerQueue chan runner.Request - testGitSyncPool *git.MockSyncInterface + testRepos *git.MockRepositories testMetrics *metrics.MockPrometheusInterface testDelegate *runner.MockDelegateInterface testRunner runner.Runner @@ -94,10 +94,21 @@ var ( func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) - RunSpecs(t, "Controller Suite") + // fetch the current config + suiteConfig, reporterConfig := GinkgoConfiguration() + // adjust it + suiteConfig.SkipStrings = []string{"NEVER-RUN"} + reporterConfig.Verbose = true + reporterConfig.FullTrace = true + // pass it in to RunSpecs + RunSpecs(t, "Controller Suite", suiteConfig, reporterConfig) } var _ = BeforeSuite(func() { + testLogger = slog.New(slog.NewTextHandler(GinkgoWriter, &slog.HandlerOptions{ + Level: slog.Level(-8), + })) + var err error // Download test assets to ./bin dir k8sAssetPath, err := exec.Command( @@ -109,7 +120,7 @@ var _ = BeforeSuite(func() { Expect(os.Setenv("KUBEBUILDER_ASSETS", string(k8sAssetPath))).To(Succeed()) - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + logf.SetLogger(logr.FromSlogHandler(testLogger.Handler())) ctx, cancel = context.WithCancel(context.TODO()) @@ -138,12 +149,6 @@ var _ = BeforeSuite(func() { }) Expect(err).ToNot(HaveOccurred()) - testLogger = hclog.New(&hclog.LoggerOptions{ - Name: "test", - Level: hclog.LevelFromString("TRACE"), - IncludeLocation: false, - }) - fakeClock = &sysutil.FakeClock{ T: time.Date(01, 01, 01, 0, 0, 0, 0, time.UTC), } @@ -157,7 +162,7 @@ var _ = BeforeSuite(func() { goMockCtrl = gomock.NewController(RecoveringGinkgoT()) - testGitSyncPool = git.NewMockSyncInterface(goMockCtrl) + testRepos = git.NewMockRepositories(goMockCtrl) testMetrics = metrics.NewMockPrometheusInterface(goMockCtrl) testDelegate = runner.NewMockDelegateInterface(goMockCtrl) @@ -169,14 +174,14 @@ var _ = BeforeSuite(func() { Recorder: k8sManager.GetEventRecorderFor("terraform-applier"), Clock: fakeClock, Queue: testControllerQueue, - GitSyncPool: testGitSyncPool, - Log: testLogger.Named("manager"), + Repos: testRepos, + Log: testLogger.With("logger", "manager"), MinIntervalBetweenRuns: minIntervalBetweenRunsDuration, RunStatus: runStatus, } testFilter = &controllers.Filter{ - Log: testLogger.Named("filter"), + Log: testLogger.With("logger", "filter"), LabelSelectorKey: "", LabelSelectorValue: "", } @@ -214,9 +219,9 @@ var _ = BeforeSuite(func() { Recorder: k8sManager.GetEventRecorderFor("terraform-applier"), KubeClt: fakeClient, Queue: testRunnerQueue, - GitSyncPool: testGitSyncPool, + Repos: testRepos, Delegate: testDelegate, - Log: testLogger.Named("runner"), + Log: testLogger.With("logger", "runner"), Metrics: testMetrics, TerraformExecPath: execPath, AWSSecretsEngineConfig: testVaultAWSConf, diff --git a/main.go b/main.go index c5e1023e..5de178cb 100644 --- a/main.go +++ b/main.go @@ -5,6 +5,7 @@ import ( "crypto/sha256" "fmt" "io" + "log/slog" "os" "os/exec" "os/signal" @@ -14,7 +15,7 @@ import ( "syscall" "time" - "github.com/hashicorp/go-hclog" + "github.com/go-logr/logr" "github.com/hashicorp/go-version" hcinstall "github.com/hashicorp/hc-install" "github.com/hashicorp/hc-install/fs" @@ -23,8 +24,8 @@ import ( "github.com/hashicorp/hc-install/src" "github.com/hashicorp/terraform-exec/tfexec" 
"github.com/urfave/cli/v2" - "go.uber.org/zap/zapcore" + "github.com/utilitywarehouse/git-mirror/pkg/mirror" "github.com/utilitywarehouse/terraform-applier/git" "github.com/utilitywarehouse/terraform-applier/metrics" "github.com/utilitywarehouse/terraform-applier/runner" @@ -47,7 +48,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/healthz" - "sigs.k8s.io/controller-runtime/pkg/log/zap" runTimeMetrics "sigs.k8s.io/controller-runtime/pkg/metrics" metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" @@ -57,7 +57,8 @@ import ( ) var ( - logger hclog.Logger + loggerLevel = new(slog.LevelVar) + logger *slog.Logger oidcAuthenticator *oidc.Authenticator scheme = runtime.NewScheme() @@ -72,12 +73,12 @@ var ( watchNamespaces []string globalRunEnv map[string]string - zapLevelStrings = map[string]zapcore.Level{ - "trace": zapcore.DebugLevel, - "debug": zapcore.DebugLevel, - "info": zapcore.InfoLevel, - "warn": zapcore.WarnLevel, - "error": zapcore.ErrorLevel, + levelStrings = map[string]slog.Level{ + "trace": slog.Level(-8), + "debug": slog.LevelDebug, + "info": slog.LevelInfo, + "warn": slog.LevelWarn, + "error": slog.LevelError, } flags = []cli.Flag{ @@ -252,15 +253,17 @@ func init() { utilruntime.Must(tfaplv1beta1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme - logger = hclog.New(&hclog.LoggerOptions{ - Name: "terraform-applier", - Level: hclog.Info, - IncludeLocation: false, - }) + loggerLevel.Set(slog.LevelInfo) + logger = slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: loggerLevel, + })) } func validate(c *cli.Context) { - logger.SetLevel(hclog.LevelFromString(logLevel)) + // set log level according to argument + if v, ok := levelStrings[strings.ToLower(logLevel)]; ok { + loggerLevel.Set(v) + } if c.IsSet("module-label-selector") { labelKV := strings.Split(c.String("module-label-selector"), "=") @@ -429,6 +432,29 @@ func cleanupTmpDir() { } } +func applyGitDefaults(c *cli.Context, mirrorConf mirror.RepoPoolConfig) mirror.RepoPoolConfig { + // always override root as we use root path passed as argument on controller + mirrorConf.Defaults.Root = reposRootPath + + if mirrorConf.Defaults.GitGC == "" { + mirrorConf.Defaults.GitGC = "always" + } + + if mirrorConf.Defaults.Interval == 0 { + mirrorConf.Defaults.Interval = 30 * time.Second + } + + if mirrorConf.Defaults.Auth.SSHKeyPath == "" { + mirrorConf.Defaults.Auth.SSHKeyPath = c.String("git-ssh-key-file") + } + + if mirrorConf.Defaults.Auth.SSHKnownHostsPath == "" { + mirrorConf.Defaults.Auth.SSHKnownHostsPath = c.String("git-ssh-known-hosts-file") + } + + return mirrorConf +} + func main() { app := &cli.App{ Name: "terraform-applier", @@ -453,13 +479,7 @@ func main() { func run(c *cli.Context) { ctx, cancel := context.WithCancel(c.Context) - setupLog := logger.Named("setup") - - opts := zap.Options{ - Development: true, - Level: zapLevelStrings[strings.ToLower(logLevel)], - } - ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) + ctrl.SetLogger(logr.FromSlogHandler(logger.Handler())) // runStatus keeps track of currently running modules runStatus := new(sync.Map) @@ -474,50 +494,37 @@ func run(c *cli.Context) { conf, err := parseConfigFile(c.String("config")) if err != nil { - setupLog.Error("unable to parse tf applier config file", "err", err) + logger.Error("unable to parse tf applier config file", "err", err) os.Exit(1) } - git.EnableMetrics("terraform_applier", runTimeMetrics.Registry) + // setup git-mirror + 
conf.GitMirror = applyGitDefaults(c, conf.GitMirror) - gitSyncPool, err := git.NewSyncPool( - ctx, - reposRootPath, - git.SyncOptions{ - GitSSHKeyPath: c.String("git-ssh-key-file"), - GitSSHKnownHostsPath: c.String("git-ssh-known-hosts-file"), - Interval: 30 * time.Second, - WithCheckout: true, - }, - logger.Named("git-sync"), - ) + mirror.EnableMetrics("terraform_applier", runTimeMetrics.Registry) + + // path to resolve strongbox + gitENV := []string{fmt.Sprintf("PATH=%s", os.Getenv("PATH"))} + repos, err := mirror.NewRepoPool(conf.GitMirror, logger.With("logger", "git-mirror"), gitENV) if err != nil { - setupLog.Error("could not create git sync pool", "err", err) + logger.Error("could not create git mirror pool", "err", err) os.Exit(1) } - for name, repoConfig := range conf.Repositories { - err := gitSyncPool.AddRepository(name, repoConfig, nil) - if err != nil { - setupLog.Error("unable to add repository to git sync pool", "name", name, "err", err) - os.Exit(1) - } - } - // Find the requested version of terraform and log the version // information execPath, cleanup, err := findTerraformExecPath(ctx, terraformPath, terraformVersion) defer cleanup() if err != nil { - setupLog.Error("error finding terraform", "err", err) + logger.Error("error finding terraform", "err", err) os.Exit(1) } version, err := terraformVersionString(ctx, execPath) if err != nil { - setupLog.Error("error getting terraform version", "err", err) + logger.Error("error getting terraform version", "err", err) os.Exit(1) } - setupLog.Info("found terraform binary", "version", version) + logger.Info("found terraform binary", "version", version) if electionID == "" { electionID = generateElectionID("4ee367ac", labelSelectorKey, labelSelectorValue, watchNamespaces) @@ -554,14 +561,14 @@ func run(c *cli.Context) { } filter := &controllers.Filter{ - Log: logger.Named("filter"), + Log: logger.With("logger", "filter"), LabelSelectorKey: labelSelectorKey, LabelSelectorValue: labelSelectorValue, } mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options) if err != nil { - setupLog.Error("unable to start manager", "err", err) + logger.Error("unable to start manager", "err", err) os.Exit(1) } @@ -571,28 +578,28 @@ func run(c *cli.Context) { Recorder: mgr.GetEventRecorderFor("terraform-applier"), Clock: clock, Queue: moduleQueue, - GitSyncPool: gitSyncPool, - Log: logger.Named("manager"), + Repos: repos, + Log: logger.With("logger", "manager"), MinIntervalBetweenRuns: time.Duration(c.Int("min-interval-between-runs")) * time.Second, RunStatus: runStatus, }).SetupWithManager(mgr, filter); err != nil { - setupLog.Error("unable to create module controller", "err", err) + logger.Error("unable to create module controller", "err", err) os.Exit(1) } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { - setupLog.Error("unable to set up health check", "err", err) + logger.Error("unable to set up health check", "err", err) os.Exit(1) } if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { - setupLog.Error("unable to set up ready check", "err", err) + logger.Error("unable to set up ready check", "err", err) os.Exit(1) } kubeClient, err := kubeClient() if err != nil { - setupLog.Error("unable to create kube client", "err", err) + logger.Error("unable to create kube client", "err", err) os.Exit(1) } runner := runner.Runner{ @@ -601,8 +608,8 @@ func run(c *cli.Context) { Recorder: mgr.GetEventRecorderFor("terraform-applier"), KubeClt: kubeClient, Queue: moduleQueue, - GitSyncPool: 
gitSyncPool, - Log: logger.Named("runner"), + Repos: repos, + Log: logger.With("logger", "runner"), Metrics: metrics, TerraformExecPath: execPath, TerminationGracePeriod: time.Duration(c.Int("termination-grace-period")) * time.Second, @@ -622,10 +629,10 @@ func run(c *cli.Context) { c.String("oidc-callback-url"), ) if err != nil { - setupLog.Error("could not setup oidc authenticator", "error", err) + logger.Error("could not setup oidc authenticator", "error", err) os.Exit(1) } - setupLog.Info("OIDC authentication configured", "issuer", c.String("oidc-issuer"), "clientID", c.String("oidc-client-id")) + logger.Info("OIDC authentication configured", "issuer", c.String("oidc-issuer"), "clientID", c.String("oidc-client-id")) } webserver := &webserver.WebServer{ @@ -635,21 +642,21 @@ func run(c *cli.Context) { KubeClient: kubeClient, RunQueue: moduleQueue, RunStatus: runStatus, - Log: logger.Named("webserver"), + Log: logger.With("logger", "webserver"), } go runner.Start(ctx, done) go func() { err := webserver.Start(ctx) if err != nil { - setupLog.Error("unable to start webserver", "err", err) + logger.Error("unable to start webserver", "err", err) } }() go func() { - setupLog.Info("starting manager") + logger.Info("starting manager") if err := mgr.Start(ctx); err != nil { - setupLog.Error("problem running manager", "err", err) + logger.Error("problem running manager", "err", err) } }() @@ -661,7 +668,7 @@ func run(c *cli.Context) { signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) sig := <-sigCh - setupLog.Info("received Term signal, waiting for all running modules to finish before exiting", "signal", sig) + logger.Info("received Term signal, waiting for all running modules to finish before exiting", "signal", sig) // signal runner to start shutting down... 
cancel() @@ -669,12 +676,12 @@ func run(c *cli.Context) { for { select { case sig := <-sigCh: - setupLog.Error("received a second signal, force exiting", "signal", sig) + logger.Error("received a second signal, force exiting", "signal", sig) os.Exit(1) // wait for runner to finish case <-done: - setupLog.Info("runner successfully shutdown") + logger.Info("runner successfully shutdown") } } }(cancel) diff --git a/runner/runner.go b/runner/runner.go index b981d45f..19385f2a 100644 --- a/runner/runner.go +++ b/runner/runner.go @@ -3,11 +3,11 @@ package runner import ( "context" "fmt" + "log/slog" "regexp" "sync" "time" - "github.com/hashicorp/go-hclog" tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1" "github.com/utilitywarehouse/terraform-applier/git" "github.com/utilitywarehouse/terraform-applier/metrics" @@ -37,9 +37,9 @@ type Runner struct { ClusterClt client.Client Recorder record.EventRecorder KubeClt kubernetes.Interface - GitSyncPool git.SyncInterface + Repos git.Repositories Queue <-chan Request - Log hclog.Logger + Log *slog.Logger Delegate DelegateInterface Metrics metrics.PrometheusInterface TerraformExecPath string @@ -143,23 +143,18 @@ func (r *Runner) process(req Request, cancelChan <-chan struct{}) bool { } }() - commitHash, err := r.GitSyncPool.HashForPath(ctx, module.Spec.RepoName, module.Spec.Path) + commitHash, err := r.Repos.Hash(ctx, module.Spec.RepoURL, module.Spec.RepoRef, module.Spec.Path) if err != nil { log.Error("unable to get commit hash", "err", err) return false } - commitLog, err := r.GitSyncPool.LogMsgForPath(ctx, module.Spec.RepoName, module.Spec.Path) + commitLog, err := r.Repos.LogMsg(ctx, module.Spec.RepoURL, module.Spec.RepoRef, module.Spec.Path) if err != nil { log.Error("unable to get commit log subject", "err", err) return false } - repo, err := r.GitSyncPool.RepositoryConfig(module.Spec.RepoName) - if err != nil { - log.Error("unable to get repo's remote url", "err", err) - } - // if termination signal received its safe to return here if isChannelClosed(cancelChan) { msg := "terraform run interrupted as runner is shutting down" @@ -169,7 +164,7 @@ func (r *Runner) process(req Request, cancelChan <-chan struct{}) bool { } // Update Status - if err = r.SetRunStartedStatus(req, module, "preparing for TF run", commitHash, commitLog, repo.Remote, r.Clock.Now()); err != nil { + if err = r.SetRunStartedStatus(req, module, "preparing for TF run", commitHash, commitLog, module.Spec.RepoURL, r.Clock.Now()); err != nil { log.Error("unable to set run starting status", "err", err) return false } @@ -261,7 +256,8 @@ func (r *Runner) runTF( _, err := te.init(ctx, backendConf) if err != nil { msg := fmt.Sprintf("unable to init module: err:%s", err) - log.Error(msg) + // tf err contains new lines not suitable logging + log.Error("unable to init module", "err", fmt.Sprintf("%q", err)) r.setFailedStatus(req, module, tfaplv1beta1.ReasonInitialiseFailed, msg, r.Clock.Now()) return false } @@ -287,7 +283,8 @@ func (r *Runner) runTF( if err != nil { module.Status.RunOutput = planOut msg := fmt.Sprintf("unable to plan module: err:%s", err) - log.Error(msg) + // tf err contains new lines not suitable logging + log.Error("unable to plan module", "err", fmt.Sprintf("%q", err)) r.setFailedStatus(req, module, tfaplv1beta1.ReasonPlanFailed, msg, r.Clock.Now()) return false } @@ -304,7 +301,8 @@ func (r *Runner) runTF( savedPlan, err := te.showPlanFileRaw(ctx) if err != nil { msg := fmt.Sprintf("unable to get saved plan: err:%s", err) - log.Error(msg) + // tf 
err contains new lines not suitable logging + log.Error("unable to get saved plan", "err", fmt.Sprintf("%q", err)) r.setFailedStatus(req, module, tfaplv1beta1.ReasonPlanFailed, msg, r.Clock.Now()) return false } @@ -346,11 +344,12 @@ func (r *Runner) runTF( if err != nil { module.Status.RunOutput = savedPlan + applyOut msg := fmt.Sprintf("unable to apply module: err:%s", err) - log.Error(msg) + // tf err contains new lines not suitable logging + log.Error("unable to apply module", "err", fmt.Sprintf("%q", err)) r.setFailedStatus(req, module, tfaplv1beta1.ReasonApplyFailed, msg, r.Clock.Now()) return false } - + module.Status.RunOutput = savedPlan + applyOut module.Status.LastApplyInfo = tfaplv1beta1.OutputStats{Timestamp: &metav1.Time{Time: r.Clock.Now()}, CommitHash: commitHash, Output: savedPlan + applyOut} // extract last line of output @@ -409,7 +408,7 @@ func (r *Runner) setFailedStatus(req Request, module *tfaplv1beta1.Module, reaso module.Status.StateMessage = msg module.Status.StateReason = reason - r.Recorder.Event(module, corev1.EventTypeWarning, reason, msg) + r.Recorder.Event(module, corev1.EventTypeWarning, reason, fmt.Sprintf("%q", msg)) if err := r.patchStatus(context.Background(), req.NamespacedName, module.Status); err != nil { r.Log.With("module", req.NamespacedName).Error("unable to set failed status", "err", err) diff --git a/runner/tfexec.go b/runner/tfexec.go index 4516bb4f..fd40bcf6 100644 --- a/runner/tfexec.go +++ b/runner/tfexec.go @@ -60,9 +60,9 @@ func (r *Runner) NewTFRunner( }() // clone repo to new temp dir so that file doesn't change during run. - // copy whole repository because module might contain relative path to modules/files + // checkout whole repo as module might contain relative path to modules/files // which are outside of its path - err = r.GitSyncPool.CopyRepo(ctx, module.Spec.RepoName, tmpRoot) + _, err = r.Repos.Clone(ctx, module.Spec.RepoURL, tmpRoot, module.Spec.RepoRef, "", true) if err != nil { return nil, fmt.Errorf("unable copy module's tf files to tmp dir err:%w", err) } diff --git a/webserver/template.go b/webserver/template.go index 9adb98eb..72c73d31 100644 --- a/webserver/template.go +++ b/webserver/template.go @@ -58,5 +58,9 @@ func commitURL(remoteURL, hash string) string { remoteURL = strings.TrimPrefix(remoteURL, "git@") remoteURL = strings.TrimSuffix(remoteURL, ".git") remoteURL = strings.ReplaceAll(remoteURL, ":", "/") + + if hash == "" { + return fmt.Sprintf("https://%s", remoteURL) + } return fmt.Sprintf("https://%s/commit/%s", remoteURL, hash) } diff --git a/webserver/template_test.go b/webserver/template_test.go index e708244f..2f56a251 100644 --- a/webserver/template_test.go +++ b/webserver/template_test.go @@ -25,6 +25,8 @@ func Test_ExecuteTemplate(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: "admins", Namespace: "foo"}, Spec: tfaplv1beta1.ModuleSpec{ Schedule: "00 */1 * * *", + RepoURL: "https://github.com/utilitywarehouse/terraform-applier.git", + RepoRef: "prj-dev", Path: "foo/admins", }, Status: tfaplv1beta1.ModuleStatus{ @@ -38,7 +40,8 @@ func Test_ExecuteTemplate(t *testing.T) { TypeMeta: metav1.TypeMeta{APIVersion: "terraform-applier.uw.systems/v1beta1", Kind: "Module"}, ObjectMeta: metav1.ObjectMeta{Name: "users", Namespace: "foo"}, Spec: tfaplv1beta1.ModuleSpec{ - Schedule: "*/30 * * * *", + RepoURL: "git@github.com:utilitywarehouse/terraform-applier.git", + RepoRef: "master", Path: "foo/users", PlanOnly: &boolTrue, }, @@ -55,8 +58,10 @@ func Test_ExecuteTemplate(t *testing.T) { TypeMeta: 
metav1.TypeMeta{APIVersion: "terraform-applier.uw.systems/v1beta1", Kind: "Module"}, ObjectMeta: metav1.ObjectMeta{Name: "groups", Namespace: "bar"}, Spec: tfaplv1beta1.ModuleSpec{ + RepoURL: "ssh://git@github.com/utilitywarehouse/terraform-applier.git", + RepoRef: "as-test-module", Schedule: "00 */2 * * *", - Path: "dev/groups", + Path: "integration_test/src/modules/hello", }, Status: tfaplv1beta1.ModuleStatus{ CurrentState: "Ready", @@ -116,6 +121,7 @@ Apply complete! Resources: 7 added, 0 changed, 0 destroyed.`, TypeMeta: metav1.TypeMeta{APIVersion: "terraform-applier.uw.systems/v1beta1", Kind: "Module"}, ObjectMeta: metav1.ObjectMeta{Name: "users", Namespace: "bar"}, Spec: tfaplv1beta1.ModuleSpec{ + RepoURL: "git@github.com:utilitywarehouse/terraform-applier.git", Schedule: "*/15 * * * ", Path: "dev/users", }, diff --git a/webserver/templates/status.html b/webserver/templates/status.html index ec00e0c5..51f07a1d 100644 --- a/webserver/templates/status.html +++ b/webserver/templates/status.html @@ -128,7 +128,7 @@

[status.html markup not recoverable in this hunk (HTML tags stripped); the change it carries:
 the module detail card becomes a definition list showing Status ({{.Status.CurrentState}} /
 {{.Status.StateReason}}), Schedule ({{ .Spec.Schedule }}, rendered only inside
 {{if .Spec.Schedule}}), Repo (a link built from {{.Spec.RepoURL}}), Ref ({{.Spec.RepoRef}}),
 Path ({{ .Spec.Path }}), Message ({{.Status.StateMessage}}), Last run type ({{ .Status.RunType }}),
 Last run started at ({{ formattedTime .Status.RunStartedAt }}, took
 {{ duration .Status.RunDuration }}), Last run commit hash ({{.Status.RunCommitHash}}),
 Last run commit message ({{.Status.RunCommitMsg}}), Last applied commit hash
 ({{.Status.LastApplyInfo.CommitHash}}) and Last applied at
 ({{ formattedTime .Status.LastApplyInfo.Timestamp }}); the old Repo: {{ .Spec.RepoName }},
 Commit Hash and Commit Message rows are removed.]
@@ -191,14 +246,6 @@
 {{.Name}}
[status.html markup not recoverable in this hunk; it removes the standalone
 "At: {{ formattedTime .Status.LastApplyInfo.Timestamp }}" line from each module's list entry,
 as the detail card above now shows the last applied timestamp.]
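The Repo and commit links in the template above are presumably rendered through the `commitURL` helper whose `webserver/template.go` hunk appears earlier in this patch. A minimal, self-contained sketch of the new behaviour, reconstructed only from the lines visible in that hunk (the real helper may normalise more remote-URL shapes than shown here):

```go
package main

import (
	"fmt"
	"strings"
)

// commitURL builds an HTTPS link from a git remote and an optional commit hash.
// With the added empty-hash branch it falls back to the repository root instead
// of producing a dangling /commit/ URL.
func commitURL(remoteURL, hash string) string {
	remoteURL = strings.TrimPrefix(remoteURL, "git@")
	remoteURL = strings.TrimSuffix(remoteURL, ".git")
	remoteURL = strings.ReplaceAll(remoteURL, ":", "/")

	if hash == "" {
		return fmt.Sprintf("https://%s", remoteURL)
	}
	return fmt.Sprintf("https://%s/commit/%s", remoteURL, hash)
}

func main() {
	remote := "git@github.com:utilitywarehouse/terraform-applier.git"
	fmt.Println(commitURL(remote, ""))       // https://github.com/utilitywarehouse/terraform-applier
	fmt.Println(commitURL(remote, "abc123")) // https://github.com/utilitywarehouse/terraform-applier/commit/abc123
}
```

The added empty-hash branch appears to be what lets the Repo row link to the repository root before a module has recorded any commit hash.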
 
diff --git a/webserver/webserver.go b/webserver/webserver.go
index dd6d40c8..12a6b160 100644
--- a/webserver/webserver.go
+++ b/webserver/webserver.go
@@ -8,11 +8,11 @@ import (
 	"fmt"
 	"html/template"
 	"io"
+	"log/slog"
 	"net/http"
 	"sync"
 
 	"github.com/gorilla/mux"
-	"github.com/hashicorp/go-hclog"
 	tfaplv1beta1 "github.com/utilitywarehouse/terraform-applier/api/v1beta1"
 	"github.com/utilitywarehouse/terraform-applier/runner"
 	"github.com/utilitywarehouse/terraform-applier/webserver/oidc"
@@ -21,13 +21,15 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
+const trace = slog.Level(-8)
+
 //go:embed static
 var staticFiles embed.FS
 
 //go:embed templates/status.html
 var statusHTML string
 
-var log hclog.Logger
+var log *slog.Logger
 
 // WebServer struct
 type WebServer struct {
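hclog's Trace level has no built-in slog counterpart, so the webserver defines its own severity of -8, one step finer than slog.LevelDebug (-4). A small standalone sketch of how such a custom level behaves with slog's text handler; the constant mirrors the one added above, while the handler wiring is purely illustrative rather than the applier's own setup:

```go
package main

import (
	"context"
	"log/slog"
	"os"
)

// Custom trace severity: one step finer than slog.LevelDebug (-4).
const trace = slog.Level(-8)

func main() {
	ctx := context.Background()

	// Handler left at the default Info level: the trace record is filtered out.
	quiet := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelInfo}))
	quiet.Log(ctx, trace, "Applier status request") // dropped

	// Handler lowered to the custom level: the record is emitted, and slog
	// names the unknown level relative to the nearest built-in one ("DEBUG-4").
	verbose := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: trace}))
	verbose.Log(ctx, trace, "Applier status request")
}
```

Records below the handler's configured level are simply dropped, which is why these trace messages only show up when the controller's log level is set to trace.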
@@ -37,7 +39,7 @@ type WebServer struct {
 	KubeClient    kubernetes.Interface
 	RunQueue      chan<- runner.Request
 	RunStatus     *sync.Map
-	Log           hclog.Logger
+	Log           *slog.Logger
 }
 
 // StatusPageHandler implements the http.Handler interface and serves a status page with info about the most recent applier run.
@@ -45,12 +47,12 @@ type StatusPageHandler struct {
 	Template      *template.Template
 	Authenticator *oidc.Authenticator
 	ClusterClt    client.Client
-	Log           hclog.Logger
+	Log           *slog.Logger
 }
 
 // ServeHTTP populates the status page template with data and serves it when there is a request.
 func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	s.Log.Trace("Applier status request")
+	s.Log.Log(r.Context(), trace, "Applier status request")
 
 	if s.Authenticator != nil {
 		_, err := s.Authenticator.Authenticate(r.Context(), w, r)
@@ -84,7 +86,7 @@ func (s *StatusPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 		log.Error("Request failed: %v", err)
 		return
 	}
-	s.Log.Trace("Request completed successfully")
+	s.Log.Log(r.Context(), trace, "Request completed successfully")
 }
 
 // ForceRunHandler implements the http.Handler interface and serves an API
@@ -95,7 +97,7 @@ type ForceRunHandler struct {
 	KubeClt       kubernetes.Interface
 	RunQueue      chan<- runner.Request
 	RunStatus     *sync.Map
-	Log           hclog.Logger
+	Log           *slog.Logger
 }
 
 // ServeHTTP handles requests for forcing a run by attempting to add to the