diff --git a/.gcloudignore b/.gcloudignore index 2f786a1..586b408 100644 --- a/.gcloudignore +++ b/.gcloudignore @@ -5,7 +5,6 @@ *.so *.dylib *.zip -integrationcli .DS_Store # binaries diff --git a/.gitignore b/.gitignore index d1c9745..fbee20d 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,9 @@ integrationcli .DS_Store +# VSCode +.vscode/launch.json + # binaries dist/ diff --git a/Dockerfile.deploy b/Dockerfile.deploy new file mode 100644 index 0000000..4bf9947 --- /dev/null +++ b/Dockerfile.deploy @@ -0,0 +1,61 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM golang:1.21 as builder + +ARG TAG +ARG COMMIT + +ADD ./internal /go/src/integrationcli/internal +ADD ./cmd /go/src/integrationcli/cmd + +COPY go.mod go.sum /go/src/integrationcli/ +WORKDIR /go/src/integrationcli + +ENV GO111MODULE=on +RUN go mod tidy +RUN go mod download +RUN date +%FT%H:%I:%M+%Z > /tmp/date +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -trimpath -buildvcs=true -a -gcflags='all="-l"' -ldflags='-s -w -extldflags "-static" -X main.version='${TAG}' -X main.commit='${COMMIT}' -X main.date='$(cat /tmp/date) -o /go/bin/integrationcli /go/src/integrationcli/cmd/integrationcli/integrationcli.go + +FROM us-docker.pkg.dev/appintegration-toolkit/internal/jq:latest as jq + +FROM alpine:latest +LABEL org.opencontainers.image.url='https://github.com/GoogleCloudPlatform/application-integration-management-toolkit' \ + org.opencontainers.image.documentation='https://github.com/GoogleCloudPlatform/application-integration-management-toolkit' \ + org.opencontainers.image.source='https://github.com/GoogleCloudPlatform/application-integration-management-toolkit' \ + org.opencontainers.image.vendor='Google LLC' \ + org.opencontainers.image.licenses='Apache-2.0' \ + org.opencontainers.image.description='This is a tool to interact with Application Integration APIs' + +RUN apk --no-cache add ca-certificates \ + && update-ca-certificates + +ARG USER=nonroot +ENV HOME /home/$USER + +RUN apk add --update sudo + +RUN adduser -D $USER \ + && mkdir -p /etc/sudoers.d \ + && echo "$USER ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/$USER \ + && chmod 0440 /etc/sudoers.d/$USER + +COPY --from=jq /jq /usr/local/bin/jq +COPY LICENSE.txt / +COPY third-party-licenses.txt / +COPY --from=builder /go/bin/integrationcli /usr/local/bin/integrationcli + +USER $USER +WORKDIR $HOME diff --git a/cloudbuild-debug.yaml b/cloudbuild-debug.yaml new file mode 100644 index 0000000..250200b --- /dev/null +++ b/cloudbuild-debug.yaml @@ -0,0 +1,43 @@ +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# gcloud builds submit --config=cloudbuild-debug.yaml --project=project-name +steps: +#publish integrationcli-deploy +- name: 'gcr.io/cloud-builders/docker' + id: build_integrationcli + args: + - 'build' + - '-t' + - '${_CD_IMAGE_NAME}:debug' + - '-f' + - 'Dockerfile.deploy' + - '.' + - '--build-arg' + - 'TAG=debug' + - '--build-arg' + - 'COMMIT=not-set' +- name: 'gcr.io/cloud-builders/docker' + id: push_integrationcli + args: ['push', '${_CD_IMAGE_NAME}:debug'] + +#repo name +substitutions: + _REPO: "images" + _CD_IMAGE_NAME: "us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/integrationcli-deploy" + +options: + machineType: E2_HIGHCPU_8 + logging: CLOUD_LOGGING_ONLY + substitution_option: ALLOW_LOOSE + dynamic_substitutions: true diff --git a/cloudbuild.yaml b/cloudbuild.yaml index 8211ece..8b327be 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -50,6 +50,20 @@ steps: id: push_latest_integrationcli args: ['push', '${_IMAGE_NAME}:latest'] +# the following image is built for cloud deploy +- name: 'gcr.io/cloud-builders/docker' + id: tag_integrationcli_deploy + args: ['tag', '${_IMAGE_NAME}:${TAG_NAME}', '${_CD_IMAGE_NAME}:${TAG_NAME}'] +- name: 'gcr.io/cloud-builders/docker' + id: push_integrationcli_deploy + args: ['push', '${_CD_IMAGE_NAME}:${TAG_NAME}'] +- name: 'gcr.io/cloud-builders/docker' + id: tag_latest_integrationcli_deploy + args: ['tag', '${_CD_IMAGE_NAME}:${TAG_NAME}', '${_CD_IMAGE_NAME}:latest'] +- name: 'gcr.io/cloud-builders/docker' + id: push_latest_integrationcli_deploy + args: ['push', '${_CD_IMAGE_NAME}:latest'] + #sign and attach integrationcli - name: 'us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/cosign:latest' id: sign_integrationcli @@ -62,6 +76,18 @@ steps: cosign sign --key=/tmp/cosign.key --output-signature=/tmp/integrationcli_${TAG_NAME}.sig --yes ${_IMAGE_NAME}:${TAG_NAME} cosign attach signature --signature=/tmp/integrationcli_${TAG_NAME}.sig ${_IMAGE_NAME}:${TAG_NAME} +#sign and attach the integrationcli-deploy image +- name: 'us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/cosign:latest' + id: sign_integrationcli_deploy + entrypoint: 'sh' + secretEnv: ['COSIGN_PASSWORD', 'COSIGN_PRIVATE_KEY'] + args: + - -c + - | + echo "$$COSIGN_PRIVATE_KEY" > /tmp/cosign.key + cosign sign --key=/tmp/cosign.key --output-signature=/tmp/integrationcli_${TAG_NAME}.sig --yes ${_CD_IMAGE_NAME}:${TAG_NAME} + cosign attach signature --signature=/tmp/integrationcli_${TAG_NAME}.sig ${_CD_IMAGE_NAME}:${TAG_NAME} + availableSecrets: secretManager: - versionName: projects/$PROJECT_ID/secrets/integrationcli-cosign-password/versions/latest @@ -73,6 +99,7 @@ availableSecrets: substitutions: _REPO: "images" _IMAGE_NAME: "us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/integrationcli" + _CD_IMAGE_NAME: "us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/integrationcli-deploy" _COSIGN_IMAGE_NAME: "us-docker.pkg.dev/${PROJECT_ID}/${_REPO}/cosign" options: diff --git a/go.mod b/go.mod index 7327b4e..002b369 100644 --- a/go.mod +++ b/go.mod @@ -29,11 +29,13 @@ replace internal/cmd => ./internal/cmd require github.com/spf13/cobra v1.8.0 require ( + cloud.google.com/go v0.112.1 // indirect cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect cloud.google.com/go/kms v1.15.7 // indirect cloud.google.com/go/secretmanager v1.11.5 // indirect + cloud.google.com/go/storage v1.39.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -41,8 +43,9 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -71,7 +74,7 @@ require ( google.golang.org/api v0.167.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240228224816-df926f6c8641 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240228224816-df926f6c8641 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240304161311-37d4d3c04a78 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641 // indirect google.golang.org/grpc v1.62.0 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/go.sum b/go.sum index 074274c..3dffb41 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= @@ -11,6 +11,8 @@ cloud.google.com/go/kms v1.15.7 h1:7caV9K3yIxvlQPAcaFffhlT7d1qpxjB1wHBtjWa13SM= cloud.google.com/go/kms v1.15.7/go.mod h1:ub54lbsa6tDkUwnu4W7Yt1aAIFLnspgh0kPGToDukeI= cloud.google.com/go/secretmanager v1.11.5 h1:82fpF5vBBvu9XW4qj0FU2C6qVMtj1RM/XHwKXUEAfYY= cloud.google.com/go/secretmanager v1.11.5/go.mod h1:eAGv+DaCHkeVyQi0BeXgAHOU0RdrMeZIASKc+S7VqH4= +cloud.google.com/go/storage v1.39.1 h1:MvraqHKhogCOTXTlct/9C3K3+Uy2jBmFYb3/Sp6dVtY= +cloud.google.com/go/storage v1.39.1/go.mod h1:xK6xZmxZmo+fyP7+DEF6FhNc24/JAe95OLyOHCXFH1o= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -53,6 +55,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -62,9 +66,13 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= @@ -114,8 +122,8 @@ go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/sdk v1.22.0/go.mod h1:iu7luyVGYovrRpe2fmj3CVKouQNdTOkxtLzPvPz1DOc= go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -177,6 +185,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= google.golang.org/api v0.167.0 h1:CKHrQD1BLRii6xdkatBDXyKzM0mkawt2QP+H3LtPmSE= google.golang.org/api v0.167.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -188,8 +198,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto 
v0.0.0-20240228224816-df926f6c8641 h1:GihpvzHjeZHw+/mzsWpdxwr1LaG6E3ff/gyeZlVHbyc= google.golang.org/genproto v0.0.0-20240228224816-df926f6c8641/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240228224816-df926f6c8641 h1:SO1wX9btGFrwj9EzH3ocqfwiPVOxfv4ggAJajzlHA5s= -google.golang.org/genproto/googleapis/api v0.0.0-20240228224816-df926f6c8641/go.mod h1:wLupoVsUfYPgOMwjzhYFbaVklw/INms+dqTp0tc1fv8= +google.golang.org/genproto/googleapis/api v0.0.0-20240304161311-37d4d3c04a78 h1:SzXBGiWM1LNVYLCRP3e0/Gsze804l4jGoJ5lYysEO5I= +google.golang.org/genproto/googleapis/api v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641 h1:DKU1r6Tj5s1vlU/moGhuGz7E3xRfwjdAfDzbsaQJtEY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240228224816-df926f6c8641/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= diff --git a/internal/apiclient/bundles.go b/internal/apiclient/bundles.go index 2baa6a2..d134099 100644 --- a/internal/apiclient/bundles.go +++ b/internal/apiclient/bundles.go @@ -15,11 +15,22 @@ package apiclient import ( + "archive/tar" "bytes" + "compress/gzip" + "context" + "encoding/json" "fmt" + "io" + "net/url" "os" + "path" + "path/filepath" + "strings" "internal/clilog" + + "cloud.google.com/go/storage" ) // entityPayloadList stores list of entities @@ -108,3 +119,202 @@ func GetEntityPayloadList() [][]byte { func ClearEntityPayloadList() { entityPayloadList = entityPayloadList[:0] } + +func ExtractTgz(gcsURL string) (folder string, err error) { + + ctx := context.Background() + + folder, err = os.MkdirTemp("", "integration") + if err != nil { + return "", err + } + + // Parse the GCS URL + parsedURL, err := url.Parse(gcsURL) + if err != nil { + return "", fmt.Errorf("Error parsing GCS URL:", err) + } + if parsedURL.Scheme != "gs" { + return "", fmt.Errorf("Invalid GCS URL scheme. 
Should be 'gs://'") + } + + bucketName := parsedURL.Host + objectName := strings.TrimPrefix(parsedURL.Path, "/") + fileName := filepath.Base(gcsURL) + + // Create a Google Cloud Storage client + client, err := storage.NewClient(ctx) + if err != nil { + return "", fmt.Errorf("Error creating GCS client:", err) + } + defer client.Close() + + // Get a handle to the bucket and the object + bucket := client.Bucket(bucketName) + object := bucket.Object(objectName) + + // Create a reader to stream the object's content + reader, err := object.NewReader(ctx) + if err != nil { + return "", fmt.Errorf("Error creating object reader:", err) + } + defer reader.Close() + + // Create the local file to save the download + localFile, err := os.Create(path.Join(folder, fileName)) + if err != nil { + return "", fmt.Errorf("Error creating local file:", err) + } + defer localFile.Close() + + // Download the object and save it to the local file + if _, err := io.Copy(localFile, reader); err != nil { + return "", fmt.Errorf("Error downloading object:", err) + } + + // Open the .tgz file + file, err := os.Open(path.Join(folder, fileName)) + if err != nil { + return "", fmt.Errorf("Error opening file:", err) + } + defer file.Close() // Ensure file closure + + // Create a gzip reader + gzipReader, err := gzip.NewReader(file) + if err != nil { + return "", fmt.Errorf("Error creating gzip reader:", err) + } + defer gzipReader.Close() // Ensure closure + + // Create a tar reader + tarReader := tar.NewReader(gzipReader) + + // Extract each file from the tar archive + for { + header, err := tarReader.Next() + if err == io.EOF { + break // End of archive + } + if err != nil { + return "", fmt.Errorf("Error reading tar entry:", err) + } + if strings.Contains(header.Name, "..") { + continue + } + + // Process the file header + switch header.Typeflag { + case tar.TypeDir: + // Create directory + if err := os.Mkdir(path.Join(folder, header.Name), 0755); err != nil { + return "", fmt.Errorf("Error creating directory:", err) + } + case tar.TypeReg: + // Create output file + outFile, err := os.Create(path.Join(folder, header.Name)) + if err != nil { + return "", fmt.Errorf("Error creating file:", err) + } + defer outFile.Close() + + // Copy contents from the tar to the output file + if _, err := io.Copy(outFile, tarReader); err != nil { + return "", fmt.Errorf("Error writing file:", err) + } + default: + return "", fmt.Errorf("Unsupported type: %b in %s\n", header.Typeflag, header.Name) + } + } + return folder, nil +} + +func GetCloudDeployGCSLocations(pipeline string, release string) (skaffoldConfigUri string, err error) { + type cloudDeployRelease struct { + SkaffoldConfigUri string `json:"skaffoldConfigUri"` + TargetArtifacts map[string]struct { + SkaffoldConfigPath string `json:"skaffoldConfigPath"` + ManifestPath string `json:"manifestPath"` + ArtifactUri string `json:"artifactUri"` + PhaseArtifacts map[string]struct { + SkaffoldConfigPath string `json:"skaffoldConfigPath"` + ManifestPath string `json:"manifestPath"` + } `json:"phaseArtifacts"` + } `json:"targetArtifacts"` + } + + r := cloudDeployRelease{} + + cloudDeployURL := fmt.Sprintf("https://clouddeploy.googleapis.com/v1/projects/%s/locations/%s/deliveryPipelines/%s/releases/%s", + GetProjectID(), GetRegion(), pipeline, release) + u, _ := url.Parse(cloudDeployURL) + + ClientPrintHttpResponse.Set(false) + + respBody, err := HttpClient(u.String()) + if err != nil { + return "", err + } + defer ClientPrintHttpResponse.Set(GetCmdPrintHttpResponseSetting()) + + err = 
json.Unmarshal(respBody, &r) + if err != nil { + return "", err + } + + return r.SkaffoldConfigUri, nil +} + +func WriteResultsFile(deployOutputGCS string, status string) (err error) { + + contents := fmt.Sprintf("{\"resultStatus\": \"%s\"}", status) + filename := "results.json" + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + return fmt.Errorf("storage.NewClient: %v", err) + } + defer client.Close() + + // Extract bucket name and object path from GCS URI + bucketName, objectPath, err := parseGCSURI(deployOutputGCS) + if err != nil { + return err + } + objectName := path.Join(objectPath, filename) + + bucket := client.Bucket(bucketName) + object := bucket.Object(objectName) + writer := object.NewWriter(ctx) + + // Write the content + if _, err := writer.Write([]byte(contents)); err != nil { + return fmt.Errorf("Object(%q).NewWriter: %v", objectName, err) + } + + // Close the writer to ensure data is uploaded + if err := writer.Close(); err != nil { + return fmt.Errorf("Writer.Close: %v", err) + } + + return nil +} + +func parseGCSURI(gcsURI string) (bucketName, objectPath string, err error) { + // Parse the GCS URL + parsedURL, err := url.Parse(gcsURI) + if err != nil { + return "", "", fmt.Errorf("Error parsing GCS URL: %v", err) + } + if parsedURL.Scheme != "gs" { + return "", "", fmt.Errorf("Invalid GCS URL scheme. Should be 'gs://'") + } + // Remove the protocol prefix + uri := strings.TrimPrefix(gcsURI, "gs://") + + // Split based on the first '/' + parts := strings.SplitN(uri, "/", 2) + + // Check for proper URI format + if len(parts) != 2 { + return "", "", fmt.Errorf("Invalid GCS URI format") + } + return parts[0], parts[1], nil +} diff --git a/internal/client/connections/custom-connections.go b/internal/client/connections/custom-connections.go index a64d115..e63e989 100644 --- a/internal/client/connections/custom-connections.go +++ b/internal/client/connections/custom-connections.go @@ -55,6 +55,8 @@ type configVariableTemplate struct { LocationType string `json:"locationType,omitempty"` } +const waitTime = 1 * time.Second + // CreateCustom func CreateCustom(name string, description string, displayName string, connType string, labels map[string]string, @@ -183,6 +185,10 @@ func GetCustomVersion(connName string, connVersion string, overrides bool) (resp if err = json.Unmarshal(respBody, &cVerReq); err != nil { return nil, err } + // remove the default compute service account from the overrides + if cVerReq.ServiceAccount != nil && strings.Contains(*cVerReq.ServiceAccount, "-compute@developer.gserviceaccount.com") { + cVerReq.ServiceAccount = nil + } c.CustomConnectorVersion = cVerReq overridesResp, err := json.Marshal(c) if err != nil { @@ -254,10 +260,12 @@ func CreateCustomWithVersion(name string, version string, contents []byte, } // wait for custom connection to be created - operationName := strings.Split(fmt.Sprintf("%s", createCustomMap["name"]), "/")[5] - err = waitForCustom(operationName) - if err != nil { - return err + if len(strings.Split(fmt.Sprintf("%s", createCustomMap["name"]), "/")) > 4 { + operationName := strings.Split(fmt.Sprintf("%s", createCustomMap["name"]), "/")[5] + err = waitForCustom(operationName) + if err != nil { + return err + } } connectionVersionContents, err := json.Marshal(c.CustomConnectorVersion) @@ -293,9 +301,10 @@ func waitForCustom(operationName string) error { } done := respMap["done"].(bool) if done { + time.Sleep(waitTime) return nil } - time.Sleep(1 * time.Second) + time.Sleep(waitTime) } } @@ -314,8 +323,9 @@ func waitForCustomVersion(name string,
version string) error { } if respMap["state"] == "ACTIVE" { + time.Sleep(waitTime) return nil } - time.Sleep(1 * time.Second) + time.Sleep(waitTime) } } diff --git a/internal/client/integrations/integrations.go b/internal/client/integrations/integrations.go index 9492fdd..912a216 100644 --- a/internal/client/integrations/integrations.go +++ b/internal/client/integrations/integrations.go @@ -895,9 +895,18 @@ func GetConnectionsWithRegion(integration []byte) (connections []integrationConn connections = append(connections, newConnection) } if _, ok := taskConfig.Parameters["connectionName"]; ok { - newConnection := getIntegrationConnection(taskConfig.Parameters["connectionName"], - taskConfig.Parameters["connectionVersion"], iversion.IntegrationConfigParameters) - connections = append(connections, newConnection) + // check custom connection + if isCustomConnection(taskConfig.Parameters["connectionVersion"]) { + newCustomConnection := getIntegrationCustomConnection(taskConfig.Parameters["connectionVersion"]) + connections = append(connections, newCustomConnection) + newConnection := getIntegrationConnection(taskConfig.Parameters["connectionName"], + taskConfig.Parameters["connectionVersion"], iversion.IntegrationConfigParameters) + connections = append(connections, newConnection) + } else { + newConnection := getIntegrationConnection(taskConfig.Parameters["connectionName"], + taskConfig.Parameters["connectionVersion"], iversion.IntegrationConfigParameters) + connections = append(connections, newConnection) + } } } } @@ -1434,6 +1443,15 @@ func getJson(contents string) map[string]interface{} { return m } +func getIntegrationCustomConnection(connectionVersion eventparameter) integrationConnection { + ic := integrationConnection{} + ic.Name = strings.Split(*connectionVersion.Value.StringValue, "/")[7] + ic.Version = strings.Split(*connectionVersion.Value.StringValue, "/")[9] + ic.Region = "global" + ic.CustomConnection = true + return ic +} + func getIntegrationConnection(connectionName eventparameter, connectionVersion eventparameter, configParams []parameterConfig) integrationConnection { ic := integrationConnection{} @@ -1453,13 +1471,7 @@ func getIntegrationConnection(connectionName eventparameter, } ic.Version = strings.Split(*connectionVersion.Value.StringValue, "/")[9] - connectionType := strings.Split(*connectionVersion.Value.StringValue, "/")[5] - // CustomConnector will have the provider as customConnector. For others the provider can be default/GCP or any other provider. 
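// The split-index arithmetic used in getIntegrationCustomConnection, getIntegrationConnection
// and isCustomConnection in this hunk assumes the connection version value is a resource name of the form
//   projects/{project}/locations/{location}/providers/{provider}/connectors/{connector}/versions/{version}
// (a layout inferred from the indices used in this change): segment [5] is the provider,
// e.g. "customConnector", segment [7] is the connector name and segment [9] is the version.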
- if strings.EqualFold(connectionType, "customConnector") { - ic.CustomConnection = true - } else { - ic.CustomConnection = false - } + ic.CustomConnection = false return ic } @@ -1476,3 +1488,12 @@ func getConfigParamValue(name string, configParams []parameterConfig) string { } return "" } + +func isCustomConnection(connectionVersion eventparameter) bool { + connectionType := strings.Split(*connectionVersion.Value.StringValue, "/")[5] + if strings.EqualFold(connectionType, "customConnector") { + return true + } else { + return false + } +} diff --git a/internal/cmd/integrations/apply.go b/internal/cmd/integrations/apply.go index 0bfa5ec..adaee61 100644 --- a/internal/cmd/integrations/apply.go +++ b/internal/cmd/integrations/apply.go @@ -50,9 +50,34 @@ var ApplyCmd = &cobra.Command{ if err = apiclient.SetRegion(cmdRegion.Value.String()); err != nil { return err } + if folder == "" && (pipeline == "" || release == "" || outputGCSPath == "") { + return fmt.Errorf("atleast one of folder or pipeline, release and outputGCSPath must be supplied") + } + if folder != "" && (pipeline != "" || release != "" || outputGCSPath != "") { + return fmt.Errorf("both folder and pipeline, release and outputGCSPath cannot be supplied") + } + if (pipeline != "" && (release == "" || outputGCSPath == "")) || + (release != "" && (pipeline == "" && outputGCSPath == "")) || + (outputGCSPath != "" && (pipeline == "" && release == "")) { + return fmt.Errorf("release, pipeline and outputGCSPath must be set") + } return apiclient.SetProjectID(cmdProject.Value.String()) }, RunE: func(cmd *cobra.Command, args []string) (err error) { + + var skaffoldConfigUri string + + if folder == "" { + skaffoldConfigUri, err = apiclient.GetCloudDeployGCSLocations(pipeline, release) + if err != nil { + return err + } + folder, err = apiclient.ExtractTgz(skaffoldConfigUri) + if err != nil { + return err + } + } + srcFolder := folder if env != "" { folder = path.Join(folder, env) @@ -65,7 +90,6 @@ var ApplyCmd = &cobra.Command{ grantPermission, _ := strconv.ParseBool(cmd.Flag("grant-permission").Value.String()) wait, _ := strconv.ParseBool(cmd.Flag("wait").Value.String()) - rJSONFiles := regexp.MustCompile(`(\S*)\.json`) integrationFolder := path.Join(srcFolder, "src") authconfigFolder := path.Join(folder, "authconfigs") connectorsFolder := path.Join(folder, "connectors") @@ -77,332 +101,61 @@ var ApplyCmd = &cobra.Command{ endpointsFolder := path.Join(folder, "endpoints") zonesFolder := path.Join(folder, "zones") - var stat fs.FileInfo - var integrationNames []string - var overridesBytes []byte - const sfdcNamingConvention = 2 // when file is split with _, the result must be 2 - apiclient.DisableCmdPrintHttpResponse() - if stat, err = os.Stat(authconfigFolder); err == nil && stat.IsDir() { - // create any authconfigs - err = filepath.Walk(authconfigFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - authConfigFile := filepath.Base(path) - if rJSONFiles.MatchString(authConfigFile) { - clilog.Info.Printf("Found configuration for authconfig: %s\n", authConfigFile) - version, _ := authconfigs.Find(getFilenameWithoutExtension(authConfigFile), "") - // create the authconfig only if the version was not found - if version == "" { - authConfigBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - clilog.Info.Printf("Creating authconfig: %s\n", authConfigFile) - if _, err = authconfigs.Create(authConfigBytes); err != nil { - return err - } - } else { - 
clilog.Info.Printf("Authconfig %s already exists\n", authConfigFile) - } - } - } - return nil - }) - - if err != nil { - return err - } - } - - if stat, err = os.Stat(endpointsFolder); err == nil && stat.IsDir() { - // create any endpoint attachments - err = filepath.Walk(endpointsFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - endpointFile := filepath.Base(path) - if rJSONFiles.MatchString(endpointFile) { - clilog.Info.Printf("Found configuration for endpoint attachment: %s\n", endpointFile) - } - if !connections.FindEndpoint(getFilenameWithoutExtension(endpointFile)) { - // the endpoint does not exist, try to create it - endpointBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - serviceAccountName, err := getServiceAttachment(endpointBytes) - if err != nil { - return err - } - if _, err = connections.CreateEndpoint(getFilenameWithoutExtension(endpointFile), - serviceAccountName, "", false); err != nil { - return err - } - } else { - clilog.Info.Printf("Endpoint %s already exists\n", endpointFile) - } - } - return nil - }) - if err != nil { - return err - } - } - - // create any managed zones - if stat, err = os.Stat(zonesFolder); err == nil && stat.IsDir() { - // create any managedzones - err = filepath.Walk(zonesFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - zoneFile := filepath.Base(path) - if rJSONFiles.MatchString(zoneFile) { - clilog.Info.Printf("Found configuration for managed zone: %s\n", zoneFile) - } - if _, err = connections.GetZone(getFilenameWithoutExtension(zoneFile), true); err != nil { - // the managed zone does not exist, try to create it - zoneBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - if _, err = connections.CreateZone(getFilenameWithoutExtension(zoneFile), - zoneBytes); err != nil { - return err - } - } else { - clilog.Info.Printf("Zone %s already exists\n", zoneFile) - } - } - return nil - }) - if err != nil { - return err - } + if err = processAuthConfigs(authconfigFolder); err != nil { + return err } - if stat, err = os.Stat(customConnectorsFolder); err == nil && stat.IsDir() { - //create any custom connectors - err = filepath.Walk(customConnectorsFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - customConnectionFile := filepath.Base(path) - if rJSONFiles.MatchString(customConnectionFile) { - customConnectionDetails := strings.Split(strings.TrimSuffix(customConnectionFile, filepath.Ext(customConnectionFile)), "-") - // the file format is name-version.json - if len(customConnectionDetails) == 2 { - clilog.Info.Printf("Found configuration for custom connection: %v\n", customConnectionFile) - contents, err := utils.ReadFile(path) - if err != nil { - return err - } - if err = connections.CreateCustomWithVersion(customConnectionDetails[0], - customConnectionDetails[1], contents, serviceAccountName, serviceAccountProject); err != nil { - return err - } - } - } - } - return nil - }) + if err = processEndpoints(endpointsFolder); err != nil { + return err } - if stat, err = os.Stat(connectorsFolder); err == nil && stat.IsDir() { - // create any connectors - err = filepath.Walk(connectorsFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - connectionFile := filepath.Base(path) - if rJSONFiles.MatchString(connectionFile) { - 
clilog.Info.Printf("Found configuration for connection: %s\n", connectionFile) - _, err = connections.Get(getFilenameWithoutExtension(connectionFile), "", true, false) - // create the connection only if the connection is not found - if err != nil { - connectionBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - clilog.Info.Printf("Creating connector: %s\n", connectionFile) - - if _, err = connections.Create(getFilenameWithoutExtension(connectionFile), - connectionBytes, - serviceAccountName, - serviceAccountProject, - encryptionKey, - grantPermission, - createSecret, - wait); err != nil { - return err - } - } else { - clilog.Info.Printf("Connector %s already exists\n", connectionFile) - } - } - } - return nil - }) - - if err != nil { - return err - } + if err = processManagedZones(zonesFolder); err != nil { + return err } - if stat, err = os.Stat(sfdcinstancesFolder); err == nil && stat.IsDir() { - // create any sfdc instances - err = filepath.Walk(sfdcinstancesFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - instanceFile := filepath.Base(path) - if rJSONFiles.MatchString(instanceFile) { - clilog.Info.Printf("Found configuration for sfdc instance: %s\n", instanceFile) - _, err = sfdc.GetInstance(getFilenameWithoutExtension(instanceFile), true) - // create the instance only if the sfdc instance is not found - if err != nil { - instanceBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - clilog.Info.Printf("Creating sfdc instance: %s\n", instanceFile) - _, err = sfdc.CreateInstanceFromContent(instanceBytes) - if err != nil { - return nil - } - } else { - clilog.Info.Printf("sfdc instance %s already exists\n", instanceFile) - } - } - } - return nil - }) - - if err != nil { + if !skipConnectors { + if err = processCustomConnectors(customConnectorsFolder); err != nil { return err } - } - if stat, err = os.Stat(sfdcchannelsFolder); err == nil && stat.IsDir() { - // create any sfdc channels - err = filepath.Walk(sfdcchannelsFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - channelFile := filepath.Base(path) - if rJSONFiles.MatchString(channelFile) { - clilog.Info.Printf("Found configuration for sfdc channel: %s\n", channelFile) - sfdcNames := strings.Split(getFilenameWithoutExtension(channelFile), "_") - if len(sfdcNames) != sfdcNamingConvention { - clilog.Warning.Printf("sfdc chanel file %s does not follow the naming "+ - "convention instanceName_channelName.json\n", channelFile) - return nil - } - version, _, err := sfdc.FindChannel(sfdcNames[1], sfdcNames[0]) - // create the instance only if the sfdc channel is not found - if err != nil { - channelBytes, err := utils.ReadFile(path) - if err != nil { - return err - } - clilog.Info.Printf("Creating sfdc channel: %s\n", channelFile) - _, err = sfdc.CreateChannelFromContent(version, channelBytes) - if err != nil { - return nil - } - } else { - clilog.Info.Printf("sfdc channel %s already exists\n", channelFile) - } - } - } - return nil - }) - - if err != nil { + if err = processConnectors(connectorsFolder, grantPermission, createSecret, wait); err != nil { return err } + } else { + clilog.Info.Printf("Skipping applying connector configuration\n") } - if _, err = os.Stat(overridesFile); err == nil { - overridesBytes, err = utils.ReadFile(overridesFile) - if err != nil { - return err - } + if err = processSfdcInstances(sfdcinstancesFolder); err != nil { + return err } - 
if len(overridesBytes) > 0 { - clilog.Info.Printf("Found overrides file %s\n", overridesFile) + if err = processSfdcChannels(sfdcchannelsFolder); err != nil { + return err } - // get the integration file - _ = filepath.Walk(integrationFolder, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - integrationFile := filepath.Base(path) - if rJSONFiles.MatchString(integrationFile) { - clilog.Info.Printf("Found configuration for integration: %s\n", integrationFile) - integrationNames = append(integrationNames, integrationFile) - } - } - return nil - }) - - if len(integrationNames) > 0 { - // get only the first file - integrationBytes, err := utils.ReadFile(path.Join(integrationFolder, integrationNames[0])) - if err != nil { - return err - } - clilog.Info.Printf("Create integration %s\n", getFilenameWithoutExtension(integrationNames[0])) - respBody, err := integrations.CreateVersion(getFilenameWithoutExtension(integrationNames[0]), - integrationBytes, overridesBytes, "", userLabel) - if err != nil { - return err - } - version, err := getVersion(respBody) - if err != nil { - return err - } - clilog.Info.Printf("Publish integration %s with version %s\n", - getFilenameWithoutExtension(integrationNames[0]), version) - // read any config variables - configVarsFile := path.Join(configVarsFolder, getFilenameWithoutExtension(integrationNames[0])+"-config.json") - var configVarBytes []byte - if _, err = os.Stat(configVarsFile); err == nil { - configVarBytes, err = utils.ReadFile(configVarsFile) - if err != nil { - return err - } - } - _, err = integrations.Publish(getFilenameWithoutExtension(integrationNames[0]), version, configVarBytes) + if err = processIntegration(overridesFile, integrationFolder, configVarsFolder, pipeline); err != nil { return err } - clilog.Warning.Printf("No integration files were found\n") - return err }, } -var serviceAccountName, serviceAccountProject, encryptionKey string +var serviceAccountName, serviceAccountProject, encryptionKey, pipeline, release, outputGCSPath string func init() { grantPermission, createSecret, wait := false, false, false ApplyCmd.Flags().StringVarP(&folder, "folder", "f", "", "Folder containing scaffolding configuration") + ApplyCmd.Flags().StringVarP(&pipeline, "pipeline", "", + "", "Cloud Deploy Pipeline name") + ApplyCmd.Flags().StringVarP(&release, "release", "", + "", "Cloud Deploy Release name") + ApplyCmd.Flags().StringVarP(&outputGCSPath, "output-gcs-path", "", + "", "Upload a file named results.json containing the results") ApplyCmd.Flags().BoolVarP(&grantPermission, "grant-permission", "g", false, "Grant the service account permission to the GCP resource; default is false") ApplyCmd.Flags().StringVarP(&userLabel, "userlabel", "u", @@ -419,8 +172,9 @@ func init() { false, "Create Secret Manager secrets when creating the connection; default is false") ApplyCmd.Flags().BoolVarP(&wait, "wait", "", false, "Waits for the connector to finish, with success or error; default is false") + ApplyCmd.Flags().BoolVarP(&skipConnectors, "skip-connectors", "", + false, "Skip applying connector configuration; default is false") - _ = ApplyCmd.MarkFlagRequired("folder") } func getFilenameWithoutExtension(filname string) string { @@ -455,3 +209,374 @@ func getServiceAttachment(respBody []byte) (sa string, err error) { } return jsonMap["serviceAttachment"], nil } + +func processAuthConfigs(authconfigFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + 
if stat, err = os.Stat(authconfigFolder); err == nil && stat.IsDir() { + // create any authconfigs + err = filepath.Walk(authconfigFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + authConfigFile := filepath.Base(path) + if rJSONFiles.MatchString(authConfigFile) { + clilog.Info.Printf("Found configuration for authconfig: %s\n", authConfigFile) + version, _ := authconfigs.Find(getFilenameWithoutExtension(authConfigFile), "") + // create the authconfig only if the version was not found + if version == "" { + authConfigBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + clilog.Info.Printf("Creating authconfig: %s\n", authConfigFile) + if _, err = authconfigs.Create(authConfigBytes); err != nil { + return err + } + } else { + clilog.Info.Printf("Authconfig %s already exists\n", authConfigFile) + } + } + } + return nil + }) + + if err != nil { + return err + } + } + return nil +} + +func processEndpoints(endpointsFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + if stat, err = os.Stat(endpointsFolder); err == nil && stat.IsDir() { + // create any endpoint attachments + err = filepath.Walk(endpointsFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + endpointFile := filepath.Base(path) + if rJSONFiles.MatchString(endpointFile) { + clilog.Info.Printf("Found configuration for endpoint attachment: %s\n", endpointFile) + } + if !connections.FindEndpoint(getFilenameWithoutExtension(endpointFile)) { + // the endpoint does not exist, try to create it + endpointBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + serviceAccountName, err := getServiceAttachment(endpointBytes) + if err != nil { + return err + } + if _, err = connections.CreateEndpoint(getFilenameWithoutExtension(endpointFile), + serviceAccountName, "", false); err != nil { + return err + } + } else { + clilog.Info.Printf("Endpoint %s already exists\n", endpointFile) + } + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func processManagedZones(zonesFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + // create any managed zones + if stat, err = os.Stat(zonesFolder); err == nil && stat.IsDir() { + // create any managedzones + err = filepath.Walk(zonesFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + zoneFile := filepath.Base(path) + if rJSONFiles.MatchString(zoneFile) { + clilog.Info.Printf("Found configuration for managed zone: %s\n", zoneFile) + } + if _, err = connections.GetZone(getFilenameWithoutExtension(zoneFile), true); err != nil { + // the managed zone does not exist, try to create it + zoneBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + if _, err = connections.CreateZone(getFilenameWithoutExtension(zoneFile), + zoneBytes); err != nil { + return err + } + } else { + clilog.Info.Printf("Zone %s already exists\n", zoneFile) + } + } + return nil + }) + if err != nil { + return err + } + } + return nil +} + +func processConnectors(connectorsFolder string, grantPermission bool, createSecret bool, wait bool) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + if stat, err = os.Stat(connectorsFolder); err == nil && stat.IsDir() { + // create any connectors + err = 
filepath.Walk(connectorsFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + connectionFile := filepath.Base(path) + if rJSONFiles.MatchString(connectionFile) { + clilog.Info.Printf("Found configuration for connection: %s\n", connectionFile) + _, err = connections.Get(getFilenameWithoutExtension(connectionFile), "", true, false) + // create the connection only if the connection is not found + if err != nil { + connectionBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + clilog.Info.Printf("Creating connector: %s\n", connectionFile) + + if _, err = connections.Create(getFilenameWithoutExtension(connectionFile), + connectionBytes, + serviceAccountName, + serviceAccountProject, + encryptionKey, + grantPermission, + createSecret, + wait); err != nil { + return err + } + } else { + clilog.Info.Printf("Connector %s already exists\n", connectionFile) + } + } + } + return nil + }) + + if err != nil { + return err + } + } + return nil +} + +func processCustomConnectors(customConnectorsFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + if stat, err = os.Stat(customConnectorsFolder); err == nil && stat.IsDir() { + //create any custom connectors + err = filepath.Walk(customConnectorsFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + customConnectionFile := filepath.Base(path) + if rJSONFiles.MatchString(customConnectionFile) { + customConnectionDetails := strings.Split(strings.TrimSuffix(customConnectionFile, filepath.Ext(customConnectionFile)), "-") + // the file format is name-version.json + if len(customConnectionDetails) == 2 { + clilog.Info.Printf("Found configuration for custom connection: %v\n", customConnectionFile) + contents, err := utils.ReadFile(path) + if err != nil { + return err + } + clilog.Info.Printf("Creating custom connector: %s\n", customConnectionFile) + if _, err := connections.GetCustomVersion(customConnectionDetails[0], + customConnectionDetails[1], false); err != nil { + // didn't find the custom connector, create it + if err = connections.CreateCustomWithVersion(customConnectionDetails[0], + customConnectionDetails[1], contents, serviceAccountName, serviceAccountProject); err != nil { + return err + } + } else { + clilog.Info.Printf("Custom Connector %s already exists\n", customConnectionFile) + } + } + } + } + return nil + }) + } + return nil +} + +func processSfdcInstances(sfdcinstancesFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + if stat, err = os.Stat(sfdcinstancesFolder); err == nil && stat.IsDir() { + // create any sfdc instances + err = filepath.Walk(sfdcinstancesFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + instanceFile := filepath.Base(path) + if rJSONFiles.MatchString(instanceFile) { + clilog.Info.Printf("Found configuration for sfdc instance: %s\n", instanceFile) + _, err = sfdc.GetInstance(getFilenameWithoutExtension(instanceFile), true) + // create the instance only if the sfdc instance is not found + if err != nil { + instanceBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + clilog.Info.Printf("Creating sfdc instance: %s\n", instanceFile) + _, err = sfdc.CreateInstanceFromContent(instanceBytes) + if err != nil { + return nil + } + } else { + clilog.Info.Printf("sfdc instance %s already exists\n", 
instanceFile) + } + } + } + return nil + }) + + if err != nil { + return err + } + } + return nil +} + +func processSfdcChannels(sfdcchannelsFolder string) (err error) { + var stat fs.FileInfo + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + const sfdcNamingConvention = 2 // when file is split with _, the result must be 2 + + if stat, err = os.Stat(sfdcchannelsFolder); err == nil && stat.IsDir() { + // create any sfdc channels + err = filepath.Walk(sfdcchannelsFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + channelFile := filepath.Base(path) + if rJSONFiles.MatchString(channelFile) { + clilog.Info.Printf("Found configuration for sfdc channel: %s\n", channelFile) + sfdcNames := strings.Split(getFilenameWithoutExtension(channelFile), "_") + if len(sfdcNames) != sfdcNamingConvention { + clilog.Warning.Printf("sfdc chanel file %s does not follow the naming "+ + "convention instanceName_channelName.json\n", channelFile) + return nil + } + version, _, err := sfdc.FindChannel(sfdcNames[1], sfdcNames[0]) + // create the instance only if the sfdc channel is not found + if err != nil { + channelBytes, err := utils.ReadFile(path) + if err != nil { + return err + } + clilog.Info.Printf("Creating sfdc channel: %s\n", channelFile) + _, err = sfdc.CreateChannelFromContent(version, channelBytes) + if err != nil { + return nil + } + } else { + clilog.Info.Printf("sfdc channel %s already exists\n", channelFile) + } + } + } + return nil + }) + + if err != nil { + return err + } + } + return nil +} + +func processIntegration(overridesFile string, integrationFolder string, configVarsFolder string, pipeline string) (err error) { + rJSONFiles := regexp.MustCompile(`(\S*)\.json`) + + var integrationNames []string + var overridesBytes []byte + + if _, err = os.Stat(overridesFile); err == nil { + overridesBytes, err = utils.ReadFile(overridesFile) + if err != nil { + return err + } + } + + if len(overridesBytes) > 0 { + clilog.Info.Printf("Found overrides file %s\n", overridesFile) + } + + // get the integration file + _ = filepath.Walk(integrationFolder, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if !info.IsDir() { + integrationFile := filepath.Base(path) + if rJSONFiles.MatchString(integrationFile) { + clilog.Info.Printf("Found configuration for integration: %s\n", integrationFile) + integrationNames = append(integrationNames, integrationFile) + } + } + return nil + }) + + if len(integrationNames) > 0 { + // get only the first file + integrationBytes, err := utils.ReadFile(path.Join(integrationFolder, integrationNames[0])) + if err != nil { + return err + } + clilog.Info.Printf("Create integration %s\n", getFilenameWithoutExtension(integrationNames[0])) + respBody, err := integrations.CreateVersion(getFilenameWithoutExtension(integrationNames[0]), + integrationBytes, overridesBytes, "", userLabel) + if err != nil { + return err + } + version, err := getVersion(respBody) + if err != nil { + return err + } + clilog.Info.Printf("Publish integration %s with version %s\n", + getFilenameWithoutExtension(integrationNames[0]), version) + // read any config variables + configVarsFile := path.Join(configVarsFolder, getFilenameWithoutExtension(integrationNames[0])+"-config.json") + var configVarBytes []byte + if _, err = os.Stat(configVarsFile); err == nil { + configVarBytes, err = utils.ReadFile(configVarsFile) + if err != nil { + return err + } + } + _, err = 
integrations.Publish(getFilenameWithoutExtension(integrationNames[0]), version, configVarBytes) + if err != nil { + return err + } + if pipeline != "" { + err = apiclient.WriteResultsFile(outputGCSPath, "SUCCEEDED") + } + return err + } + clilog.Warning.Printf("No integration files were found\n") + return nil +} diff --git a/internal/cmd/integrations/scaffold.go b/internal/cmd/integrations/scaffold.go index a5cbc9a..fbae229 100644 --- a/internal/cmd/integrations/scaffold.go +++ b/internal/cmd/integrations/scaffold.go @@ -190,60 +190,64 @@ var ScaffoldCmd = &cobra.Command{ } } - connectors, err := integrations.GetConnectionsWithRegion(integrationBody) - if err != nil { - return err - } - - if len(connectors) > 0 { - clilog.Info.Printf("Found connectors in the integration\n") - if err = generateFolder(path.Join(folder, "connectors")); err != nil { + if !skipConnectors { + connectors, err := integrations.GetConnectionsWithRegion(integrationBody) + if err != nil { return err } - //check for custom connectors - for _, connector := range connectors { - if connector.CustomConnection { - if err = generateFolder(path.Join(folder, "custom-connectors")); err != nil { - return err - } - break + + if len(connectors) > 0 { + clilog.Info.Printf("Found connectors in the integration\n") + if err = generateFolder(path.Join(folder, "connectors")); err != nil { + return err } - } - for _, connector := range connectors { - if connector.CustomConnection { - customConnectionResp, err := connections.GetCustomVersion(connector.Name, connector.Version, true) - if err != nil { - return err - } - clilog.Info.Printf("Storing custom connector %s\n", connector.Name) - customConnectionResp, err = apiclient.PrettifyJson(customConnectionResp) - if err != nil { - return err - } - if err = apiclient.WriteByteArrayToFile( - path.Join(folder, "custom-connectors", connector.Name+"-"+connector.Version+jsonExt), - false, - customConnectionResp); err != nil { - return err - } - } else { - connectionResp, err := connections.GetConnectionDetailWithRegion(connector.Name, connector.Region, "", true, true) - if err != nil { - return err - } - clilog.Info.Printf("Storing connector %s\n", connector.Name) - connectionResp, err = apiclient.PrettifyJson(connectionResp) - if err != nil { - return err + //check for custom connectors + for _, connector := range connectors { + if connector.CustomConnection { + if err = generateFolder(path.Join(folder, "custom-connectors")); err != nil { + return err + } + break } - if err = apiclient.WriteByteArrayToFile( - path.Join(folder, "connectors", connector.Name+jsonExt), - false, - connectionResp); err != nil { - return err + } + for _, connector := range connectors { + if connector.CustomConnection { + customConnectionResp, err := connections.GetCustomVersion(connector.Name, connector.Version, true) + if err != nil { + return err + } + clilog.Info.Printf("Storing custom connector %s\n", connector.Name) + customConnectionResp, err = apiclient.PrettifyJson(customConnectionResp) + if err != nil { + return err + } + if err = apiclient.WriteByteArrayToFile( + path.Join(folder, "custom-connectors", connector.Name+"-"+connector.Version+jsonExt), + false, + customConnectionResp); err != nil { + return err + } + } else { + connectionResp, err := connections.GetConnectionDetailWithRegion(connector.Name, connector.Region, "", true, true) + if err != nil { + return err + } + clilog.Info.Printf("Storing connector %s\n", connector.Name) + connectionResp, err = apiclient.PrettifyJson(connectionResp) + if err != nil 
{ + return err + } + if err = apiclient.WriteByteArrayToFile( + path.Join(folder, "connectors", connector.Name+jsonExt), + false, + connectionResp); err != nil { + return err + } } } } + } else { + clilog.Info.Printf("Skipping scaffold of connector configuration\n") } instances, err := integrations.GetSfdcInstances(integrationBody) @@ -297,13 +301,29 @@ var ScaffoldCmd = &cobra.Command{ } } + if cloudDeploy { + clilog.Info.Printf("Storing clouddeploy.yaml and skaffold.yaml\n") + if err = apiclient.WriteByteArrayToFile( + path.Join(baseFolder, "clouddeploy.yaml"), + false, + []byte(utils.GetCloudDeployYaml(name, env))); err != nil { + return err + } + if err = apiclient.WriteByteArrayToFile( + path.Join(baseFolder, "skaffold.yaml"), + false, + []byte(utils.GetSkaffoldYaml())); err != nil { + return err + } + } + return err }, } var ( - cloudBuild bool - env string + cloudBuild, cloudDeploy, skipConnectors bool + env string ) func init() { @@ -318,11 +338,15 @@ func init() { ScaffoldCmd.Flags().StringVarP(&snapshot, "snapshot", "s", "", "Integration flow snapshot number") ScaffoldCmd.Flags().BoolVarP(&cloudBuild, "cloud-build", "", - true, "don't generate cloud build file; default is true") + false, "Generate cloud build file; default is false") + ScaffoldCmd.Flags().BoolVarP(&cloudDeploy, "cloud-deploy", "", + false, "Generate cloud deploy files; default is false") ScaffoldCmd.Flags().StringVarP(&folder, "folder", "f", "", "Folder to generate the scaffolding") ScaffoldCmd.Flags().StringVarP(&env, "env", "e", "", "Environment name for the scaffolding") + ScaffoldCmd.Flags().BoolVarP(&skipConnectors, "skip-connectors", "", + false, "Exclude connectors from scaffold") _ = ScaffoldCmd.MarkFlagRequired("name") } diff --git a/internal/cmd/utils/utils.go b/internal/cmd/utils/utils.go index d264764..39c91e0 100644 --- a/internal/cmd/utils/utils.go +++ b/internal/cmd/utils/utils.go @@ -15,6 +15,7 @@ package utils import ( + "fmt" "io" "os" ) @@ -72,6 +73,95 @@ options: logging: CLOUD_LOGGING_ONLY substitution_option: "ALLOW_LOOSE"` +var cloudDeploy = `# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: deploy.cloud.google.com/v1 +kind: DeliveryPipeline +metadata: + name: appint-%s-pipeline +serialPipeline: + stages: + - targetId: %s-env +--- + +apiVersion: deploy.cloud.google.com/v1 +kind: Target +metadata: + name: %s-env +customTarget: + customTargetType: appint-%s-target +--- + +apiVersion: deploy.cloud.google.com/v1 +kind: CustomTargetType +metadata: + name: appint-%s-target +customActions: + renderAction: render-app-integration + deployAction: deploy-app-integration` + +var skaffold = `# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: skaffold/v4beta7 +kind: Config +customActions: +- name: render-app-integration + containers: + - name: render + image: gcr.io/google.com/cloudsdktool/google-cloud-cli@sha256:66e2681aa3099b4e517e4cdcdefff8f2aa45d305007124ccdc09686f6712d018 + command: ['/bin/bash'] + args: + - '-c' + - |- + echo "Sample manifest rendered content" > manifest.txt + gsutil cp manifest.txt $CLOUD_DEPLOY_OUTPUT_GCS_PATH/manifest.txt + echo {\"resultStatus\": \"SUCCEEDED\", \"manifestFile\": \"$CLOUD_DEPLOY_OUTPUT_GCS_PATH/manifest.txt\"} > results.json + gsutil cp results.json $CLOUD_DEPLOY_OUTPUT_GCS_PATH/results.json +- name: deploy-app-integration + containers: + - name: deploy + image: us-docker.pkg.dev/appintegration-toolkit/images/integrationcli-deploy:latest + command: ['sh'] + args: + - '-c' + - |- + integrationcli integrations apply --env=dev --reg=$CLOUD_DEPLOY_LOCATION --proj=$CLOUD_DEPLOY_PROJECT --pipeline=$CLOUD_DEPLOY_DELIVERY_PIPELINE --release=$CLOUD_DEPLOY_RELEASE --target=$CLOUD_DEPLOY_TARGET --metadata-token` + +func GetCloudDeployYaml(integrationName string, env string) string { + if env == "" { + env = "dev" + } + return fmt.Sprintf(cloudDeploy, integrationName, env, env, integrationName, integrationName) +} + +func GetSkaffoldYaml() string { + return skaffold +} + func GetCloudBuildYaml() string { return cloudBuild }
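For reference, the Cloud Deploy path introduced above reduces to roughly the sketch below. It is a condensed, illustrative composition of the helpers added in internal/apiclient/bundles.go, in the order internal/cmd/integrations/apply.go calls them when --folder is not supplied; the wrapper function name is hypothetical, the scaffolding processing between download and upload is elided, and the import path assumes the repo's internal module layout.

// Sketch only: mirrors apply --pipeline/--release/--output-gcs-path, minus the folder processing.
package sketch

import (
	"fmt"

	"internal/apiclient"
)

func applyFromCloudDeploy(pipeline, release, outputGCSPath string) error {
	// Project and region are expected to be set beforehand, as apply.go does in PreRunE
	// via apiclient.SetProjectID and apiclient.SetRegion.

	// Resolve the release's rendered skaffold config archive (a gs:// .tgz URI).
	skaffoldConfigURI, err := apiclient.GetCloudDeployGCSLocations(pipeline, release)
	if err != nil {
		return err
	}

	// Download the archive and extract it into a temporary folder; this folder then
	// plays the role of the --folder scaffolding directory.
	folder, err := apiclient.ExtractTgz(skaffoldConfigURI)
	if err != nil {
		return err
	}
	fmt.Printf("applying scaffolding from %s\n", folder)

	// ... authconfigs, connectors, sfdc config and the integration are applied here ...

	// Report the outcome back to Cloud Deploy by writing results.json to the output path.
	return apiclient.WriteResultsFile(outputGCSPath, "SUCCEEDED")
}

The skaffold.yaml template added in internal/cmd/utils/utils.go wires this flow into a Cloud Deploy custom target: its deploy-app-integration action runs the integrationcli-deploy image and invokes integrationcli integrations apply with the pipeline and release supplied by Cloud Deploy.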